hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars) |
---|---|---|---|
smithwaterman_change_4_stage2_orignal.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
#include<limits>
double diff(timespec start, timespec end)
{
double a=0;
if((end.tv_nsec-start.tv_nsec)<0)
{
a=end.tv_sec-start.tv_sec-1;
a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0;
}
else
{
a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0;
}
return a;
}
struct NUM_ADD
{
short2 read_reference_number;
int address_array;
};
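/*
 * NUM_ADD describes one read/reference pair in the packed "data" buffer:
 * read_reference_number.x is the read length, read_reference_number.y is the
 * reference length, and address_array is the byte offset of this pair's read
 * bases (followed by its char4-packed reference bases) inside "data".
 *
 * calculate_cigar processes one alignment pair per block (offset = blockIdx.x,
 * strided by gridDim.x). Each thread owns one read position and the score
 * matrix is swept along anti-diagonals j, so thread t updates cell (t, j-t).
 * Scoring is affine: match +200, mismatch -150, gap open -260, gap extend -11.
 * A short2 per cell is written to "direction" for the traceback kernel, and
 * result[offset] records the end point of the best semi-global alignment
 * (the maximum over the last row and last column).
 */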
__global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction) //, char * result
{
int offset=blockIdx.x;
__shared__ short2 read_reference_number;
__shared__ char * read_base_array;
__shared__ char4 * reference_base_array;
__shared__ int mismatch;
__shared__ int match;
__shared__ int open;
__shared__ int extend;
__shared__ short2 * direction_index;
while(offset<size)
{
if( threadIdx.x==0)
{
read_reference_number=num_add[offset].read_reference_number;
read_base_array=(char *) (data+num_add[offset].address_array);
reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128);
direction_index=(short2 *) (direction+offset*640*1100);
}
__syncthreads();
__shared__ char reference_base_in_char[600];
int hh=(read_reference_number.y+4-1)/4;
int tt=(hh+blockDim.x-1)/blockDim.x;
for(int ii=0;ii<tt;ii++)
{
int aa=threadIdx.x+ii*blockDim.x;
if(aa< hh)
{
char4 reference_base_in_thread;
reference_base_in_thread=reference_base_array[aa]; //Is it right to get data from global memory
reference_base_in_char[aa*4]=reference_base_in_thread.x;
reference_base_in_char[aa*4+1]=reference_base_in_thread.y;
reference_base_in_char[aa*4+2]=reference_base_in_thread.z;
reference_base_in_char[aa*4+3]=reference_base_in_thread.w;
}
}
__shared__ int MM[130];
__shared__ int gap_h[130]; //insertion
__shared__ short2 gap_size_h[130]; //insertion
__shared__ int result_col;
__shared__ int result_row;
__shared__ int result_col_index;
__shared__ int result_row_index;
//__shared__ char cigar_m[128];
//__shared__ int cigar_int_m[128];
//int final_result;
//int final_i;
//int final_j;
if(threadIdx.x==0)
{
MM[0]=0;
gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2;
gap_size_h[0].x=0;
gap_size_h[0].y=0;
match=200;
mismatch=-150;
open=-260;
extend=-11;
result_col=-1000000000;//std::numeric_limits<int>::min()/2;
result_row=-1000000000;//std::numeric_limits<int>::min()/2;
// for(int i=0;i<read_reference_number.y;i++)
// printf("%c",reference_base_in_char[i]);
// printf("\n");
// for(int i=0;i<read_reference_number.x;i++)
// printf("%c",read_base_array[i]);
}
__syncthreads();
int read_number=read_reference_number.x;
{
char read_base;
read_base=read_base_array[threadIdx.x];
int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;;
int gap_size_v=0; //Deletion
int M=0; //now
int step_right; //now
int ki=0;//insertion h negative
//deletion v
int MMM=0;
short mt=0;
short2 curmt;
curmt.x=0;
curmt.y=0;
int current_reference_id=0;
for(int j=0;j<read_reference_number.x+read_reference_number.y-1;j++)
{
int aa=j-threadIdx.x;
if( aa>=0 && (current_reference_id<read_reference_number.y))
{
int prev_gap=M-260; //M which is calculated by the last step in the same thread
gap_v+=-11;
if(prev_gap>gap_v)
{
gap_v=prev_gap;
gap_size_v=1;
}
else
gap_size_v++;
char reference_base_each=reference_base_in_char[current_reference_id];
M=MMM+(read_base==reference_base_each? 200:-150);
prev_gap=MM[threadIdx.x]-260;
step_right=gap_h[threadIdx.x]-11;
if(prev_gap>step_right)
{
step_right=prev_gap;
ki=1;
}
else
ki=gap_size_h[threadIdx.x].x+1;
bool diag=(M>=gap_v)&&(M>=step_right);
curmt.y=0;
if(diag)
{
curmt.x=0;
//if(threadIdx.x==0||current_reference_id==0)
// curmt.y=0;
// else
curmt.y=mt+1;
// curBtrack=0;
}
else
if(step_right>=gap_v)
{
M=step_right;
curmt.x=0-ki;
// curBtrack=0-ki;
}
else
{
M=gap_v;
curmt.x=gap_size_v;
//curBtrack=gap_size_v;
}
MMM=MM[threadIdx.x];
mt=gap_size_h[threadIdx.x].y;
direction_index[640*j+threadIdx.x]=curmt;
//if(threadIdx.x==read_reference_number.x-3)
//printf("%p %d ", &direction_index[800*j+threadIdx.x],curBtrack);
if(current_reference_id==read_reference_number.y-1)
{
if(M>=result_row)
{
result_row=M;
result_row_index=threadIdx.x; //
}
//printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x);
}
if(threadIdx.x==read_reference_number.x-1)
{
if(M>=result_col)
{
result_col=M;
result_col_index=current_reference_id; // +1
}
}
current_reference_id++;
}
__syncthreads(); //make sure the previous values of MM[threadIdx.x+1] have been read by the other threads before they are overwritten.
MM[threadIdx.x+1]=M;
gap_h[threadIdx.x+1]=step_right;
gap_size_h[threadIdx.x+1].x=ki;
gap_size_h[threadIdx.x+1].y=curmt.y;
__syncthreads(); // the second __syncthreads() makes sure every MM[threadIdx.x+1] has received its new value before M, D and I are updated in the next iteration.
}
}
// char state;//0 match; 1 mismatch; 2 insertion; 3 deletion
// __shared__ int cigar_index;
// int segment_length;
// short2 btr;
// char new_state;
// int step_length;
int4 result4;
if(threadIdx.x==read_reference_number.x-1)
{
//printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index);
if(result_row>result_col||result_row==result_col&&(read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1))
{
// final_result=result_row;
result4.x=read_reference_number.y-1;
result4.y=result_row_index;
result4.z=read_reference_number.x-1-result_row_index;
}
else
{
// final_result=result_col;
result4.x=result_col_index;
result4.y=read_reference_number.x-1;
result4.z=0;
}
//result[offset*3]=final_result;
//printf("%d\n",final_result);
//result4.x=fina_i;
//result4.y=fina_j;
//result4.z=segment_length;
result[offset]=result4;
}
__syncthreads();
offset+=gridDim.x;
}
}
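/*
 * calculate_cigar_2 is the traceback stage. Thread 0 of each block walks the
 * direction matrix backwards from the end point stored in result[offset],
 * merging runs of equal states into CIGAR operations ('M' diagonal moves,
 * 'I' insertions, 'D' deletions, 'S' soft clips at either end). The finished
 * CIGAR is then reversed in parallel into cigar/cigar_int, and result[offset].z
 * and .w receive the alignment start and the number of CIGAR entries.
 */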
__global__ void calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //, char * result
{
int offset=blockIdx.x;
int4 result4;
short2 * direction_index;
__shared__ char * cigar_store;
__shared__ int *cigar_int_store;
__shared__ char cigar_m[128];
__shared__ int cigar_int_m[128];
while(offset<size)
{
char state;//0 match; 1 mismatch; 2 insertion; 3 deletion
__shared__ int cigar_index;
int segment_length;
short2 btr;
char new_state;
int step_length;
if( threadIdx.x==0)
{
result4=result[offset];
direction_index=(short2 *) (direction+offset*640*1100);
cigar_store=(char *) (cigar+offset*sizeof(char)*128);
cigar_int_store=(int *) (cigar_int+offset*128);
//printf("\n %d %d\n", final_i,final_j);
cigar_index=0;
if(result4.z>0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=result4.z;
cigar_index++;
}
segment_length=0;
state='N';
do
{
btr=direction_index[(result4.x+result4.y)*640+result4.y];
if(btr.x>0)
{
new_state='D';
step_length=btr.x;
result4.x-=step_length;
}
else
if(btr.x<0)
{
new_state='I';
step_length=0-btr.x;
result4.y-=step_length;
}
else
{
new_state='M';
step_length=btr.y;
result4.x-=step_length;
result4.y-=step_length;
}
if(state=='N') state=new_state;
if(state==new_state)
{
segment_length+=step_length;
}
else
{
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
segment_length=step_length;
cigar_index++;
state=new_state;
}
}while(result4.x>=0&&result4.y>=0);
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
cigar_index++;
if(result4.y>=0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=result4.y+1;
cigar_index++;
}
result4.z=result4.x+1;
result4.w=cigar_index;
result[offset]=result4;
/* for(int i=cigar_index-1;i>=0;i--)
{
printf("%d%c",cigar_int_m[i],cigar_m[i]);
}
*/
}
__syncthreads();
if(threadIdx.x<cigar_index && cigar_index<=blockDim.x)
{
// if(threadIdx.x==0)
// printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]);
cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x];
cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x];
// if(threadIdx.x==0)
// printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]);
}
offset+=gridDim.x;
}
}
struct InputData
{
char read_base[600];
char reference_base[600];
};
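/*
 * Host driver: reads 101 sequences from the input file, forms row*col
 * read/reference pairs, and packs them into a single host buffer consisting of
 * a 128-byte aligned NUM_ADD table followed, per pair, by the read bases and
 * the char4-packed reference bases, each padded to a 128-byte boundary. The
 * whole buffer is shipped to the device with one hipMemcpy before the kernel
 * launch, and only the per-pair result vectors are copied back for timing.
 */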
int main(int artc, char* args[])
{
int total_size=0;
FILE * file;
file=fopen(args[1],"r");
int size;
double computation_time=0;//total_time=0;
timespec start,finish;
/* char data[200][1000];
for(int i=0;i<101;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{ int index=0;
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<row;i++)
for(int j=0;j<col;j++)
{
strcpy(inputdata[index].reference_base,data[1]);
strcpy(inputdata[index].read_base,data[1]);
index++;
}
*/
/* fscanf(file,"%d",&size);
while(!feof(file))
{
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<size;i++)
{
fscanf(file,"%s ",inputdata[i].reference_base);
fscanf(file,"%s ",inputdata[i].read_base);
}
*/
char data[200][1000];
for(int i=0;i<101;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{ int index=0;
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<row;i++)
for(int j=0;j<col;j++)
{
strcpy(inputdata[index].reference_base,data[i]);
strcpy(inputdata[index].read_base,data[j]);
index++;
}
//data preparation.
char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128);
NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total);
char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; // padded to a 128-byte boundary, thus we do not need to worry about alignment
int data_size=0;
char * data_d_total;
hipMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4);
int * result_h=(int *) malloc(sizeof(int)*size*4);
char * cigar_h=(char *) malloc(sizeof(char)*size*128);
int * cigar_int_h=(int *) malloc(sizeof(int)*size*128);
for(int i=0;i<size;i++)
{
char4 reference_tep[150];
int read_len=strlen(inputdata[i].read_base);
int ref_len=strlen(inputdata[i].reference_base);
int new_len=(ref_len+4-1)/4;
total_size+=ref_len*read_len;
for(int j=0;j<new_len;j++)
{
reference_tep[j].x=inputdata[i].reference_base[j*4];
if(j*4+1<ref_len)
reference_tep[j].y=inputdata[i].reference_base[j*4+1];
if(j*4+2<ref_len)
reference_tep[j].z=inputdata[i].reference_base[j*4+2];
if(j*4+3<ref_len)
reference_tep[j].w=inputdata[i].reference_base[j*4+3];
}
data_num_add[i].read_reference_number.x=read_len;
data_num_add[i].read_reference_number.y=ref_len;
data_num_add[i].address_array=data_size;
memcpy(data_h,inputdata[i].read_base,read_len);
data_h+=(read_len+128-1)/128*128;
data_size+=(read_len+128-1)/128*128;
memcpy(data_h,reference_tep,sizeof(char4)* new_len);
data_h+=(new_len*sizeof(char4)+127)/128*128;
data_size+=(new_len*sizeof(char4)+127)/128*128;
}
int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128;
hipMemcpy(data_d_total,data_h_total,data_size_to_copy,hipMemcpyHostToDevice);
NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total);
char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128;
int4 * result_d=(int4 *) (data_d_total+data_size_to_copy);
char * cigar;
hipMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int)));
int * cigar_int=(int *) (cigar+size*128*sizeof(char));
int * direction;
hipMalloc( (int **) & direction, size * (640*1100* sizeof (int)));
dim3 block(128);
dim3 grid(size);
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
hipLaunchKernelGGL(( calculate_cigar), dim3(grid),dim3(block), 0, 0, size,data_d,num_add_d,result_d,direction); //result
// calculate_cigar_2<<<grid,block>>> (size,result_d,cigar,cigar_int,direction); //result
hipMemcpy(result_h,result_d,size*sizeof(int)*4,hipMemcpyDeviceToHost);
// hipMemcpy(cigar_h,cigar,128*sizeof(char)*size, hipMemcpyDeviceToHost);
// hipMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,hipMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
computation_time+=diff(start,finish);
// for(int i=0;i<size;i++)
{
// printf("%d\n",result_h[i*4+1]);
/* printf("[");
for(int j=0;j<result_h[i*4+3];j++)
{
if(j!=0) printf(", ");
printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]);
}
printf("]\n");
*/ }
hipFree(direction);
free(data_h_total);
hipFree(data_d_total);
free(inputdata);
hipFree(cigar);
free(cigar_int_h);
free(cigar_h);
// fscanf(file,"%d",&size);
}
// printf(" computation_time= %e total_time=%e \n",computation_time,0);
printf(" computation_time= %e %d GCUPs=%lf\n",computation_time,total_size,( (double)total_size)/computation_time/1000000000);
return 0;
}
| smithwaterman_change_4_stage2_orignal.cu | #include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <cuda.h>
#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
#include<limits>
double diff(timespec start, timespec end)
{
double a=0;
if((end.tv_nsec-start.tv_nsec)<0)
{
a=end.tv_sec-start.tv_sec-1;
a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0;
}
else
{
a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0;
}
return a;
}
struct NUM_ADD
{
short2 read_reference_number;
int address_array;
};
__global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction) //, char * result
{
int offset=blockIdx.x;
__shared__ short2 read_reference_number;
__shared__ char * read_base_array;
__shared__ char4 * reference_base_array;
__shared__ int mismatch;
__shared__ int match;
__shared__ int open;
__shared__ int extend;
__shared__ short2 * direction_index;
while(offset<size)
{
if( threadIdx.x==0)
{
read_reference_number=num_add[offset].read_reference_number;
read_base_array=(char *) (data+num_add[offset].address_array);
reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128);
direction_index=(short2 *) (direction+offset*640*1100);
}
__syncthreads();
__shared__ char reference_base_in_char[600];
int hh=(read_reference_number.y+4-1)/4;
int tt=(hh+blockDim.x-1)/blockDim.x;
for(int ii=0;ii<tt;ii++)
{
int aa=threadIdx.x+ii*blockDim.x;
if(aa< hh)
{
char4 reference_base_in_thread;
reference_base_in_thread=reference_base_array[aa]; //Is it right to get data from global memory
reference_base_in_char[aa*4]=reference_base_in_thread.x;
reference_base_in_char[aa*4+1]=reference_base_in_thread.y;
reference_base_in_char[aa*4+2]=reference_base_in_thread.z;
reference_base_in_char[aa*4+3]=reference_base_in_thread.w;
}
}
__shared__ int MM[130];
__shared__ int gap_h[130]; //insertion
__shared__ short2 gap_size_h[130]; //insertion
__shared__ int result_col;
__shared__ int result_row;
__shared__ int result_col_index;
__shared__ int result_row_index;
//__shared__ char cigar_m[128];
//__shared__ int cigar_int_m[128];
//int final_result;
//int final_i;
//int final_j;
if(threadIdx.x==0)
{
MM[0]=0;
gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2;
gap_size_h[0].x=0;
gap_size_h[0].y=0;
match=200;
mismatch=-150;
open=-260;
extend=-11;
result_col=-1000000000;//std::numeric_limits<int>::min()/2;
result_row=-1000000000;//std::numeric_limits<int>::min()/2;
// for(int i=0;i<read_reference_number.y;i++)
// printf("%c",reference_base_in_char[i]);
// printf("\n");
// for(int i=0;i<read_reference_number.x;i++)
// printf("%c",read_base_array[i]);
}
__syncthreads();
int read_number=read_reference_number.x;
{
char read_base;
read_base=read_base_array[threadIdx.x];
int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;;
int gap_size_v=0; //Deletion
int M=0; //now
int step_right; //now
int ki=0;//insertion h negative
//deletion v
int MMM=0;
short mt=0;
short2 curmt;
curmt.x=0;
curmt.y=0;
int current_reference_id=0;
for(int j=0;j<read_reference_number.x+read_reference_number.y-1;j++)
{
int aa=j-threadIdx.x;
if( aa>=0 && (current_reference_id<read_reference_number.y))
{
int prev_gap=M-260; //M which is calculated by the last step in the same thread
gap_v+=-11;
if(prev_gap>gap_v)
{
gap_v=prev_gap;
gap_size_v=1;
}
else
gap_size_v++;
char reference_base_each=reference_base_in_char[current_reference_id];
M=MMM+(read_base==reference_base_each? 200:-150);
prev_gap=MM[threadIdx.x]-260;
step_right=gap_h[threadIdx.x]-11;
if(prev_gap>step_right)
{
step_right=prev_gap;
ki=1;
}
else
ki=gap_size_h[threadIdx.x].x+1;
bool diag=(M>=gap_v)&&(M>=step_right);
curmt.y=0;
if(diag)
{
curmt.x=0;
//if(threadIdx.x==0||current_reference_id==0)
// curmt.y=0;
// else
curmt.y=mt+1;
// curBtrack=0;
}
else
if(step_right>=gap_v)
{
M=step_right;
curmt.x=0-ki;
// curBtrack=0-ki;
}
else
{
M=gap_v;
curmt.x=gap_size_v;
//curBtrack=gap_size_v;
}
MMM=MM[threadIdx.x];
mt=gap_size_h[threadIdx.x].y;
direction_index[640*j+threadIdx.x]=curmt;
//if(threadIdx.x==read_reference_number.x-3)
//printf("%p %d ", &direction_index[800*j+threadIdx.x],curBtrack);
if(current_reference_id==read_reference_number.y-1)
{
if(M>=result_row)
{
result_row=M;
result_row_index=threadIdx.x; //
}
//printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x);
}
if(threadIdx.x==read_reference_number.x-1)
{
if(M>=result_col)
{
result_col=M;
result_col_index=current_reference_id; // +1
}
}
current_reference_id++;
}
__syncthreads(); //make sure the previous values of MM[threadIdx.x+1] have been read by the other threads before they are overwritten.
MM[threadIdx.x+1]=M;
gap_h[threadIdx.x+1]=step_right;
gap_size_h[threadIdx.x+1].x=ki;
gap_size_h[threadIdx.x+1].y=curmt.y;
__syncthreads(); // the second __syncthreads() makes sure every MM[threadIdx.x+1] has received its new value before M, D and I are updated in the next iteration.
}
}
// char state;//0 match; 1 mismatch; 2 insertion; 3 deletion
// __shared__ int cigar_index;
// int segment_length;
// short2 btr;
// char new_state;
// int step_length;
int4 result4;
if(threadIdx.x==read_reference_number.x-1)
{
//printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index);
if(result_row>result_col||result_row==result_col&&(read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1))
{
// final_result=result_row;
result4.x=read_reference_number.y-1;
result4.y=result_row_index;
result4.z=read_reference_number.x-1-result_row_index;
}
else
{
// final_result=result_col;
result4.x=result_col_index;
result4.y=read_reference_number.x-1;
result4.z=0;
}
//result[offset*3]=final_result;
//printf("%d\n",final_result);
//result4.x=fina_i;
//result4.y=fina_j;
//result4.z=segment_length;
result[offset]=result4;
}
__syncthreads();
offset+=gridDim.x;
}
}
__global__ void calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //, char * result
{
int offset=blockIdx.x;
int4 result4;
short2 * direction_index;
__shared__ char * cigar_store;
__shared__ int *cigar_int_store;
__shared__ char cigar_m[128];
__shared__ int cigar_int_m[128];
while(offset<size)
{
char state;//0 match; 1 mismatch; 2 insertion; 3 deletion
__shared__ int cigar_index;
int segment_length;
short2 btr;
char new_state;
int step_length;
if( threadIdx.x==0)
{
result4=result[offset];
direction_index=(short2 *) (direction+offset*640*1100);
cigar_store=(char *) (cigar+offset*sizeof(char)*128);
cigar_int_store=(int *) (cigar_int+offset*128);
//printf("\n %d %d\n", final_i,final_j);
cigar_index=0;
if(result4.z>0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=result4.z;
cigar_index++;
}
segment_length=0;
state='N';
do
{
btr=direction_index[(result4.x+result4.y)*640+result4.y];
if(btr.x>0)
{
new_state='D';
step_length=btr.x;
result4.x-=step_length;
}
else
if(btr.x<0)
{
new_state='I';
step_length=0-btr.x;
result4.y-=step_length;
}
else
{
new_state='M';
step_length=btr.y;
result4.x-=step_length;
result4.y-=step_length;
}
if(state=='N') state=new_state;
if(state==new_state)
{
segment_length+=step_length;
}
else
{
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
segment_length=step_length;
cigar_index++;
state=new_state;
}
}while(result4.x>=0&&result4.y>=0);
cigar_m[cigar_index]=state;
cigar_int_m[cigar_index]=segment_length;
cigar_index++;
if(result4.y>=0)
{
cigar_m[cigar_index]='S';
cigar_int_m[cigar_index]=result4.y+1;
cigar_index++;
}
result4.z=result4.x+1;
result4.w=cigar_index;
result[offset]=result4;
/* for(int i=cigar_index-1;i>=0;i--)
{
printf("%d%c",cigar_int_m[i],cigar_m[i]);
}
*/
}
__syncthreads();
if(threadIdx.x<cigar_index && cigar_index<=blockDim.x)
{
// if(threadIdx.x==0)
// printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]);
cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x];
cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x];
// if(threadIdx.x==0)
// printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]);
}
offset+=gridDim.x;
}
}
struct InputData
{
char read_base[600];
char reference_base[600];
};
int main(int artc, char* args[])
{
int total_size=0;
FILE * file;
file=fopen(args[1],"r");
int size;
double computation_time=0;//total_time=0;
timespec start,finish;
/* char data[200][1000];
for(int i=0;i<101;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{ int index=0;
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<row;i++)
for(int j=0;j<col;j++)
{
strcpy(inputdata[index].reference_base,data[1]);
strcpy(inputdata[index].read_base,data[1]);
index++;
}
*/
/* fscanf(file,"%d",&size);
while(!feof(file))
{
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<size;i++)
{
fscanf(file,"%s ",inputdata[i].reference_base);
fscanf(file,"%s ",inputdata[i].read_base);
}
*/
char data[200][1000];
for(int i=0;i<101;i++)
{
fscanf(file,"%s ", data[i]);
}
int row=atoi(args[2]);
int col=atoi(args[3]);
size=row*col;
for(int ww=0;ww<1;ww++)
{ int index=0;
InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData)));
for(int i=0;i<row;i++)
for(int j=0;j<col;j++)
{
strcpy(inputdata[index].reference_base,data[i]);
strcpy(inputdata[index].read_base,data[j]);
index++;
}
//data preparation.
char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128);
NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total);
char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; // padded to a 128-byte boundary, thus we do not need to worry about alignment
int data_size=0;
char * data_d_total;
cudaMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4);
int * result_h=(int *) malloc(sizeof(int)*size*4);
char * cigar_h=(char *) malloc(sizeof(char)*size*128);
int * cigar_int_h=(int *) malloc(sizeof(int)*size*128);
for(int i=0;i<size;i++)
{
char4 reference_tep[150];
int read_len=strlen(inputdata[i].read_base);
int ref_len=strlen(inputdata[i].reference_base);
int new_len=(ref_len+4-1)/4;
total_size+=ref_len*read_len;
for(int j=0;j<new_len;j++)
{
reference_tep[j].x=inputdata[i].reference_base[j*4];
if(j*4+1<ref_len)
reference_tep[j].y=inputdata[i].reference_base[j*4+1];
if(j*4+2<ref_len)
reference_tep[j].z=inputdata[i].reference_base[j*4+2];
if(j*4+3<ref_len)
reference_tep[j].w=inputdata[i].reference_base[j*4+3];
}
data_num_add[i].read_reference_number.x=read_len;
data_num_add[i].read_reference_number.y=ref_len;
data_num_add[i].address_array=data_size;
memcpy(data_h,inputdata[i].read_base,read_len);
data_h+=(read_len+128-1)/128*128;
data_size+=(read_len+128-1)/128*128;
memcpy(data_h,reference_tep,sizeof(char4)* new_len);
data_h+=(new_len*sizeof(char4)+127)/128*128;
data_size+=(new_len*sizeof(char4)+127)/128*128;
}
int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128;
cudaMemcpy(data_d_total,data_h_total,data_size_to_copy,cudaMemcpyHostToDevice);
NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total);
char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128;
int4 * result_d=(int4 *) (data_d_total+data_size_to_copy);
char * cigar;
cudaMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int)));
int * cigar_int=(int *) (cigar+size*128*sizeof(char));
int * direction;
cudaMalloc( (int **) & direction, size * (640*1100* sizeof (int)));
dim3 block(128);
dim3 grid(size);
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
calculate_cigar<<<grid,block>>> (size,data_d,num_add_d,result_d,direction); //result
// calculate_cigar_2<<<grid,block>>> (size,result_d,cigar,cigar_int,direction); //result
cudaMemcpy(result_h,result_d,size*sizeof(int)*4,cudaMemcpyDeviceToHost);
// cudaMemcpy(cigar_h,cigar,128*sizeof(char)*size, cudaMemcpyDeviceToHost);
// cudaMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,cudaMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
computation_time+=diff(start,finish);
// for(int i=0;i<size;i++)
{
// printf("%d\n",result_h[i*4+1]);
/* printf("[");
for(int j=0;j<result_h[i*4+3];j++)
{
if(j!=0) printf(", ");
printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]);
}
printf("]\n");
*/ }
cudaFree(direction);
free(data_h_total);
cudaFree(data_d_total);
free(inputdata);
cudaFree(cigar);
free(cigar_int_h);
free(cigar_h);
// fscanf(file,"%d",&size);
}
// printf(" computation_time= %e total_time=%e \n",computation_time,0);
printf(" computation_time= %e %d GCUPs=%lf\n",computation_time,total_size,( (double)total_size)/computation_time/1000000000);
return 0;
}
|
be5b67ead2f2a0459a01c191c131c4a4ae975afe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "slicer.cuh"
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <stdio.h>
/**
* fps1: First stage of slicing -- Ray Triangle Intersection
* Inputs:
* triangles -- array of all triangles
* num_triangles -- length of the triangle array
* locks -- array of locks (used in atomic memory access)
* Outputs:
* all_intersections -- array of all intersections
* trunk_length -- number of intersections of each pixel ray
*/
__global__
void fps1(triangle* triangles, size_t num_triangles, layer_t* all_intersections, unsigned* trunk_length, int* locks) {
size_t idx = (size_t)blockDim.x * (size_t)blockIdx.x + (size_t)threadIdx.x;
size_t tri_idx = idx / (X_DIM * Y_DIM);
// if (tri_idx >= num_triangles) return;
// copy 1 triangle to the shared memory -- That's all we need on this block
__shared__ triangle triangle_shared;
__shared__ double x_max, x_min;
__shared__ bool y_notInside;
int y_idx = (idx / X_DIM) & (Y_DIM-1);
int y = y_idx - (Y_DIM >> 1);
double y_pos = y * RESOLUTION;
if (threadIdx.x == 0) {
// copy the triangle to shared memory
triangle_shared = triangles[tri_idx];
// compute x_min, x_max of the triangle, store results in shared memory
thrust::maximum<double> max;
thrust::minimum<double> min;
x_max = max(triangle_shared.p1.x, max(triangle_shared.p2.x, triangle_shared.p3.x));
x_min = min(triangle_shared.p1.x, min(triangle_shared.p2.x, triangle_shared.p3.x));
// check if current y value is inside the triangle
// All threads (pixels) on this block have the same y value,
// so this condition only needs to be checked once.
double y_max = max(triangle_shared.p1.y, max(triangle_shared.p2.y, triangle_shared.p3.y));
double y_min = min(triangle_shared.p1.y, min(triangle_shared.p2.y, triangle_shared.p3.y));
y_notInside = (y_pos < y_min) || (y_pos > y_max);
}
__syncthreads();
if (y_notInside) return;
int x_idx = idx & (X_DIM-1);
int x = x_idx - (X_DIM >> 1);
double x_pos = x * RESOLUTION;
bool notInRect = (x_pos < x_min) || (x_pos > x_max);
layer_t* layers = all_intersections + y_idx * X_DIM * MAX_TRUNK_SIZE + x_idx * MAX_TRUNK_SIZE;
unsigned* length = trunk_length + y_idx * X_DIM + x_idx;
// if current pixel is not in the rectangle defined by x_min/max and y_min/max,
// there cannot be an intersection
layer_t intersection = notInRect ? (layer_t)(-1) : pixelRayIntersection(triangle_shared, x, y);
if (intersection != (layer_t)(-1)) {
size_t layer_array_idx = atomicAdd(length, 1);
layers[layer_array_idx] = intersection;
}
}
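// Index layout used above: idx enumerates (triangle, y, x), with
// tri_idx = idx / (X_DIM * Y_DIM), y_idx = (idx / X_DIM) & (Y_DIM - 1) and
// x_idx = idx & (X_DIM - 1); the bit masks assume X_DIM and Y_DIM are powers
// of two. Caching the triangle and the y-range test in shared memory also
// appears to assume that all threads of a block share the same (triangle, y)
// pair, i.e. that blockDim.x divides X_DIM. Intersections are appended to the
// per-pixel trunk with atomicAdd, so each trunk may hold at most
// MAX_TRUNK_SIZE entries.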
/**
* fps2: second stage of slicing -- trunk sorting
* Inputs:
* all_intersections -- array of intersections computed in fps1
* trunk_length -- number of intersections of each pixel ray
* Outputs:
* all_intersections -- sorting will be performed in-place
*/
__global__
void fps2(layer_t* all_intersections, unsigned* trunk_length) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= X_DIM * Y_DIM) return;
unsigned length = trunk_length[idx];
layer_t* curr_trunk = all_intersections + (idx * MAX_TRUNK_SIZE);
thrust::sort(thrust::device, curr_trunk, curr_trunk + length);
}
/**
* fps3: third stage of slicing: layer extractions
* Inputs:
* sorted_intersections -- sorted array of intersections
* trunk_length -- number of intersections of each pixel ray
* Outputs:
* out -- Z*X*Y array representing the sliced model. A cell is True
* if it is inside the model, False if not.
*/
__global__
void fps3(layer_t* sorted_intersections, unsigned* trunk_length, bool* out) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
int z_idx = idx / (X_DIM * Y_DIM);
if (z_idx >= NUM_LAYERS) return;
int y_idx = (idx - (z_idx * X_DIM * Y_DIM)) / X_DIM;
int x_idx = (idx - (z_idx * X_DIM * Y_DIM)) & (X_DIM - 1);
unsigned length = trunk_length[y_idx * X_DIM + x_idx];
layer_t* intersection_trunk = sorted_intersections + y_idx * X_DIM * MAX_TRUNK_SIZE + x_idx * MAX_TRUNK_SIZE;
out[idx] = isInside(z_idx, intersection_trunk, length);
}
/**
* pixelRayIntersection: helper function, computes the intersection of given triangle and pixel ray
* Inputs:
* t -- input triangle
* x, y -- coordinates of the input pixel ray
* Returns:
* The layer on which they intersect, or -1 if no intersection
*/
__device__ __forceinline__
layer_t pixelRayIntersection(triangle t, int x, int y) {
/*
Let A, B, C be the 3 vertices of the given triangle
Let S(x,y,z) be the intersection, where x,y are given
We want to find some a, b such that AS = a*AB + b*AC
If a >= 0, b >= 0, and a+b <= 1, S is a valid intersection.
*/
double x_d = x * RESOLUTION - t.p1.x;
double y_d = y * RESOLUTION - t.p1.y;
double x1 = t.p2.x - t.p1.x;
double y1 = t.p2.y - t.p1.y;
double z1 = t.p2.z - t.p1.z;
double x2 = t.p3.x - t.p1.x;
double y2 = t.p3.y - t.p1.y;
double z2 = t.p3.z - t.p1.z;
double a = (x_d * y2 - x2 * y_d) / (x1 * y2 - x2 * y1);
double b = (x_d * y1 - x1 * y_d) / (x2 * y1 - x1 * y2);
bool inside = (a >= 0) && (b >= 0) && (a+b <= 1);
double intersection = (a * z1 + b * z2) + t.p1.z;
// divide by layer width
layer_t layer = inside ? (intersection / RESOLUTION) : (layer_t)(-1);
return layer;
}
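// The coefficients a and b above come from solving the 2x2 system
//   a*x1 + b*x2 = x_d,  a*y1 + b*y2 = y_d
// by Cramer's rule; the shared denominator x1*y2 - x2*y1 is the z-component of
// AB x AC, i.e. twice the signed area of the triangle projected onto the x-y
// plane. The intersection height is reconstructed as z = t.p1.z + a*z1 + b*z2
// and converted to a layer index by dividing by RESOLUTION.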
/**
* isInside: given an array of intersection, check if the current pixel is inside the model
* Inputs:
* current -- z value of current pixel
* trunk -- intersection array of current pixel ray
* length -- length of intersection array (trunk)
* Returns:
* True if current pixel is inside the model, False if not
*/
__device__
bool isInside(layer_t current, layer_t* trunk, size_t length) {
size_t startIdx = 0;
size_t endIdx = length;
size_t mid;
bool goLeft;
// perform binary search
while (startIdx < endIdx) {
mid = (startIdx + endIdx) / 2;
if (trunk[mid] == current) return true;
goLeft = trunk[mid] > current;
startIdx = goLeft ? startIdx : (mid + 1);
endIdx = goLeft ? mid : endIdx;
}
return (bool)(startIdx & 1);
}
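// When the binary search above terminates, startIdx equals the number of
// recorded intersections strictly below `current`, so an odd count
// (startIdx & 1) means the ray has crossed the surface an odd number of times
// and the pixel lies inside the model.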
| be5b67ead2f2a0459a01c191c131c4a4ae975afe.cu | #include "slicer.cuh"
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <stdio.h>
/**
* fps1: First stage of slicing -- Ray Triangle Intersection
* Inputs:
* triangles -- array of all triangles
* num_triangles -- length of the triangle array
* locks -- array of locks (used in atomic memory access)
* Outputs:
* all_intersections -- array of all intersections
* trunk_length -- number of intersections of each pixel ray
*/
__global__
void fps1(triangle* triangles, size_t num_triangles, layer_t* all_intersections, unsigned* trunk_length, int* locks) {
size_t idx = (size_t)blockDim.x * (size_t)blockIdx.x + (size_t)threadIdx.x;
size_t tri_idx = idx / (X_DIM * Y_DIM);
// if (tri_idx >= num_triangles) return;
// copy 1 triangle to the shared memory -- That's all we need on this block
__shared__ triangle triangle_shared;
__shared__ double x_max, x_min;
__shared__ bool y_notInside;
int y_idx = (idx / X_DIM) & (Y_DIM-1);
int y = y_idx - (Y_DIM >> 1);
double y_pos = y * RESOLUTION;
if (threadIdx.x == 0) {
// copy the triangle to shared memory
triangle_shared = triangles[tri_idx];
// compute x_min, x_max of the triangle, store results in shared memory
thrust::maximum<double> max;
thrust::minimum<double> min;
x_max = max(triangle_shared.p1.x, max(triangle_shared.p2.x, triangle_shared.p3.x));
x_min = min(triangle_shared.p1.x, min(triangle_shared.p2.x, triangle_shared.p3.x));
// check if current y value is inside the triangle
// All threads (pixels) on this block have the same y value,
// so this condition only needs to be checked once.
double y_max = max(triangle_shared.p1.y, max(triangle_shared.p2.y, triangle_shared.p3.y));
double y_min = min(triangle_shared.p1.y, min(triangle_shared.p2.y, triangle_shared.p3.y));
y_notInside = (y_pos < y_min) || (y_pos > y_max);
}
__syncthreads();
if (y_notInside) return;
int x_idx = idx & (X_DIM-1);
int x = x_idx - (X_DIM >> 1);
double x_pos = x * RESOLUTION;
bool notInRect = (x_pos < x_min) || (x_pos > x_max);
layer_t* layers = all_intersections + y_idx * X_DIM * MAX_TRUNK_SIZE + x_idx * MAX_TRUNK_SIZE;
unsigned* length = trunk_length + y_idx * X_DIM + x_idx;
// if current pixel is not in the rectangle defined by x_min/max and y_min/max,
// there cannot be an intersection
layer_t intersection = notInRect ? (layer_t)(-1) : pixelRayIntersection(triangle_shared, x, y);
if (intersection != (layer_t)(-1)) {
size_t layer_array_idx = atomicAdd(length, 1);
layers[layer_array_idx] = intersection;
}
}
/**
* fps2: second stage of slicing -- trunk sorting
* Inputs:
* all_intersections -- array of intersections computed in fps1
* trunk_length -- number of intersections of each pixel ray
* Outputs:
* all_intersections -- sorting will be performed in-place
*/
__global__
void fps2(layer_t* all_intersections, unsigned* trunk_length) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= X_DIM * Y_DIM) return;
unsigned length = trunk_length[idx];
layer_t* curr_trunk = all_intersections + (idx * MAX_TRUNK_SIZE);
thrust::sort(thrust::device, curr_trunk, curr_trunk + length);
}
/**
* fps3: third stage of slicing: layer extractions
* Inputs:
* sorted_intersections -- sorted array of intersections
* trunk_length -- number of intersections of each pixel ray
* Outputs:
* out -- Z*X*Y array representing the sliced model. A cell is True
* if it is inside the model, False if not.
*/
__global__
void fps3(layer_t* sorted_intersections, unsigned* trunk_length, bool* out) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
int z_idx = idx / (X_DIM * Y_DIM);
if (z_idx >= NUM_LAYERS) return;
int y_idx = (idx - (z_idx * X_DIM * Y_DIM)) / X_DIM;
int x_idx = (idx - (z_idx * X_DIM * Y_DIM)) & (X_DIM - 1);
unsigned length = trunk_length[y_idx * X_DIM + x_idx];
layer_t* intersection_trunk = sorted_intersections + y_idx * X_DIM * MAX_TRUNK_SIZE + x_idx * MAX_TRUNK_SIZE;
out[idx] = isInside(z_idx, intersection_trunk, length);
}
/**
* pixelRayIntersection: helper function, computes the intersection of given triangle and pixel ray
* Inputs:
* t -- input triangle
* x, y -- coordinates of the input pixel ray
* Returns:
* The layer on which they intersect, or -1 if no intersection
*/
__device__ __forceinline__
layer_t pixelRayIntersection(triangle t, int x, int y) {
/*
Let A, B, C be the 3 vertices of the given triangle
Let S(x,y,z) be the intersection, where x,y are given
We want to find some a, b such that AS = a*AB + b*AC
If a >= 0, b >= 0, and a+b <= 1, S is a valid intersection.
*/
double x_d = x * RESOLUTION - t.p1.x;
double y_d = y * RESOLUTION - t.p1.y;
double x1 = t.p2.x - t.p1.x;
double y1 = t.p2.y - t.p1.y;
double z1 = t.p2.z - t.p1.z;
double x2 = t.p3.x - t.p1.x;
double y2 = t.p3.y - t.p1.y;
double z2 = t.p3.z - t.p1.z;
double a = (x_d * y2 - x2 * y_d) / (x1 * y2 - x2 * y1);
double b = (x_d * y1 - x1 * y_d) / (x2 * y1 - x1 * y2);
bool inside = (a >= 0) && (b >= 0) && (a+b <= 1);
double intersection = (a * z1 + b * z2) + t.p1.z;
// divide by layer width
layer_t layer = inside ? (intersection / RESOLUTION) : (layer_t)(-1);
return layer;
}
/**
* isInside: given an array of intersection, check if the current pixel is inside the model
* Inputs:
* current -- z value of current pixel
* trunk -- intersection array of current pixel ray
* length -- length of intersection array (trunk)
* Returns:
* True if current pixel is inside the model, False if not
*/
__device__
bool isInside(layer_t current, layer_t* trunk, size_t length) {
size_t startIdx = 0;
size_t endIdx = length;
size_t mid;
bool goLeft;
// perform binary search
while (startIdx < endIdx) {
mid = (startIdx + endIdx) / 2;
if (trunk[mid] == current) return true;
goLeft = trunk[mid] > current;
startIdx = goLeft ? startIdx : (mid + 1);
endIdx = goLeft ? mid : endIdx;
}
return (bool)(startIdx & 1);
}
|
d2574ebd892032600c3f76f4f32d1f8a5ab47641.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef PADDLE_WITH_HIP
// HIP does not support cusolver
#include "paddle/phi/backends/dynload/cusolver.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/kernels/impl/lu_kernel_impl.h"
#include "paddle/phi/kernels/lu_kernel.h"
namespace phi {
template <typename T>
void cusolver_bufferSize(const hipsolverDnHandle_t& cusolverH,
int m,
int n,
T* d_A,
int lda,
int* lwork);
template <typename T>
void cusolver_getrf(const hipsolverDnHandle_t& cusolverH,
int m,
int n,
T* d_A,
int lda,
T* d_work,
int* d_Ipiv,
int* d_info);
template <>
void cusolver_bufferSize<float>(const hipsolverDnHandle_t& cusolverH,
int m,
int n,
float* d_A,
int lda,
int* lwork) {
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::hipsolverDnSgetrf_bufferSize(cusolverH, m, n, d_A, lda, lwork));
}
template <>
void cusolver_bufferSize<double>(const hipsolverDnHandle_t& cusolverH,
int m,
int n,
double* d_A,
int lda,
int* lwork) {
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::hipsolverDnDgetrf_bufferSize(cusolverH, m, n, d_A, lda, lwork));
}
template <>
void cusolver_getrf<float>(const hipsolverDnHandle_t& cusolverH,
int m,
int n,
float* d_A,
int lda,
float* d_work,
int* d_Ipiv,
int* d_info) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnSgetrf(
cusolverH, m, n, d_A, lda, d_work, d_Ipiv, d_info));
}
template <>
void cusolver_getrf<double>(const hipsolverDnHandle_t& cusolverH,
int m,
int n,
double* d_A,
int lda,
double* d_work,
int* d_Ipiv,
int* d_info) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDgetrf(
cusolverH, m, n, d_A, lda, d_work, d_Ipiv, d_info));
}
template <typename T, typename Context>
void lu_decomposed_kernel(const Context& dev_ctx,
int m,
int n,
T* d_A,
int lda,
int* d_Ipiv,
int* d_info) {
/* step 1: get cusolver handle*/
auto cusolverH = dev_ctx.cusolver_dn_handle();
/* step 2: query working space of getrf */
int lwork;
cusolver_bufferSize(cusolverH, m, n, d_A, lda, &lwork);
auto work_buff = phi::memory_utils::Alloc(
dev_ctx.GetPlace(),
lwork * sizeof(T),
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
T* d_work = reinterpret_cast<T*>(work_buff->ptr());
/* step 3: LU factorization */
if (d_Ipiv) {
cusolver_getrf(cusolverH, m, n, d_A, lda, d_work, d_Ipiv, d_info);
} else {
cusolver_getrf(cusolverH, m, n, d_A, lda, d_work, NULL, d_info);
}
PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize());
}
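// Note: in the original cuSOLVER API, passing a null devIpiv pointer to getrf
// requests factorization without pivoting; the pivot == false branch of
// LUKernel below relies on that behavior.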
template <typename T, typename Context>
void LUKernel(const Context& dev_ctx,
const DenseTensor& x,
bool pivot,
DenseTensor* out,
DenseTensor* pivots,
DenseTensor* infos) {
#ifdef __HIPCC__
const int64_t kMaxBlockDim = 256;
#else
const int64_t kMaxBlockDim = 512;
#endif
*out = Transpose2DTo6D<Context, T>(dev_ctx, x);
auto outdims = out->dims();
auto outrank = outdims.size();
int m = static_cast<int>(outdims[outrank - 1]);
int n = static_cast<int>(outdims[outrank - 2]);
int lda = ::max(1, m);
if (pivot) {
auto ipiv_dims = phi::slice_ddim(outdims, 0, outrank - 1);
ipiv_dims[outrank - 2] = ::min(m, n);
pivots->Resize(ipiv_dims);
}
dev_ctx.template Alloc<int>(pivots);
auto ipiv_data = pivots->data<int>();
auto info_dims = phi::slice_ddim(outdims, 0, outrank - 2);
if (info_dims.size() == 0) {
info_dims = phi::make_ddim({1});
}
infos->Resize(info_dims);
dev_ctx.template Alloc<int>(infos);
auto info_data = infos->data<int>();
auto batchsize = product(info_dims);
batchsize = ::max(static_cast<int>(batchsize), 1);
dev_ctx.template Alloc<T>(out);
auto out_data = out->data<T>();
for (int b = 0; b < batchsize; b++) {
auto out_data_item = &out_data[b * m * n];
int* info_data_item = &info_data[b];
if (pivot) {
auto ipiv_data_item = &ipiv_data[b * ::min(m, n)];
lu_decomposed_kernel(
dev_ctx, m, n, out_data_item, lda, ipiv_data_item, info_data_item);
} else {
lu_decomposed_kernel(
dev_ctx, m, n, out_data_item, lda, NULL, info_data_item);
}
}
*out = Transpose2DTo6D<Context, T>(dev_ctx, *out);
}
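// LUKernel transposes the row-major batch before and after the getrf calls
// (Transpose2DTo6D presumably swaps the last two dimensions) because cuSOLVER,
// like LAPACK, expects column-major matrices; pivots gets min(m, n) entries
// and infos one entry per matrix in the batch.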
} // namespace phi
PD_REGISTER_KERNEL(lu, // cuda_only
GPU,
ALL_LAYOUT,
phi::LUKernel,
float,
double) {}
#endif // not PADDLE_WITH_HIP
| d2574ebd892032600c3f76f4f32d1f8a5ab47641.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef PADDLE_WITH_HIP
// HIP does not support cusolver
#include "paddle/phi/backends/dynload/cusolver.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/kernels/impl/lu_kernel_impl.h"
#include "paddle/phi/kernels/lu_kernel.h"
namespace phi {
template <typename T>
void cusolver_bufferSize(const cusolverDnHandle_t& cusolverH,
int m,
int n,
T* d_A,
int lda,
int* lwork);
template <typename T>
void cusolver_getrf(const cusolverDnHandle_t& cusolverH,
int m,
int n,
T* d_A,
int lda,
T* d_work,
int* d_Ipiv,
int* d_info);
template <>
void cusolver_bufferSize<float>(const cusolverDnHandle_t& cusolverH,
int m,
int n,
float* d_A,
int lda,
int* lwork) {
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::cusolverDnSgetrf_bufferSize(cusolverH, m, n, d_A, lda, lwork));
}
template <>
void cusolver_bufferSize<double>(const cusolverDnHandle_t& cusolverH,
int m,
int n,
double* d_A,
int lda,
int* lwork) {
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::cusolverDnDgetrf_bufferSize(cusolverH, m, n, d_A, lda, lwork));
}
template <>
void cusolver_getrf<float>(const cusolverDnHandle_t& cusolverH,
int m,
int n,
float* d_A,
int lda,
float* d_work,
int* d_Ipiv,
int* d_info) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnSgetrf(
cusolverH, m, n, d_A, lda, d_work, d_Ipiv, d_info));
}
template <>
void cusolver_getrf<double>(const cusolverDnHandle_t& cusolverH,
int m,
int n,
double* d_A,
int lda,
double* d_work,
int* d_Ipiv,
int* d_info) {
PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDgetrf(
cusolverH, m, n, d_A, lda, d_work, d_Ipiv, d_info));
}
template <typename T, typename Context>
void lu_decomposed_kernel(const Context& dev_ctx,
int m,
int n,
T* d_A,
int lda,
int* d_Ipiv,
int* d_info) {
/* step 1: get cusolver handle*/
auto cusolverH = dev_ctx.cusolver_dn_handle();
/* step 2: query working space of getrf */
int lwork;
cusolver_bufferSize(cusolverH, m, n, d_A, lda, &lwork);
auto work_buff = phi::memory_utils::Alloc(
dev_ctx.GetPlace(),
lwork * sizeof(T),
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
T* d_work = reinterpret_cast<T*>(work_buff->ptr());
/* step 3: LU factorization */
if (d_Ipiv) {
cusolver_getrf(cusolverH, m, n, d_A, lda, d_work, d_Ipiv, d_info);
} else {
cusolver_getrf(cusolverH, m, n, d_A, lda, d_work, NULL, d_info);
}
PADDLE_ENFORCE_GPU_SUCCESS(cudaDeviceSynchronize());
}
template <typename T, typename Context>
void LUKernel(const Context& dev_ctx,
const DenseTensor& x,
bool pivot,
DenseTensor* out,
DenseTensor* pivots,
DenseTensor* infos) {
#ifdef __HIPCC__
const int64_t kMaxBlockDim = 256;
#else
const int64_t kMaxBlockDim = 512;
#endif
*out = Transpose2DTo6D<Context, T>(dev_ctx, x);
auto outdims = out->dims();
auto outrank = outdims.size();
int m = static_cast<int>(outdims[outrank - 1]);
int n = static_cast<int>(outdims[outrank - 2]);
int lda = std::max(1, m);
if (pivot) {
auto ipiv_dims = phi::slice_ddim(outdims, 0, outrank - 1);
ipiv_dims[outrank - 2] = std::min(m, n);
pivots->Resize(ipiv_dims);
}
dev_ctx.template Alloc<int>(pivots);
auto ipiv_data = pivots->data<int>();
auto info_dims = phi::slice_ddim(outdims, 0, outrank - 2);
if (info_dims.size() == 0) {
info_dims = phi::make_ddim({1});
}
infos->Resize(info_dims);
dev_ctx.template Alloc<int>(infos);
auto info_data = infos->data<int>();
auto batchsize = product(info_dims);
batchsize = std::max(static_cast<int>(batchsize), 1);
dev_ctx.template Alloc<T>(out);
auto out_data = out->data<T>();
for (int b = 0; b < batchsize; b++) {
auto out_data_item = &out_data[b * m * n];
int* info_data_item = &info_data[b];
if (pivot) {
auto ipiv_data_item = &ipiv_data[b * std::min(m, n)];
lu_decomposed_kernel(
dev_ctx, m, n, out_data_item, lda, ipiv_data_item, info_data_item);
} else {
lu_decomposed_kernel(
dev_ctx, m, n, out_data_item, lda, NULL, info_data_item);
}
}
*out = Transpose2DTo6D<Context, T>(dev_ctx, *out);
}
} // namespace phi
PD_REGISTER_KERNEL(lu, // cuda_only
GPU,
ALL_LAYOUT,
phi::LUKernel,
float,
double) {}
#endif // not PADDLE_WITH_HIP
|
6eca328a84161abbcdfb8d754a526527c06d943c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*#############################################################################
******************************************************************************
* <name> coproc_storage_cuda </name>
******************************************************************************
*
* <purpose>
* This file provides the basic routines for creating and deleting
* storage on the device and for transferring data between host and
* device memory.
* </purpose>
*
*#############################################################################
*/
#include <cmath>
#include <iostream>
#include "coproc_core.h"
#include "coproc_storage_cuda.h"
#include "coproc_transpose.h"
/*******************************************************************************
* Wrappers for malloc/free using hipHostMalloc and hipHostFree
******************************************************************************/
void coproc_malloc(void **ptr, size_t size)
{
hipHostMalloc(ptr, size);
coproc_checkError("coproc_malloc");
}
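/* hipHostMalloc allocates page-locked (pinned) host memory; pinned buffers are
 * required for genuinely asynchronous hipMemcpyAsync transfers and typically
 * reach higher host<->device bandwidth than pageable memory from malloc. */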
/******************************************************************************/
void coproc_free(void **ptr)
{
hipHostFree(*ptr);
coproc_checkError("coproc_free");
}
/*******************************************************************************
***
*** HOST CODE
***
******************************************************************************/
/*******************************************************************************
* Allocate new memory on host
******************************************************************************/
void coproc_newMemoryOnHost(void **h_ptr,
size_t size,
unsigned int flags)
{
hipHostMalloc(h_ptr, size, flags);
coproc_checkError("coproc_newMemoryOnHost");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_newmemoryonhost)(void **h_ptr,
__SIZET *size,
__INT *flags=hipHostMallocDefault)
{
coproc_newMemoryOnHost(h_ptr, *size, *flags);
}
}
/*******************************************************************************
* Free memory on host
******************************************************************************/
void coproc_freeMemoryOnHost(void *h_ptr)
{
hipHostFree(h_ptr);
coproc_checkError("coproc_freeMemoryOnHost");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_freememoryonhost)(void **h_ptr)
{
coproc_freeMemoryOnHost(*h_ptr);
}
}
/*******************************************************************************
* Clear memory on host
******************************************************************************/
void coproc_clearMemoryOnHost(void * __restrict__ h_ptr,
size_t size)
{
memset(h_ptr, 0, size);
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_clearmemoryonhost)(void **h_ptr,
__SIZET *size)
{
coproc_clearMemoryOnHost(*h_ptr, *size);
}
}
/*******************************************************************************
***
*** DEVICE CODE
***
******************************************************************************/
/*******************************************************************************
* Allocate new memory on device
******************************************************************************/
void coproc_newMemoryOnDevice(void **d_ptr,
size_t size)
{
hipMalloc(d_ptr, size);
coproc_checkError("coproc_newMemoryOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_newmemoryondevice)(void **d_ptr,
__SIZET *size)
{
coproc_newMemoryOnDevice(d_ptr, *size);
}
}
/*******************************************************************************
* Free existing memory on device
******************************************************************************/
void coproc_freeMemoryOnDevice(void *d_ptr)
{
hipFree(d_ptr);
coproc_checkError("coproc_freeMemoryOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_freememoryondevice)(void **d_ptr)
{
coproc_freeMemoryOnDevice(*d_ptr);
}
}
/*******************************************************************************
* Clear memory on device
******************************************************************************/
void coproc_clearMemoryOnDevice(void * __restrict__ d_ptr,
size_t size)
{
hipMemset(d_ptr, 0, size);
coproc_checkError("coproc_clearMemoryOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_clearmemoryondevice)(void **d_ptr,
__SIZET *size)
{
coproc_clearMemoryOnDevice(*d_ptr, *size);
}
}
/*******************************************************************************
* Copy host memory to host memory synchronously
******************************************************************************/
void coproc_memcpyHostToHost(const void * __restrict__ h_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size)
{
hipMemcpy(h_ptrDest, h_ptrSrc, size, hipMemcpyHostToHost);
coproc_checkError("coproc_memcpyHostToHost");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpyhosttohost)(const void **h_ptrSrc,
void **h_ptrDest,
__SIZET *size)
{
coproc_memcpyHostToHost(*h_ptrSrc, *h_ptrDest, *size);
}
}
/*******************************************************************************
* Copy host memory to host memory asynchronously
******************************************************************************/
void coproc_memcpyHostToHostAsync(const void * __restrict__ h_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size,
hipStream_t stream)
{
hipMemcpyAsync(h_ptrDest, h_ptrSrc,
size, hipMemcpyHostToHost, stream);
coproc_checkError("coproc_memcpyHostToHostAsync");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpyhosttohosteasync)(const void **h_ptrSrc,
void **h_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_memcpyHostToHostAsync(*h_ptrSrc, *h_ptrDest, *size,
(hipStream_t)(*stream));
}
}
/*******************************************************************************
* Copy host memory to device memory synchronously
******************************************************************************/
void coproc_memcpyHostToDevice(const void * __restrict__ h_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size)
{
hipMemcpy(d_ptrDest, h_ptrSrc, size, hipMemcpyHostToDevice);
coproc_checkError("coproc_memcpyHostToDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpyhosttodevice)(const void *h_ptrSrc,
void **d_ptrDest,
__SIZET *size)
{
coproc_memcpyHostToDevice(h_ptrSrc, *d_ptrDest, *size);
}
}
/*******************************************************************************
* Copy host memory to device memory asynchronously
******************************************************************************/
void coproc_memcpyHostToDeviceAsync(const void * __restrict__ h_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size,
hipStream_t stream)
{
hipMemcpyAsync(d_ptrDest, h_ptrSrc,
size, hipMemcpyHostToDevice, stream);
coproc_checkError("coproc_memcpyHostToDeviceAsync");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpyhosttodeviceasync)(const void **h_ptrSrc,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_memcpyHostToDeviceAsync(*h_ptrSrc, *d_ptrDest, *size,
(hipStream_t)(*stream));
}
}
/*******************************************************************************
* Copy device memory to host memory synchronously
******************************************************************************/
void coproc_memcpyDeviceToHost(const void * __restrict__ d_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size)
{
hipMemcpy(h_ptrDest, d_ptrSrc, size, hipMemcpyDeviceToHost);
coproc_checkError("coproc_memcpyDeviceToHost");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpydevicetohost)(const void **d_ptrSrc,
void *h_ptrDest,
__SIZET *size)
{
coproc_memcpyDeviceToHost(*d_ptrSrc, h_ptrDest, *size);
}
}
/*******************************************************************************
* Copy device memory to host memory asynchronously
******************************************************************************/
void coproc_memcpyDeviceToHostAsync(const void * __restrict__ d_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size,
hipStream_t stream)
{
hipMemcpyAsync(h_ptrDest, d_ptrSrc,
size, hipMemcpyDeviceToHost,stream);
coproc_checkError("coproc_memcpyDeviceToHostAsync");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpydevicetohostasync)(const void **d_ptrSrc,
void **h_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_memcpyDeviceToHostAsync(*d_ptrSrc, *h_ptrDest, *size,
(hipStream_t)*stream);
}
}
/*******************************************************************************
* Copy device memory data to device memory synchronously
******************************************************************************/
void coproc_memcpyDeviceToDevice(const void * __restrict__ d_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size)
{
if (d_ptrDest != d_ptrSrc) {
hipMemcpy(d_ptrDest, d_ptrSrc, size, hipMemcpyDeviceToDevice);
coproc_checkError("coproc_memcpyDeviceToDevice");
}
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpydevicetodevice)(const void **d_ptrSrc,
void **d_ptrDest,
__SIZET *size)
{
coproc_memcpyDeviceToDevice(*d_ptrSrc, *d_ptrDest, *size);
}
}
/*******************************************************************************
* Copy device memory data to device memory asynchronously
******************************************************************************/
void coproc_memcpyDeviceToDeviceAsync(const void * __restrict__ d_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size,
hipStream_t stream)
{
if (d_ptrDest != d_ptrSrc) {
hipMemcpyAsync(d_ptrDest, d_ptrSrc,
size, hipMemcpyDeviceToDevice, stream);
coproc_checkError("coproc_memcpyDeviceToDeviceAsync");
}
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpydevicetodeviceasync)(const void **d_ptrSrc,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_memcpyDeviceToDeviceAsync(*d_ptrSrc, *d_ptrDest, *size,
(hipStream_t)*stream);
}
}
/*******************************************************************************
 * Copy host memory to host memory synchronously (using transposition)
******************************************************************************/
//
// 2d version
//
void coproc_tmemcpyHostToHost2d(const void * __restrict__ h_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size, int nelmt1, int nelmt2,
int packsize)
{
const int n = nelmt1/packsize;
const int bytes_per_elmt = (int)(size/nelmt2/n);
coproc_transposeOnHost2d(h_ptrSrc, h_ptrDest,
bytes_per_elmt, n, nelmt2);
}
//
// 3d version
//
void coproc_tmemcpyHostToHost3d(const void * __restrict__ h_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size, int nelmt1, int nelmt2,
int nelmt3, int packsize)
{
const int n = nelmt1/packsize;
const int bytes_per_elmt = (int)(size/nelmt2/nelmt3/n);
coproc_transposeOnHost3d(h_ptrSrc, h_ptrDest,
bytes_per_elmt, n, nelmt2, nelmt3);
}
/******************************************************************************/
extern "C" {
//
// 2d version
//
void FNAME(coproc_tmemcpyhosttohost2d)(const void **h_ptrSrc,
void **h_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *packsize)
{
coproc_tmemcpyHostToHost2d(*h_ptrSrc, *h_ptrDest, *size,
*nelmt1, *nelmt2, *packsize);
}
//
// 3d version
//
void FNAME(coproc_tmemcpyhosttohost3d)(const void **h_ptrSrc,
void **h_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *nelmt3, __INT *packsize)
{
coproc_tmemcpyHostToHost3d(*h_ptrSrc, *h_ptrDest, *size,
*nelmt1, *nelmt2, *nelmt3, *packsize);
}
}
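/*
 * Worked example of the size arithmetic above (illustrative; the exact layout
 * convention is defined by coproc_transposeOnHost2d/3d): with nelmt1 = 6,
 * packsize = 2, nelmt2 = 4 and size = 6*4*sizeof(double) = 192 bytes, one gets
 * n = nelmt1/packsize = 3 and bytes_per_elmt = size/nelmt2/n = 16 bytes, i.e.
 * each transposed unit is one pack of `packsize` doubles and a 3 x 4 grid of
 * such packs is handed to the transposition routine.
 */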
/*******************************************************************************
 * Copy host memory to device memory synchronously (using transposition)
******************************************************************************/
//
// 2d version
//
void coproc_tmemcpyHostToDevice2d(const void * __restrict__ h_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size, int nelmt1, int nelmt2,
int packsize)
{
void *h_ptr;
coproc_newMemoryOnHost(&h_ptr, size, hipHostMallocDefault);
const int n = nelmt1/packsize;
const int bytes_per_elmt = (int)(size/nelmt2/n);
coproc_transposeOnHost2d(h_ptrSrc, h_ptr, bytes_per_elmt, n, nelmt2);
coproc_memcpyHostToDevice(h_ptr, d_ptrDest, size);
coproc_freeMemoryOnHost(h_ptr);
}
//
// 3d version
//
void coproc_tmemcpyHostToDevice3d(const void * __restrict__ h_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size, int nelmt1, int nelmt2,
int nelmt3, int packsize)
{
void *h_ptr;
coproc_newMemoryOnHost(&h_ptr, size, hipHostMallocDefault);
const int n = nelmt1/packsize;
const int bytes_per_elmt = (int)(size/nelmt2/nelmt3/n);
coproc_transposeOnHost3d(h_ptrSrc, h_ptr, bytes_per_elmt, n, nelmt2, nelmt3);
coproc_memcpyHostToDevice(h_ptr, d_ptrDest, size);
coproc_freeMemoryOnHost(h_ptr);
}
/******************************************************************************/
extern "C" {
//
// 2d version
//
void FNAME(coproc_tmemcpyhosttodevice2d)(const void *h_ptrSrc,
void **d_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *packsize)
{
coproc_tmemcpyHostToDevice2d(h_ptrSrc, *d_ptrDest, *size,
*nelmt1, *nelmt2, *packsize);
}
//
// 3d version
//
void FNAME(coproc_tmemcpyhosttodevice3d)(const void *h_ptrSrc,
void **d_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *nelmt3, __INT *packsize)
{
coproc_tmemcpyHostToDevice3d(h_ptrSrc, *d_ptrDest, *size,
*nelmt1, *nelmt2, *nelmt3, *packsize);
}
}
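/*
 * Illustrative call (buffer names and the double data type are assumptions):
 * stage a packed host array in pinned memory, transpose it and push it to the
 * device in one go.
 *
 *   size_t bytes = (size_t)nelmt1 * nelmt2 * sizeof(double);
 *   coproc_tmemcpyHostToDevice2d(h_data, d_data, bytes, nelmt1, nelmt2, packsize);
 */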
/*******************************************************************************
 * Copy device memory to host memory synchronously (using transposition)
******************************************************************************/
//
// 2d version
//
void coproc_tmemcpyDeviceToHost2d(const void * __restrict__ d_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size, int nelmt1, int nelmt2,
int packsize)
{
void *d_ptr;
coproc_newMemoryOnDevice(&d_ptr, size);
coproc_clearMemoryOnDevice(d_ptr, size);
const int n = nelmt1/packsize;
const int bytes_per_elmt = (int)(size/nelmt2/n);
coproc_transposeOnDevice2d(d_ptrSrc, d_ptr, bytes_per_elmt, n, nelmt2);
coproc_memcpyDeviceToHost(d_ptr, h_ptrDest, size);
coproc_freeMemoryOnDevice(d_ptr);
}
//
// 3d version
//
void coproc_tmemcpyDeviceToHost3d(const void * __restrict__ d_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size, int nelmt1, int nelmt2,
int nelmt3, int packsize)
{
  // Not implemented yet: the commented-out body below is a leftover copy of
  // coproc_tmemcpyHostToDevice3d and does not match this function's parameters.
  /*
void *h_ptr;
coproc_newMemoryOnHost(&h_ptr, size, hipHostMallocDefault);
const int n = nelmt1/packsize;
const int bytes_per_elmt = (int)(size/nelmt2/nelmt3/n);
coproc_transposeOnHost3d(h_ptrSrc, h_ptr, bytes_per_elmt, n, nelmt2, nelmt3);
coproc_memcpyHostToDevice(h_ptr, d_ptrDest, size);
coproc_freeMemoryOnHost(h_ptr);
*/
}
/******************************************************************************/
extern "C" {
//
// 2d version
//
void FNAME(coproc_tmemcpydevicetohost2d)(const void **d_ptrSrc,
void *h_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *packsize)
{
coproc_tmemcpyDeviceToHost2d(*d_ptrSrc, h_ptrDest, *size,
*nelmt1, *nelmt2, *packsize);
}
//
// 3d version
//
void FNAME(coproc_tmemcpydevicetohost3d)(const void **d_ptrSrc,
void *h_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *nelmt3, __INT *packsize)
{
coproc_tmemcpyDeviceToHost3d(*d_ptrSrc, h_ptrDest, *size,
*nelmt1, *nelmt2, *nelmt3, *packsize);
}
}
/*******************************************************************************
 * Copy device memory to device memory synchronously (using transposition)
******************************************************************************/
//
// 2d version
//
void coproc_tmemcpyDeviceToDevice2d(const void * __restrict__ d_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size, int nelmt1, int nelmt2,
int packsize)
{
  // Not implemented yet.
}
//
// 3d version
//
void coproc_tmemcpyDeviceToDevice3d(const void * __restrict__ d_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size, int nelmt1, int nelmt2,
int nelmt3, int packsize)
{
  // Not implemented yet.
}
/******************************************************************************/
extern "C" {
//
// 2d version
//
void FNAME(coproc_tmemcpydevicetodevice2d)(const void **d_ptrSrc,
void **d_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *packsize)
{
coproc_tmemcpyDeviceToDevice2d(*d_ptrSrc, *d_ptrDest, *size,
*nelmt1, *nelmt2, *packsize);
}
//
// 3d version
//
void FNAME(coproc_tmemcpydevicetodevice3d)(const void **d_ptrSrc,
void **d_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *nelmt3, __INT *packsize)
{
coproc_tmemcpyDeviceToDevice3d(*d_ptrSrc, *d_ptrDest, *size,
*nelmt1, *nelmt2, *nelmt3, *packsize);
}
}
/*******************************************************************************
* Combine two generic memory blocks in device memory
******************************************************************************/
template<typename T>
__global__ void combineOnDevice_knl(const T *ptrSrc1,
const T *ptrSrc2,
T *ptrDest,
size_t size)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<size)
{
ptrDest[idx] = ptrSrc1[idx] + ptrSrc2[idx];
}
}
template<>
__global__ void combineOnDevice_knl(const bool *ptrSrc1,
const bool *ptrSrc2,
bool *ptrDest,
size_t size)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<size)
{
ptrDest[idx] = ptrSrc1[idx] || ptrSrc2[idx];
}
}
/*******************************************************************************
* Combine two single memory blocks in device memory
******************************************************************************/
void coproc_combineSingleOnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
hipStream_t stream)
{
__SP *ptrSrc1 = (__SP*)(d_ptrSrc1);
__SP *ptrSrc2 = (__SP*)(d_ptrSrc2);
__SP *ptrDest = (__SP*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
hipLaunchKernelGGL(( combineOnDevice_knl), dim3(grid), dim3(block), 0, stream, ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineSingleOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combinesingleondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineSingleOnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(hipStream_t)(*stream));
}
}
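/*
 * Illustrative usage sketch (not part of the original source): add two single
 * precision device vectors element-wise. d_a, d_b, d_c and n are assumptions;
 * note that `size` counts elements, not bytes, and the launch is asynchronous.
 *
 *   coproc_combineSingleOnDevice(d_a, d_b, d_c, n, (hipStream_t)0);
 *   hipDeviceSynchronize();
 */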
/*******************************************************************************
* Combine two double memory blocks in device memory
******************************************************************************/
void coproc_combineDoubleOnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
hipStream_t stream)
{
__DP *ptrSrc1 = (__DP*)(d_ptrSrc1);
__DP *ptrSrc2 = (__DP*)(d_ptrSrc2);
__DP *ptrDest = (__DP*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
hipLaunchKernelGGL(( combineOnDevice_knl), dim3(grid), dim3(block), 0, stream, ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineDoubleOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combinedoubleondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineDoubleOnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(hipStream_t)(*stream));
}
}
/*******************************************************************************
 * Combine two quadruple memory blocks in device memory
******************************************************************************/
void coproc_combineQuadOnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
hipStream_t stream)
{
__QP *ptrSrc1 = (__QP*)(d_ptrSrc1);
__QP *ptrSrc2 = (__QP*)(d_ptrSrc2);
__QP *ptrDest = (__QP*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
hipLaunchKernelGGL(( combineOnDevice_knl), dim3(grid), dim3(block), 0, stream, ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineQuadOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combinequadondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineQuadOnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(hipStream_t)(*stream));
}
}
/*******************************************************************************
* Combine two integer memory blocks in device memory
******************************************************************************/
void coproc_combineIntegerOnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
hipStream_t stream)
{
__INT *ptrSrc1 = (__INT*)(d_ptrSrc1);
__INT *ptrSrc2 = (__INT*)(d_ptrSrc2);
__INT *ptrDest = (__INT*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
hipLaunchKernelGGL(( combineOnDevice_knl), dim3(grid), dim3(block), 0, stream, ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineIntegerOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combineintegerondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineIntegerOnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(hipStream_t)(*stream));
}
}
/*******************************************************************************
* Combine two int8 memory blocks in device memory
******************************************************************************/
void coproc_combineInt8OnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
hipStream_t stream)
{
__I8 *ptrSrc1 = (__I8*)(d_ptrSrc1);
__I8 *ptrSrc2 = (__I8*)(d_ptrSrc2);
__I8 *ptrDest = (__I8*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
hipLaunchKernelGGL(( combineOnDevice_knl), dim3(grid), dim3(block), 0, stream, ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineInt8OnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combineint8ondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineInt8OnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(hipStream_t)(*stream));
}
}
/*******************************************************************************
* Combine two int16 memory blocks in device memory
******************************************************************************/
void coproc_combineInt16OnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
hipStream_t stream)
{
__I16 *ptrSrc1 = (__I16*)(d_ptrSrc1);
__I16 *ptrSrc2 = (__I16*)(d_ptrSrc2);
__I16 *ptrDest = (__I16*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
hipLaunchKernelGGL(( combineOnDevice_knl), dim3(grid), dim3(block), 0, stream, ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineInt16OnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combineint16ondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineInt16OnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(hipStream_t)(*stream));
}
}
/*******************************************************************************
* Combine two int32 memory blocks in device memory
******************************************************************************/
void coproc_combineInt32OnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
hipStream_t stream)
{
__I32 *ptrSrc1 = (__I32*)(d_ptrSrc1);
__I32 *ptrSrc2 = (__I32*)(d_ptrSrc2);
__I32 *ptrDest = (__I32*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
hipLaunchKernelGGL(( combineOnDevice_knl), dim3(grid), dim3(block), 0, stream, ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineInt32OnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combineint32ondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineInt32OnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(hipStream_t)(*stream));
}
}
/*******************************************************************************
* Combine two int64 memory blocks in device memory
******************************************************************************/
void coproc_combineInt64OnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
hipStream_t stream)
{
__I64 *ptrSrc1 = (__I64*)(d_ptrSrc1);
__I64 *ptrSrc2 = (__I64*)(d_ptrSrc2);
__I64 *ptrDest = (__I64*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
hipLaunchKernelGGL(( combineOnDevice_knl), dim3(grid), dim3(block), 0, stream, ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineInt64OnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combineint64ondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineInt64OnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(hipStream_t)(*stream));
}
}
/*******************************************************************************
* Combine two logical memory blocks in device memory
******************************************************************************/
void coproc_combineLogicalOnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
hipStream_t stream)
{
__LOGICAL *ptrSrc1 = (__LOGICAL*)(d_ptrSrc1);
__LOGICAL *ptrSrc2 = (__LOGICAL*)(d_ptrSrc2);
__LOGICAL *ptrDest = (__LOGICAL*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
hipLaunchKernelGGL(( combineOnDevice_knl), dim3(grid), dim3(block), 0, stream, ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineLogicalOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combinelogicalondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineLogicalOnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(hipStream_t)(*stream));
}
}
| 6eca328a84161abbcdfb8d754a526527c06d943c.cu | /*#############################################################################
******************************************************************************
* <name> coproc_storage_cuda </name>
******************************************************************************
*
* <purpose>
* This file provides the basic routines for creating and deleting
* storage on the device and transfering data between host and device
* memory are provided.
* </purpose>
*
*#############################################################################
*/
#include <cmath>
#include <iostream>
#include "coproc_core.h"
#include "coproc_storage_cuda.h"
#include "coproc_transpose.h"
/*******************************************************************************
 * Wrappers for malloc/free using cudaMallocHost and cudaFreeHost
******************************************************************************/
void coproc_malloc(void **ptr, size_t size)
{
cudaMallocHost(ptr, size);
coproc_checkError("coproc_malloc");
}
/******************************************************************************/
void coproc_free(void **ptr)
{
cudaFreeHost(*ptr);
coproc_checkError("coproc_free");
}
/*******************************************************************************
***
*** HOST CODE
***
******************************************************************************/
/*******************************************************************************
* Allocate new memory on host
******************************************************************************/
void coproc_newMemoryOnHost(void **h_ptr,
size_t size,
unsigned int flags)
{
cudaHostAlloc(h_ptr, size, flags);
coproc_checkError("coproc_newMemoryOnHost");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_newmemoryonhost)(void **h_ptr,
__SIZET *size,
__INT *flags=cudaHostAllocDefault)
{
coproc_newMemoryOnHost(h_ptr, *size, *flags);
}
}
/*******************************************************************************
* Free memory on host
******************************************************************************/
void coproc_freeMemoryOnHost(void *h_ptr)
{
cudaFreeHost(h_ptr);
coproc_checkError("coproc_freeMemoryOnHost");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_freememoryonhost)(void **h_ptr)
{
coproc_freeMemoryOnHost(*h_ptr);
}
}
/*******************************************************************************
* Clear memory on host
******************************************************************************/
void coproc_clearMemoryOnHost(void * __restrict__ h_ptr,
size_t size)
{
memset(h_ptr, 0, size);
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_clearmemoryonhost)(void **h_ptr,
__SIZET *size)
{
coproc_clearMemoryOnHost(*h_ptr, *size);
}
}
/*******************************************************************************
***
*** DEVICE CODE
***
******************************************************************************/
/*******************************************************************************
* Allocate new memory on device
******************************************************************************/
void coproc_newMemoryOnDevice(void **d_ptr,
size_t size)
{
cudaMalloc(d_ptr, size);
coproc_checkError("coproc_newMemoryOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_newmemoryondevice)(void **d_ptr,
__SIZET *size)
{
coproc_newMemoryOnDevice(d_ptr, *size);
}
}
/*******************************************************************************
* Free existing memory on device
******************************************************************************/
void coproc_freeMemoryOnDevice(void *d_ptr)
{
cudaFree(d_ptr);
coproc_checkError("coproc_freeMemoryOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_freememoryondevice)(void **d_ptr)
{
coproc_freeMemoryOnDevice(*d_ptr);
}
}
/*******************************************************************************
* Clear memory on device
******************************************************************************/
void coproc_clearMemoryOnDevice(void * __restrict__ d_ptr,
size_t size)
{
cudaMemset(d_ptr, 0, size);
coproc_checkError("coproc_clearMemoryOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_clearmemoryondevice)(void **d_ptr,
__SIZET *size)
{
coproc_clearMemoryOnDevice(*d_ptr, *size);
}
}
/*******************************************************************************
 * Copy host memory to host memory synchronously
******************************************************************************/
void coproc_memcpyHostToHost(const void * __restrict__ h_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size)
{
cudaMemcpy(h_ptrDest, h_ptrSrc, size, cudaMemcpyHostToHost);
coproc_checkError("coproc_memcpyHostToHost");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpyhosttohost)(const void **h_ptrSrc,
void **h_ptrDest,
__SIZET *size)
{
coproc_memcpyHostToHost(*h_ptrSrc, *h_ptrDest, *size);
}
}
/*******************************************************************************
 * Copy host memory to host memory asynchronously
******************************************************************************/
void coproc_memcpyHostToHostAsync(const void * __restrict__ h_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size,
cudaStream_t stream)
{
cudaMemcpyAsync(h_ptrDest, h_ptrSrc,
size, cudaMemcpyHostToHost, stream);
coproc_checkError("coproc_memcpyHostToHostAsync");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpyhosttohosteasync)(const void **h_ptrSrc,
void **h_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_memcpyHostToHostAsync(*h_ptrSrc, *h_ptrDest, *size,
(cudaStream_t)(*stream));
}
}
/*******************************************************************************
 * Copy host memory to device memory synchronously
******************************************************************************/
void coproc_memcpyHostToDevice(const void * __restrict__ h_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size)
{
cudaMemcpy(d_ptrDest, h_ptrSrc, size, cudaMemcpyHostToDevice);
coproc_checkError("coproc_memcpyHostToDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpyhosttodevice)(const void *h_ptrSrc,
void **d_ptrDest,
__SIZET *size)
{
coproc_memcpyHostToDevice(h_ptrSrc, *d_ptrDest, *size);
}
}
/*******************************************************************************
 * Copy host memory to device memory asynchronously
******************************************************************************/
void coproc_memcpyHostToDeviceAsync(const void * __restrict__ h_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size,
cudaStream_t stream)
{
cudaMemcpyAsync(d_ptrDest, h_ptrSrc,
size, cudaMemcpyHostToDevice, stream);
coproc_checkError("coproc_memcpyHostToDeviceAsync");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpyhosttodeviceasync)(const void **h_ptrSrc,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_memcpyHostToDeviceAsync(*h_ptrSrc, *d_ptrDest, *size,
(cudaStream_t)(*stream));
}
}
/*******************************************************************************
 * Copy device memory to host memory synchronously
******************************************************************************/
void coproc_memcpyDeviceToHost(const void * __restrict__ d_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size)
{
cudaMemcpy(h_ptrDest, d_ptrSrc, size, cudaMemcpyDeviceToHost);
coproc_checkError("coproc_memcpyDeviceToHost");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpydevicetohost)(const void **d_ptrSrc,
void *h_ptrDest,
__SIZET *size)
{
coproc_memcpyDeviceToHost(*d_ptrSrc, h_ptrDest, *size);
}
}
/*******************************************************************************
 * Copy device memory to host memory asynchronously
******************************************************************************/
void coproc_memcpyDeviceToHostAsync(const void * __restrict__ d_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size,
cudaStream_t stream)
{
cudaMemcpyAsync(h_ptrDest, d_ptrSrc,
                  size, cudaMemcpyDeviceToHost, stream);
coproc_checkError("coproc_memcpyDeviceToHostAsync");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpydevicetohostasync)(const void **d_ptrSrc,
void **h_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_memcpyDeviceToHostAsync(*d_ptrSrc, *h_ptrDest, *size,
(cudaStream_t)*stream);
}
}
/*******************************************************************************
 * Copy device memory data to device memory synchronously
******************************************************************************/
void coproc_memcpyDeviceToDevice(const void * __restrict__ d_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size)
{
if (d_ptrDest != d_ptrSrc) {
cudaMemcpy(d_ptrDest, d_ptrSrc, size, cudaMemcpyDeviceToDevice);
coproc_checkError("coproc_memcpyDeviceToDevice");
}
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpydevicetodevice)(const void **d_ptrSrc,
void **d_ptrDest,
__SIZET *size)
{
coproc_memcpyDeviceToDevice(*d_ptrSrc, *d_ptrDest, *size);
}
}
/*******************************************************************************
 * Copy device memory data to device memory asynchronously
******************************************************************************/
void coproc_memcpyDeviceToDeviceAsync(const void * __restrict__ d_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size,
cudaStream_t stream)
{
if (d_ptrDest != d_ptrSrc) {
cudaMemcpyAsync(d_ptrDest, d_ptrSrc,
size, cudaMemcpyDeviceToDevice, stream);
coproc_checkError("coproc_memcpyDeviceToDeviceAsync");
}
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_memcpydevicetodeviceasync)(const void **d_ptrSrc,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_memcpyDeviceToDeviceAsync(*d_ptrSrc, *d_ptrDest, *size,
(cudaStream_t)*stream);
}
}
/*******************************************************************************
* Copy host memory to host memory synchroneously (using transposition)
******************************************************************************/
//
// 2d version
//
void coproc_tmemcpyHostToHost2d(const void * __restrict__ h_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size, int nelmt1, int nelmt2,
int packsize)
{
const int n = nelmt1/packsize;
const int bytes_per_elmt = (int)(size/nelmt2/n);
coproc_transposeOnHost2d(h_ptrSrc, h_ptrDest,
bytes_per_elmt, n, nelmt2);
}
//
// 3d version
//
void coproc_tmemcpyHostToHost3d(const void * __restrict__ h_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size, int nelmt1, int nelmt2,
int nelmt3, int packsize)
{
const int n = nelmt1/packsize;
const int bytes_per_elmt = (int)(size/nelmt2/nelmt3/n);
coproc_transposeOnHost3d(h_ptrSrc, h_ptrDest,
bytes_per_elmt, n, nelmt2, nelmt3);
}
/******************************************************************************/
extern "C" {
//
// 2d version
//
void FNAME(coproc_tmemcpyhosttohost2d)(const void **h_ptrSrc,
void **h_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *packsize)
{
coproc_tmemcpyHostToHost2d(*h_ptrSrc, *h_ptrDest, *size,
*nelmt1, *nelmt2, *packsize);
}
//
// 3d version
//
void FNAME(coproc_tmemcpyhosttohost3d)(const void **h_ptrSrc,
void **h_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *nelmt3, __INT *packsize)
{
coproc_tmemcpyHostToHost3d(*h_ptrSrc, *h_ptrDest, *size,
*nelmt1, *nelmt2, *nelmt3, *packsize);
}
}
/*******************************************************************************
 * Copy host memory to device memory synchronously (using transposition)
******************************************************************************/
//
// 2d version
//
void coproc_tmemcpyHostToDevice2d(const void * __restrict__ h_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size, int nelmt1, int nelmt2,
int packsize)
{
void *h_ptr;
coproc_newMemoryOnHost(&h_ptr, size, cudaHostAllocDefault);
const int n = nelmt1/packsize;
const int bytes_per_elmt = (int)(size/nelmt2/n);
coproc_transposeOnHost2d(h_ptrSrc, h_ptr, bytes_per_elmt, n, nelmt2);
coproc_memcpyHostToDevice(h_ptr, d_ptrDest, size);
coproc_freeMemoryOnHost(h_ptr);
}
//
// 3d version
//
void coproc_tmemcpyHostToDevice3d(const void * __restrict__ h_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size, int nelmt1, int nelmt2,
int nelmt3, int packsize)
{
void *h_ptr;
coproc_newMemoryOnHost(&h_ptr, size, cudaHostAllocDefault);
const int n = nelmt1/packsize;
const int bytes_per_elmt = (int)(size/nelmt2/nelmt3/n);
coproc_transposeOnHost3d(h_ptrSrc, h_ptr, bytes_per_elmt, n, nelmt2, nelmt3);
coproc_memcpyHostToDevice(h_ptr, d_ptrDest, size);
coproc_freeMemoryOnHost(h_ptr);
}
/******************************************************************************/
extern "C" {
//
// 2d version
//
void FNAME(coproc_tmemcpyhosttodevice2d)(const void *h_ptrSrc,
void **d_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *packsize)
{
coproc_tmemcpyHostToDevice2d(h_ptrSrc, *d_ptrDest, *size,
*nelmt1, *nelmt2, *packsize);
}
//
// 3d version
//
void FNAME(coproc_tmemcpyhosttodevice3d)(const void *h_ptrSrc,
void **d_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *nelmt3, __INT *packsize)
{
coproc_tmemcpyHostToDevice3d(h_ptrSrc, *d_ptrDest, *size,
*nelmt1, *nelmt2, *nelmt3, *packsize);
}
}
/*******************************************************************************
 * Copy device memory to host memory synchronously (using transposition)
******************************************************************************/
//
// 2d version
//
void coproc_tmemcpyDeviceToHost2d(const void * __restrict__ d_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size, int nelmt1, int nelmt2,
int packsize)
{
void *d_ptr;
coproc_newMemoryOnDevice(&d_ptr, size);
coproc_clearMemoryOnDevice(d_ptr, size);
const int n = nelmt1/packsize;
const int bytes_per_elmt = (int)(size/nelmt2/n);
coproc_transposeOnDevice2d(d_ptrSrc, d_ptr, bytes_per_elmt, n, nelmt2);
coproc_memcpyDeviceToHost(d_ptr, h_ptrDest, size);
coproc_freeMemoryOnDevice(d_ptr);
}
//
// 3d version
//
void coproc_tmemcpyDeviceToHost3d(const void * __restrict__ d_ptrSrc,
void * __restrict__ h_ptrDest,
size_t size, int nelmt1, int nelmt2,
int nelmt3, int packsize)
{
  // Not implemented yet: the commented-out body below is a leftover copy of
  // coproc_tmemcpyHostToDevice3d and does not match this function's parameters.
  /*
void *h_ptr;
coproc_newMemoryOnHost(&h_ptr, size, cudaHostAllocDefault);
const int n = nelmt1/packsize;
const int bytes_per_elmt = (int)(size/nelmt2/nelmt3/n);
coproc_transposeOnHost3d(h_ptrSrc, h_ptr, bytes_per_elmt, n, nelmt2, nelmt3);
coproc_memcpyHostToDevice(h_ptr, d_ptrDest, size);
coproc_freeMemoryOnHost(h_ptr);
*/
}
/******************************************************************************/
extern "C" {
//
// 2d version
//
void FNAME(coproc_tmemcpydevicetohost2d)(const void **d_ptrSrc,
void *h_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *packsize)
{
coproc_tmemcpyDeviceToHost2d(*d_ptrSrc, h_ptrDest, *size,
*nelmt1, *nelmt2, *packsize);
}
//
// 3d version
//
void FNAME(coproc_tmemcpydevicetohost3d)(const void **d_ptrSrc,
void *h_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *nelmt3, __INT *packsize)
{
coproc_tmemcpyDeviceToHost3d(*d_ptrSrc, h_ptrDest, *size,
*nelmt1, *nelmt2, *nelmt3, *packsize);
}
}
/*******************************************************************************
 * Copy device memory to device memory synchronously (using transposition)
******************************************************************************/
//
// 2d version
//
void coproc_tmemcpyDeviceToDevice2d(const void * __restrict__ d_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size, int nelmt1, int nelmt2,
int packsize)
{
  // Not implemented yet.
}
//
// 3d version
//
void coproc_tmemcpyDeviceToDevice3d(const void * __restrict__ d_ptrSrc,
void * __restrict__ d_ptrDest,
size_t size, int nelmt1, int nelmt2,
int nelmt3, int packsize)
{
  // Not implemented yet.
}
/******************************************************************************/
extern "C" {
//
// 2d version
//
void FNAME(coproc_tmemcpydevicetodevice2d)(const void **d_ptrSrc,
void **d_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *packsize)
{
coproc_tmemcpyDeviceToDevice2d(*d_ptrSrc, *d_ptrDest, *size,
*nelmt1, *nelmt2, *packsize);
}
//
// 3d version
//
void FNAME(coproc_tmemcpydevicetodevice3d)(const void **d_ptrSrc,
void **d_ptrDest,
__SIZET *size,
__INT *nelmt1, __INT *nelmt2,
__INT *nelmt3, __INT *packsize)
{
coproc_tmemcpyDeviceToDevice3d(*d_ptrSrc, *d_ptrDest, *size,
*nelmt1, *nelmt2, *nelmt3, *packsize);
}
}
/*******************************************************************************
* Combine two generic memory blocks in device memory
******************************************************************************/
template<typename T>
__global__ void combineOnDevice_knl(const T *ptrSrc1,
const T *ptrSrc2,
T *ptrDest,
size_t size)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<size)
{
ptrDest[idx] = ptrSrc1[idx] + ptrSrc2[idx];
}
}
template<>
__global__ void combineOnDevice_knl(const bool *ptrSrc1,
const bool *ptrSrc2,
bool *ptrDest,
size_t size)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<size)
{
ptrDest[idx] = ptrSrc1[idx] || ptrSrc2[idx];
}
}
/*******************************************************************************
* Combine two single memory blocks in device memory
******************************************************************************/
void coproc_combineSingleOnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
cudaStream_t stream)
{
__SP *ptrSrc1 = (__SP*)(d_ptrSrc1);
__SP *ptrSrc2 = (__SP*)(d_ptrSrc2);
__SP *ptrDest = (__SP*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
combineOnDevice_knl<<<grid, block, 0, stream>>>(ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineSingleOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combinesingleondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineSingleOnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(cudaStream_t)(*stream));
}
}
/*******************************************************************************
* Combine two double memory blocks in device memory
******************************************************************************/
void coproc_combineDoubleOnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
cudaStream_t stream)
{
__DP *ptrSrc1 = (__DP*)(d_ptrSrc1);
__DP *ptrSrc2 = (__DP*)(d_ptrSrc2);
__DP *ptrDest = (__DP*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
combineOnDevice_knl<<<grid, block, 0, stream>>>(ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineDoubleOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combinedoubleondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineDoubleOnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(cudaStream_t)(*stream));
}
}
/*******************************************************************************
 * Combine two quadruple memory blocks in device memory
******************************************************************************/
void coproc_combineQuadOnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
cudaStream_t stream)
{
__QP *ptrSrc1 = (__QP*)(d_ptrSrc1);
__QP *ptrSrc2 = (__QP*)(d_ptrSrc2);
__QP *ptrDest = (__QP*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
combineOnDevice_knl<<<grid, block, 0, stream>>>(ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineQuadOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combinequadondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineQuadOnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(cudaStream_t)(*stream));
}
}
/*******************************************************************************
* Combine two integer memory blocks in device memory
******************************************************************************/
void coproc_combineIntegerOnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
cudaStream_t stream)
{
__INT *ptrSrc1 = (__INT*)(d_ptrSrc1);
__INT *ptrSrc2 = (__INT*)(d_ptrSrc2);
__INT *ptrDest = (__INT*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
combineOnDevice_knl<<<grid, block, 0, stream>>>(ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineIntegerOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combineintegerondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineIntegerOnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(cudaStream_t)(*stream));
}
}
/*******************************************************************************
* Combine two int8 memory blocks in device memory
******************************************************************************/
void coproc_combineInt8OnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
cudaStream_t stream)
{
__I8 *ptrSrc1 = (__I8*)(d_ptrSrc1);
__I8 *ptrSrc2 = (__I8*)(d_ptrSrc2);
__I8 *ptrDest = (__I8*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
combineOnDevice_knl<<<grid, block, 0, stream>>>(ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineInt8OnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combineint8ondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineInt8OnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(cudaStream_t)(*stream));
}
}
/*******************************************************************************
* Combine two int16 memory blocks in device memory
******************************************************************************/
void coproc_combineInt16OnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
cudaStream_t stream)
{
__I16 *ptrSrc1 = (__I16*)(d_ptrSrc1);
__I16 *ptrSrc2 = (__I16*)(d_ptrSrc2);
__I16 *ptrDest = (__I16*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
combineOnDevice_knl<<<grid, block, 0, stream>>>(ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineInt16OnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combineint16ondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineInt16OnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(cudaStream_t)(*stream));
}
}
/*******************************************************************************
* Combine two int32 memory blocks in device memory
******************************************************************************/
void coproc_combineInt32OnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
cudaStream_t stream)
{
__I32 *ptrSrc1 = (__I32*)(d_ptrSrc1);
__I32 *ptrSrc2 = (__I32*)(d_ptrSrc2);
__I32 *ptrDest = (__I32*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
combineOnDevice_knl<<<grid, block, 0, stream>>>(ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineInt32OnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combineint32ondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineInt32OnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(cudaStream_t)(*stream));
}
}
/*******************************************************************************
* Combine two int64 memory blocks in device memory
******************************************************************************/
void coproc_combineInt64OnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
cudaStream_t stream)
{
__I64 *ptrSrc1 = (__I64*)(d_ptrSrc1);
__I64 *ptrSrc2 = (__I64*)(d_ptrSrc2);
__I64 *ptrDest = (__I64*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
combineOnDevice_knl<<<grid, block, 0, stream>>>(ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineInt64OnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combineint64ondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineInt64OnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(cudaStream_t)(*stream));
}
}
/*******************************************************************************
* Combine two logical memory blocks in device memory
******************************************************************************/
void coproc_combineLogicalOnDevice(const void *d_ptrSrc1,
const void *d_ptrSrc2,
void *d_ptrDest,
size_t size,
cudaStream_t stream)
{
__LOGICAL *ptrSrc1 = (__LOGICAL*)(d_ptrSrc1);
__LOGICAL *ptrSrc2 = (__LOGICAL*)(d_ptrSrc2);
__LOGICAL *ptrDest = (__LOGICAL*)(d_ptrDest);
int blocksize = 128;
dim3 grid;
dim3 block;
block.x = blocksize;
grid.x = (unsigned)ceil(size/(double)(block.x));
combineOnDevice_knl<<<grid, block, 0, stream>>>(ptrSrc1, ptrSrc2,
ptrDest, size);
coproc_checkError("coproc_combineLogicalOnDevice");
}
/******************************************************************************/
extern "C" {
void FNAME(coproc_combinelogicalondevice)(const void **d_ptrSrc1,
const void **d_ptrSrc2,
void **d_ptrDest,
__SIZET *size,
__I64 *stream)
{
coproc_combineLogicalOnDevice(*d_ptrSrc1, *d_ptrSrc2,
*d_ptrDest, *size,
(cudaStream_t)(*stream));
}
}
|
3cc93560015e2e11d46d2274d732934d50704667.hip | // !!! This is a file automatically generated by hipify!!!
#include "utils.cuh"
const float NULL_ODDS = 10.0;
const float ODDS_LIMIT = 5.0;
// V1: candidate poses are generated on the device and scored point by point,
// looking each transformed scan point up in the sorted map via binary search.
struct get_pose
{
const int _offset;
const float _resolution;
const float _init_pose;
explicit get_pose(const int& offset, const float& resolution, const float& init_pose):
_offset(offset), _resolution(resolution), _init_pose(init_pose){}
__host__ __device__
float operator()(int index)
{
return float((index-_offset)*_resolution+_init_pose);
}
};
struct get_6dof
{
const int _loop_size_rpyxyz;
const int _loop_size_pyxyz;
const int _loop_size_yxyz;
const int _loop_size_xyz;
const int _loop_size_yz;
const int _loop_size_z;
const float* _roll_angles;
const float* _pitch_angles;
const float* _yaw_angles;
const float* _x_displacements;
const float* _y_displacements;
const float* _z_displacements;
explicit get_6dof(const int& loop_size_rpyxyz,
const int& loop_size_pyxyz,
const int& loop_size_yxyz,
const int& loop_size_xyz,
const int& loop_size_yz,
const int& loop_size_z,
const float* roll_angles,
const float* pitch_angles,
const float* yaw_angles,
const float* x_displacements,
const float* y_displacements,
const float* z_displacements
):
_loop_size_rpyxyz(loop_size_rpyxyz),
_loop_size_pyxyz(loop_size_pyxyz),
_loop_size_yxyz(loop_size_yxyz),
_loop_size_xyz(loop_size_xyz),
_loop_size_yz(loop_size_yz),
_loop_size_z(loop_size_z),
_roll_angles(roll_angles),
_pitch_angles(pitch_angles),
_yaw_angles(yaw_angles),
_x_displacements(x_displacements),
_y_displacements(y_displacements),
_z_displacements(z_displacements)
{}
__host__ __device__
Eigen::Matrix<float, 6, 1> operator()(int pose_index)
{
Eigen::Matrix<float, 6, 1> pose;
pose(0, 0) = _roll_angles[int(pose_index/_loop_size_pyxyz)];
pose(1, 0) = _pitch_angles[int(pose_index%_loop_size_pyxyz/_loop_size_yxyz)];
pose(2, 0) = _yaw_angles[int(pose_index%_loop_size_pyxyz%_loop_size_yxyz/_loop_size_xyz)];
pose(3, 0) = _x_displacements[int(pose_index%_loop_size_pyxyz%_loop_size_yxyz%_loop_size_xyz/_loop_size_yz)];
pose(4, 0) = _y_displacements[int(pose_index%_loop_size_pyxyz%_loop_size_yxyz%_loop_size_xyz%_loop_size_yz/_loop_size_z)];
pose(5, 0) = _z_displacements[int(pose_index%_loop_size_pyxyz%_loop_size_yxyz%_loop_size_xyz%_loop_size_yz%_loop_size_z)];
return pose;
}
};
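/*
 * Worked example of the index decomposition above: with an angular window of
 * +/-1 (3 samples per angle) and a linear window of +/-1 (3 samples per axis),
 *   loop_size_pyxyz = 3^2 * 3^3 = 243,  loop_size_yxyz = 81,
 *   loop_size_xyz   = 27,               loop_size_yz   = 9,  loop_size_z = 3,
 * so pose_index 0 picks the first sample of every dimension and consecutive
 * indices vary z fastest, then y, x, yaw, pitch and finally roll.
 */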
thrust::device_vector<Eigen::Matrix<float, 6, 1> > GeneratePoses(const Eigen::Vector3f& angular_init_pose,
const int& angular_winsize, const float& angular_step, const Eigen::Vector3f& linear_init_pose,
const int& linear_winsize, const float& linear_step)
{
float time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
int angular_space_size = 2*angular_winsize+1;
thrust::device_vector<int> angular_indices(angular_space_size);
thrust::sequence(angular_indices.begin(), angular_indices.end());
hipDeviceSynchronize();
float roll = angular_init_pose[0];
thrust::device_vector<float> roll_angles(angular_space_size);
thrust::transform(angular_indices.begin(), angular_indices.end(), roll_angles.begin(),
get_pose(angular_winsize, angular_step, roll));
hipDeviceSynchronize();
float pitch = angular_init_pose[1];
thrust::device_vector<float> pitch_angles(angular_space_size);
thrust::transform(angular_indices.begin(), angular_indices.end(), pitch_angles.begin(),
get_pose(angular_winsize, angular_step, pitch));
hipDeviceSynchronize();
float yaw = angular_init_pose[2];
thrust::device_vector<float> yaw_angles(angular_space_size);
thrust::transform(angular_indices.begin(), angular_indices.end(), yaw_angles.begin(),
get_pose(angular_winsize, angular_step, yaw));
hipDeviceSynchronize();
int linear_space_size = 2*linear_winsize+1;
thrust::device_vector<int> linear_indices(linear_space_size);
thrust::sequence(linear_indices.begin(), linear_indices.end());
hipDeviceSynchronize();
float x = linear_init_pose[0];
thrust::device_vector<float> x_displacements(linear_space_size);
thrust::transform(linear_indices.begin(), linear_indices.end(), x_displacements.begin(),
get_pose(linear_winsize, linear_step, x));
hipDeviceSynchronize();
float y = linear_init_pose[1];
thrust::device_vector<float> y_displacements(linear_space_size);
thrust::transform(linear_indices.begin(), linear_indices.end(), y_displacements.begin(),
get_pose(linear_winsize, linear_step, y));
hipDeviceSynchronize();
float z = linear_init_pose[2];
thrust::device_vector<float> z_displacements(linear_space_size);
thrust::transform(linear_indices.begin(), linear_indices.end(), z_displacements.begin(),
get_pose(linear_winsize, linear_step, z));
hipDeviceSynchronize();
int pose_num = int(pow(angular_space_size,3)*pow(linear_space_size, 3));
thrust::device_vector<Eigen::Matrix<float, 6, 1> > poses(pose_num);
thrust::device_vector<int> pose_indices(pose_num);
thrust::sequence(pose_indices.begin(), pose_indices.end());
hipDeviceSynchronize();
int loop_size_rpyxyz = int(pow(angular_space_size,3)*pow(linear_space_size, 3));
int loop_size_pyxyz = int(pow(angular_space_size,2)*pow(linear_space_size, 3));
int loop_size_yxyz = int(angular_space_size*pow(linear_space_size, 3));
int loop_size_xyz = int(pow(linear_space_size, 3));
int loop_size_yz = int(pow(linear_space_size, 2));
int loop_size_z = int(linear_space_size);
thrust::transform(thrust::device, pose_indices.begin(), pose_indices.end(), poses.begin(),
get_6dof(loop_size_rpyxyz, loop_size_pyxyz, loop_size_yxyz,
loop_size_xyz, loop_size_yz, loop_size_z,
thrust::raw_pointer_cast(&roll_angles[0]),
thrust::raw_pointer_cast(&pitch_angles[0]),
thrust::raw_pointer_cast(&yaw_angles[0]),
thrust::raw_pointer_cast(&x_displacements[0]),
thrust::raw_pointer_cast(&y_displacements[0]),
thrust::raw_pointer_cast(&z_displacements[0])
));
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// printf("Time to generate pose: %3.1f ms \n", time);
return poses;
}
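/*
 * Illustrative call (not part of the original source): search +/-2 angular
 * steps of 0.02 rad and +/-3 linear steps of 0.1 m around a rough pose guess;
 * the result holds (2*2+1)^3 * (2*3+1)^3 = 42875 candidate poses.
 *
 *   Eigen::Vector3f rpy(0.0f, 0.0f, 1.57f);
 *   Eigen::Vector3f xyz(10.0f, -2.0f, 0.5f);
 *   thrust::device_vector<Eigen::Matrix<float, 6, 1> > poses =
 *       GeneratePoses(rpy, 2, 0.02f, xyz, 3, 0.1f);
 */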
struct get_transform
{
__host__ __device__
Eigen::Matrix4f operator()(const Eigen::Matrix<float, 6, 1>& pose)
{
float alpha = pose[0];
float beta = pose[1];
float gamma = pose[2];
float x = pose[3];
float y = pose[4];
float z = pose[5];
Eigen::Matrix4f transform;
// ZYX order
transform << cosf(beta)*cosf(gamma), -cosf(beta)*sinf(gamma), sinf(beta), x,
sinf(alpha)*sinf(beta)*cosf(gamma)+cosf(alpha)*sinf(gamma), -sinf(alpha)*sinf(beta)*sinf(gamma)+cosf(alpha)*cosf(gamma), -sinf(alpha)*cosf(beta), y,
-cosf(alpha)*sinf(beta)*cosf(gamma)+sinf(alpha)*sinf(gamma), cosf(alpha)*sinf(beta)*sinf(gamma)+sinf(alpha)*cosf(gamma), cosf(alpha)*cosf(beta), z,
0.0, 0.0, 0.0, 1.0;
// // XYZ order
// transform << cosf(beta)*cosf(alpha), sinf(gamma)*sinf(beta)*cosf(alpha)-cosf(gamma)*sinf(alpha), cosf(gamma)*sinf(beta)*cosf(alpha)+sinf(gamma)*sinf(alpha), x,
// cosf(beta)*sinf(alpha), sinf(gamma)*sinf(beta)*sinf(alpha)+cosf(gamma)*cosf(alpha), cosf(gamma)*sinf(beta)*sinf(alpha)-sinf(gamma)*cosf(alpha), y,
// -sinf(beta), sinf(gamma)*cosf(beta), cosf(gamma)*cosf(beta), z,
// 0.0, 0.0, 0.0, 1.0;
return transform;
}
};
struct sort_map_point
{
__host__ __device__
bool operator()(const Eigen::Vector4f& lhs, const Eigen::Vector4f& rhs)
{
return (lhs[0]<rhs[0])||
(fabs(lhs[0]-rhs[0])<1e-6&&lhs[1]<rhs[1])||
(fabs(lhs[0]-rhs[0])<1e-6&&fabs(lhs[1]-rhs[1])<1e-6&&lhs[2]<rhs[2])||
(fabs(lhs[0]-rhs[0])<1e-6&&fabs(lhs[1]-rhs[1])<1e-6&&fabs(lhs[2]-rhs[2])<1e-6&&lhs[3]<rhs[3]);
}
};
struct point_transform
{
const Eigen::Vector3f _point;
const float _resolution;
explicit point_transform(const Eigen::Vector3f& point, const float& resolution):
_point(point), _resolution(resolution){}
__host__ __device__
Eigen::Vector3f operator()(const Eigen::Matrix4f& transform)
{
Eigen::Vector4f homo_point;
homo_point << _point[0], _point[1], _point[2], 1.0;
Eigen::Vector4f homo_transformed_point = homo_point[0] * transform.col(0) +
homo_point[1] * transform.col(1) +
homo_point[2] * transform.col(2) +
homo_point[3] * transform.col(3);
Eigen::Vector3f transformed_point;
transformed_point << roundf(homo_transformed_point[0]/_resolution),
roundf(homo_transformed_point[1]/_resolution),
roundf(homo_transformed_point[2]/_resolution);
return transformed_point;
}
};
__host__ __device__ bool operator==(const Eigen::Vector3f& lhs, const Eigen::Vector3f& rhs)
{
return fabs(lhs[0]-rhs[0])<1e-6&&fabs(lhs[1]-rhs[1])<1e-6&&fabs(lhs[2]-rhs[2])<1e-6;
}
__host__ __device__ bool operator>(const Eigen::Vector3f& lhs, const Eigen::Vector3f& rhs)
{
return (lhs[0]>rhs[0])||(fabs(lhs[0]-rhs[0])<1e-6&&lhs[1]>rhs[1])||(fabs(lhs[0]-rhs[0])<1e-6&&fabs(lhs[1]-rhs[1])<1e-6&&lhs[2]>rhs[2]);
}
__host__ __device__ int BinarySearchRecursive(const Eigen::Vector4f* points, int low, int high, Eigen::Vector3f point)
{
if (low > high)
return -1;
int mid = low + (high - low) / 2;
Eigen::Vector3f mid_point;
mid_point << points[mid][0], points[mid][1], points[mid][2];
if (mid_point == point)
return mid;
else if (mid_point > point)
return BinarySearchRecursive(points, low, mid - 1, point);
else
return BinarySearchRecursive(points, mid + 1, high, point);
}
struct match
{
const Eigen::Vector4f* _map;
const int _size;
explicit match(Eigen::Vector4f* map, int size):_map(map), _size(size){}
__host__ __device__
float operator()(const Eigen::Vector3f& point)
{
int idx = BinarySearchRecursive(_map, 0, _size-1, point);
if(idx < 0)
{
return 0.0;
}
else
{
return _map[idx][3];
}
}
};
struct compute_score
{
const int _size;
explicit compute_score(int size):_size(size){}
__host__ __device__
float operator()(const float& sum)
{
return float(sum/float(_size));
}
};
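/*
 * One plausible V1 driver, reconstructed from the functor signatures above
 * (the original host code is not shown in this file, so names such as
 * transforms, d_map, host_scan and map_resolution are assumptions; d_map is
 * expected to be sorted with sort_map_point):
 *
 *   thrust::device_vector<float> sums(transforms.size(), 0.0f);
 *   thrust::device_vector<Eigen::Vector3f> pts(transforms.size());
 *   thrust::device_vector<float> odds(transforms.size());
 *   float map_resolution = 0.05f;                       // assumed grid size
 *   for (const Eigen::Vector3f& p : host_scan) {
 *     thrust::transform(transforms.begin(), transforms.end(), pts.begin(),
 *                       point_transform(p, map_resolution));
 *     thrust::transform(pts.begin(), pts.end(), odds.begin(),
 *                       match(thrust::raw_pointer_cast(d_map.data()),
 *                             (int)d_map.size()));
 *     thrust::transform(sums.begin(), sums.end(), odds.begin(), sums.begin(),
 *                       thrust::plus<float>());
 *   }
 *   thrust::device_vector<float> scores(sums.size());
 *   thrust::transform(sums.begin(), sums.end(), scores.begin(),
 *                     compute_score((int)host_scan.size()));
 */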
// V2: nested-thrust variant; a single functor scores one whole scan per
// candidate transform via an inner transform_reduce.
struct compute_point_score:public thrust::unary_function<Eigen::Vector3f, float> // <arg, result>
{
Eigen::Matrix4f _transform;
int _map_size;
float _map_resolution;
Eigen::Vector4f* _map;
explicit compute_point_score(Eigen::Matrix4f transform, Eigen::Vector4f* map, int& map_size, float& map_resolution):_transform(transform), _map(map), _map_size(map_size), _map_resolution(map_resolution){}
__host__ __device__
float operator()(Eigen::Vector3f& point)
{
float score;
Eigen::Vector4f homo_point;
homo_point << point[0], point[1], point[2], 1.0;
Eigen::Vector4f homo_transformed_point = homo_point[0] * _transform.col(0) + homo_point[1] * _transform.col(1) + homo_point[2] * _transform.col(2) + homo_point[3] * _transform.col(3);
Eigen::Vector3f transformed_point;
transformed_point << roundf(homo_transformed_point[0]/_map_resolution), roundf(homo_transformed_point[1]/_map_resolution), roundf(homo_transformed_point[2]/_map_resolution);
Eigen::Vector3f mid_point;
if (_map_size <= 0)
{
return 0.0;
}
else
{
int low = 0;
int high = _map_size - 1;
while (low <= high)
{
int mid = low + (high - low) / 2;
mid_point << _map[mid][0], _map[mid][1], _map[mid][2];
if (mid_point == transformed_point)
{
score = _map[mid][3];
return score;
}
else if (mid_point > transformed_point)
{
high = mid - 1;
}
else
{
low = mid + 1;
}
}
return 0.0;
}
}
};
struct compute_cloud_score:public thrust::unary_function<Eigen::Matrix4f, float> // <arg, result>
{
Eigen::Vector3f* _scan;
int _scan_size;
Eigen::Vector4f* _map;
int _map_size;
float _map_resolution;
compute_cloud_score(Eigen::Vector3f* scan, int& scan_size, Eigen::Vector4f* map, int& map_size, float& map_resolution):_scan(scan), _scan_size(scan_size), _map(map), _map_size(map_size), _map_resolution(map_resolution){}
__host__ __device__
float operator()(const Eigen::Matrix4f& transform)
{
thrust::device_ptr<Eigen::Vector3f> dev_scan = thrust::device_pointer_cast(_scan);
float sum = thrust::transform_reduce(thrust::device, dev_scan, dev_scan+_scan_size, compute_point_score(transform, thrust::raw_pointer_cast(_map), _map_size, _map_resolution), 0.0, thrust::plus<float>());
float score = float(sum/_scan_size);
return score;
}
};
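/*
 * Illustrative V2 usage (an assumption; the original driver is not shown
 * here): score every candidate transform with one nested thrust call.
 *
 *   int scan_size = (int)d_scan.size();
 *   int map_size  = (int)d_map.size();
 *   float map_resolution = 0.05f;                       // assumed grid size
 *   thrust::device_vector<float> scores(transforms.size());
 *   thrust::transform(transforms.begin(), transforms.end(), scores.begin(),
 *       compute_cloud_score(thrust::raw_pointer_cast(d_scan.data()), scan_size,
 *                           thrust::raw_pointer_cast(d_map.data()), map_size,
 *                           map_resolution));
 */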
// V3: replaces the per-point binary search with a dense odds lookup table
// addressed by a flattened voxel key.
struct assign_value_
{
float* _map_odds;
explicit assign_value_(float* map_odds):_map_odds(map_odds){}
__host__ __device__
void operator()(Eigen::Vector4f map_element)
{
int key = (int(map_element[0])+500)*1000*1000+(int(map_element[1])+500)*1000+(int(map_element[2])+500);
_map_odds[key] = map_element[3];
}
};
struct get_key
{
__host__ __device__
int operator()(Eigen::Vector4f map_element)
{
int key = (int(map_element[0])+500)*1000*1000+(int(map_element[1])+500)*1000+(int(map_element[2])+500);
return key;
}
};
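/*
 * Note on the V3 key (derived from the expressions above): a map cell with
 * grid coordinates (gx, gy, gz), each assumed to lie in [-500, 499], maps to
 * key = (gx+500)*1000000 + (gy+500)*1000 + (gz+500), i.e. a dense
 * 1000*1000*1000 float table (roughly 4 GB). Filling it could look like this,
 * presumably initialised with NULL_ODDS so that unknown cells score zero
 * through the ODDS_LIMIT test below:
 *
 *   thrust::device_vector<float> map_odds(1000*1000*1000, NULL_ODDS);
 *   thrust::for_each(d_map.begin(), d_map.end(),
 *                    assign_value_(thrust::raw_pointer_cast(map_odds.data())));
 */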
struct faster_compute_point_score:public thrust::unary_function<Eigen::Vector3f, float> // <arg, result>
{
Eigen::Matrix4f _transform;
int _map_size;
float _map_resolution;
float* _map_odds;
explicit faster_compute_point_score(Eigen::Matrix4f transform, float* map_odds, int& map_size, float& map_resolution):_transform(transform), _map_odds(map_odds), _map_size(map_size), _map_resolution(map_resolution){}
__host__ __device__
float operator()(Eigen::Vector3f& point)
{
float score;
Eigen::Vector4f homo_point;
homo_point << point[0], point[1], point[2], 1.0;
Eigen::Vector4f homo_transformed_point = homo_point[0] * _transform.col(0) + homo_point[1] * _transform.col(1) + homo_point[2] * _transform.col(2) + homo_point[3] * _transform.col(3);
Eigen::Vector3f transformed_point;
transformed_point << roundf(homo_transformed_point[0]/_map_resolution), roundf(homo_transformed_point[1]/_map_resolution), roundf(homo_transformed_point[2]/_map_resolution);
int key = (int(transformed_point[0])+500)*1000000+(int(transformed_point[1])+500)*1000+(int(transformed_point[2])+500);
if(_map_odds[key] > ODDS_LIMIT)
{
return 0.0;
}
else
{
return _map_odds[key];
}
}
};
struct faster_compute_cloud_score:public thrust::unary_function<Eigen::Matrix4f, float> // <arg, result>
{
Eigen::Vector3f* _scan;
int _scan_size;
float* _map_odds;
int _map_size;
float _map_resolution;
explicit faster_compute_cloud_score(Eigen::Vector3f* scan, int& scan_size, float* map_odds, int& map_size, float& map_resolution):_scan(scan), _scan_size(scan_size), _map_odds(map_odds), _map_size(map_size), _map_resolution(map_resolution){}
__host__ __device__
float operator()(const Eigen::Matrix4f& transform)
{
thrust::device_ptr<Eigen::Vector3f> dev_scan = thrust::device_pointer_cast(_scan);
float sum = thrust::transform_reduce(thrust::device, dev_scan, dev_scan+_scan_size, faster_compute_point_score(transform, thrust::raw_pointer_cast(_map_odds), _map_size, _map_resolution), 0.0, thrust::plus<float>());
float score = float(sum/_scan_size);
return score;
}
};
//V4
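// V4 skips the intermediate 6-DoF pose vectors: cal_transform decodes a flat
// pose index directly into a homogeneous transform using the precomputed loop
// strides in loop_gap, and the odds grid is sized to the map's bounding box
// (MinMaxXYZ / assign_valueV2_) instead of a fixed 1000^3 volume.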
struct cal_transform{
int* _loop_size;
Eigen::Vector3f _angular_init_pose;
int _angular_winsize;
float _angular_step;
Eigen::Vector3f _linear_init_pose;
int _linear_winsize;
float _linear_step;
explicit cal_transform(int* loop_size,const Eigen::Vector3f& angular_init_pose, const int& angular_winsize, const float& angular_step,
const Eigen::Vector3f& linear_init_pose, const int& linear_winsize, const float& linear_step):
_loop_size(loop_size),_angular_init_pose(angular_init_pose),_angular_winsize(angular_winsize),_angular_step(angular_step),
_linear_init_pose(linear_init_pose),_linear_winsize(linear_winsize),_linear_step(linear_step){}
__host__ __device__
Eigen::Matrix4f operator()(const int& pose_seq){
Eigen::Matrix4f transform;
int roll = pose_seq/_loop_size[0];
int pit = pose_seq%_loop_size[0]/_loop_size[1];
int yaw = pose_seq%_loop_size[0]%_loop_size[1]/_loop_size[2];
int x1 = pose_seq%_loop_size[0]%_loop_size[1]%_loop_size[2]/_loop_size[3];
int y1 = pose_seq%_loop_size[0]%_loop_size[1]%_loop_size[2]%_loop_size[3]/_loop_size[4];
int z1 = pose_seq%_loop_size[0]%_loop_size[1]%_loop_size[2]%_loop_size[3]%_loop_size[4];
float alpha = (roll-_angular_winsize)*_angular_step + _angular_init_pose[0];
float beta = (pit-_angular_winsize)*_angular_step + _angular_init_pose[1];
float gamma = (yaw-_angular_winsize)*_angular_step + _angular_init_pose[2];
float x = (x1-_linear_winsize)*_linear_step + _linear_init_pose[0];
float y = (y1-_linear_winsize)*_linear_step + _linear_init_pose[1];
float z = (z1-_linear_winsize)*_linear_step + _linear_init_pose[2];
transform << cosf(beta)*cosf(gamma), -cosf(beta)*sinf(gamma), sinf(beta), x,
sinf(alpha)*sinf(beta)*cosf(gamma)+cosf(alpha)*sinf(gamma), -sinf(alpha)*sinf(beta)*sinf(gamma)+cosf(alpha)*cosf(gamma), -sinf(alpha)*cosf(beta), y,
-cosf(alpha)*sinf(beta)*cosf(gamma)+sinf(alpha)*sinf(gamma), cosf(alpha)*sinf(beta)*sinf(gamma)+sinf(alpha)*cosf(gamma), cosf(alpha)*cosf(beta), z,
0.0, 0.0, 0.0, 1.0;
// // XYZ order
// transform << cosf(beta)*cosf(alpha), sinf(gamma)*sinf(beta)*cosf(alpha)-cosf(gamma)*sinf(alpha), cosf(gamma)*sinf(beta)*cosf(alpha)+sinf(gamma)*sinf(alpha), x,
// cosf(beta)*sinf(alpha), sinf(gamma)*sinf(beta)*sinf(alpha)+cosf(gamma)*cosf(alpha), cosf(gamma)*sinf(beta)*sinf(alpha)-sinf(gamma)*cosf(alpha), y,
// -sinf(beta), sinf(gamma)*cosf(beta), cosf(gamma)*cosf(beta), z,
// 0.0, 0.0, 0.0, 1.0;
return transform;
}
};
thrust::device_vector<Eigen::Matrix4f> GenerateTransform(const Eigen::Vector3f& angular_init_pose, const int& angular_window_size, const float& angular_step_size,
const Eigen::Vector3f& linear_init_pose, const int& linear_window_size, const float& linear_step_size){
int angular_space_size = 2*angular_window_size+1;
int linear_space_size = 2*linear_window_size+1;
int pose_num = int(pow(angular_space_size,3)*pow(linear_space_size, 3));
thrust::device_vector<Eigen::Matrix4f> transforms(pose_num);
int loop_size_rpyxyz = pose_num;
int loop_size_pyxyz = loop_size_rpyxyz/angular_space_size;
int loop_size_yxyz = loop_size_pyxyz/angular_space_size;
int loop_size_xyz = loop_size_yxyz/angular_space_size;
int loop_size_yz = loop_size_xyz/linear_space_size;
int loop_size_z = loop_size_yz/linear_space_size;
thrust::device_vector<int> loop_gap(6);
loop_gap[0] = loop_size_pyxyz;
loop_gap[1] = loop_size_yxyz;
    loop_gap[2] = loop_size_xyz;
    loop_gap[3] = loop_size_yz;
    loop_gap[4] = loop_size_z;
thrust::device_vector<int> pose_seq(pose_num);
thrust::sequence(pose_seq.begin(), pose_seq.end());
hipDeviceSynchronize();
thrust::transform(thrust::device, pose_seq.begin(), pose_seq.end(), transforms.begin(), cal_transform(
thrust::raw_pointer_cast(loop_gap.data()),angular_init_pose,angular_window_size,angular_step_size,
linear_init_pose, linear_window_size, linear_step_size));
hipDeviceSynchronize();
return transforms;
}
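// Illustrative usage (a sketch only; the window and step values below are
// assumptions, not taken from any caller in this file):
//   thrust::device_vector<Eigen::Matrix4f> transforms =
//       GenerateTransform(Eigen::Vector3f(0, 0, 0), /*angular window*/ 2, /*rad*/ 0.02f,
//                         Eigen::Vector3f(0, 0, 0), /*linear window*/ 3, /*m*/ 0.1f);
//   // yields (2*2+1)^3 * (2*3+1)^3 = 125 * 343 = 42875 candidate transforms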
struct get_element{
int x_;
explicit get_element(int x):x_(x){}
__host__ __device__
    int operator()(const Eigen::Vector4f& point){
return int(point[x_]);
}
};
std::vector<int> MinMaxXYZ(thrust::device_vector<Eigen::Vector4f>& dev_map, int map_size){
thrust::device_vector<int> point_x(map_size);
thrust::device_vector<int> point_y(map_size);
thrust::device_vector<int> point_z(map_size);
thrust::transform(thrust::device, dev_map.begin(), dev_map.end(), point_x.begin(), get_element(0));
thrust::transform(thrust::device, dev_map.begin(), dev_map.end(), point_y.begin(), get_element(1));
thrust::transform(thrust::device, dev_map.begin(), dev_map.end(), point_z.begin(), get_element(2));
hipDeviceSynchronize();
thrust::pair<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator> resx = thrust::minmax_element(thrust::device, point_x.begin(), point_x.end());
thrust::pair<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator> resy = thrust::minmax_element(thrust::device, point_y.begin(), point_y.end());
thrust::pair<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator> resz = thrust::minmax_element(thrust::device, point_z.begin(), point_z.end());
hipDeviceSynchronize();
// int min_x = *(thrust::min_element(thrust::device, point_x.begin(), point_x.end()));
// int max_x = *(thrust::max_element(thrust::device, point_x.begin(), point_x.end()));
// int min_y = *(thrust::min_element(thrust::device, point_y.begin(), point_y.end()));
// int max_y = *(thrust::max_element(thrust::device, point_y.begin(), point_y.end()));
// int min_z = *(thrust::min_element(thrust::device, point_z.begin(), point_z.end()));
// int max_z = *(thrust::max_element(thrust::device, point_z.begin(), point_z.end()));
std::vector<int> result;
result.push_back(*resx.first);
result.push_back(*resy.first);
result.push_back(*resz.first);
result.push_back(*resx.second);
result.push_back(*resy.second);
result.push_back(*resz.second);
return result;
}
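// assign_valueV2_ writes each map point's odds into the bounding-box-sized grid:
// the key is a row-major index built from the per-axis offsets (-min) and
// lengths (max-min+1) produced by MinMaxXYZ.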
struct assign_valueV2_
{
float* _map_odds;
int* _offset;
int* _map_length;
explicit assign_valueV2_(float* map_odds, int* offset, int* map_length):_map_odds(map_odds),_offset(offset),_map_length(map_length){}
__host__ __device__
void operator()(Eigen::Vector4f map_element)
{
int key = (int(map_element[0])+_offset[0])*_map_length[1]*_map_length[2]+(int(map_element[1])+_offset[1])*_map_length[2]+(int(map_element[2])+_offset[2]);
_map_odds[key] = map_element[3];
}
};
struct faster_compute_point_scoreV2:public thrust::unary_function<Eigen::Vector3f, float> // <arg, result>
{
Eigen::Matrix4f _transform;
int _map_size;
float _map_resolution;
float* _map_odds;
int* _offset;
int* _map_length;
int _map_odds_size;
explicit faster_compute_point_scoreV2(Eigen::Matrix4f transform, float* map_odds, int& map_size, float& map_resolution, int* offset, int* map_length, int map_odds_size):
_transform(transform), _map_odds(map_odds), _map_size(map_size), _map_resolution(map_resolution), _offset(offset), _map_length(map_length),_map_odds_size(map_odds_size){}
__host__ __device__
float operator()(Eigen::Vector3f& point)
{
float score;
Eigen::Vector4f homo_point;
homo_point << point[0], point[1], point[2], 1.0;
Eigen::Vector4f homo_transformed_point = homo_point[0] * _transform.col(0) + homo_point[1] * _transform.col(1) + homo_point[2] * _transform.col(2) + homo_point[3] * _transform.col(3);
Eigen::Vector3f transformed_point;
transformed_point << roundf(homo_transformed_point[0]/_map_resolution), roundf(homo_transformed_point[1]/_map_resolution), roundf(homo_transformed_point[2]/_map_resolution);
int tp_x = int(transformed_point[0])+_offset[0];
int tp_y = int(transformed_point[1])+_offset[1];
int tp_z = int(transformed_point[2])+_offset[2];
if(tp_x<0 || tp_x>=_map_length[0] || tp_y<0 || tp_y>=_map_length[1] || tp_z<0 || tp_z>=_map_length[2] ){
return 0.0;
}
int key = tp_x*_map_length[1]*_map_length[2]+tp_y*_map_length[2]+tp_z;
if(_map_odds[key] > ODDS_LIMIT)
{
return 0.0;
}
else
{
return _map_odds[key];
}
}
};
struct faster_compute_cloud_scoreV2:public thrust::unary_function<Eigen::Matrix4f, float> // <arg, result>
{
Eigen::Vector3f* _scan;
int _scan_size;
float* _map_odds;
int _map_size;
float _map_resolution;
int* _offset;
int* _map_length;
int _map_odds_size;
explicit faster_compute_cloud_scoreV2(Eigen::Vector3f* scan, int& scan_size, float* map_odds, int& map_size, float& map_resolution, int* offset, int* map_length, int map_odds_size):
_scan(scan), _scan_size(scan_size), _map_odds(map_odds), _map_size(map_size), _map_resolution(map_resolution),_offset(offset),_map_length(map_length),_map_odds_size(map_odds_size){}
__host__ __device__
float operator()(const Eigen::Matrix4f& transform)
{
thrust::device_ptr<Eigen::Vector3f> dev_scan = thrust::device_pointer_cast(_scan);
// float sum = thrust::transform_reduce(thrust::device, dev_scan, dev_scan+_scan_size, faster_compute_point_score(transform, thrust::raw_pointer_cast(_map_odds), _map_size, _map_resolution), 0.0, thrust::plus<float>());
float sum = thrust::transform_reduce(thrust::device, dev_scan, dev_scan+_scan_size, faster_compute_point_scoreV2(
transform, thrust::raw_pointer_cast(_map_odds), _map_size, _map_resolution,thrust::raw_pointer_cast(_offset),thrust::raw_pointer_cast(_map_length),_map_odds_size), 0.0, thrust::plus<float>());
float score = float(sum/_scan_size);
return score;
}
};
//__host__ __device__ bool operator==(const Eigen::Vector4f& lhs, const Eigen::Vector4f& rhs)
//{
// return fabs(lhs[0]-rhs[0])<1e-6&&fabs(lhs[1]-rhs[1])<1e-6&&fabs(lhs[2]-rhs[2])<1e-6;
//}
//
//__host__ __device__ bool operator>(const Eigen::Vector4f& lhs, const Eigen::Vector4f& rhs)
//{
// return (lhs[0]>rhs[0])||(fabs(lhs[0]-rhs[0])<1e-6&&lhs[1]>rhs[1])||(fabs(lhs[0]-rhs[0])<1e-6&&fabs(lhs[1]-rhs[1])<1e-6&&lhs[2]>rhs[2])||(fabs(lhs[0]-rhs[0])<1e-6&&fabs(lhs[1]-rhs[1])<1e-6&&fabs(lhs[2]-rhs[2])<1e-6&&lhs[3]>rhs[3]);
//}
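// ComputeOptimalPoseV1: build the pose grid, sort the map once, then loop over
// scan points on the host; for each point every candidate transform is scored
// by binary search and accumulated into per-pose sums, which are normalized by
// the scan size before taking the arg-max pose.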
void ComputeOptimalPoseV1(const std::vector<Eigen::Vector3f>& scan, const std::vector<Eigen::Vector4f>& map,
const Eigen::Vector3f& angular_init_pose, const int& angular_window_size, const float& angular_step_size,
const Eigen::Vector3f& linear_init_pose, const int& linear_window_size, const float& linear_step_size,
const float& map_resolution)
{
thrust::device_vector<Eigen::Matrix<float, 6, 1> > poses = GeneratePoses(
angular_init_pose, angular_window_size, angular_step_size,
linear_init_pose, linear_window_size, linear_step_size);
float time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
thrust::device_vector<Eigen::Matrix4f> transforms(poses.size());
thrust::transform(thrust::device, poses.begin(), poses.end(), transforms.begin(), get_transform());
hipDeviceSynchronize();
// std::cout<<"Number of generated poses: "<<transforms.size()<<std::endl;
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// printf("Time to generate transforms: %3.1f ms \n", time);
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
thrust::device_vector<Eigen::Vector3f> trans_point(poses.size());
thrust::device_vector<float> score_tile(poses.size());
thrust::device_vector<float> score_bins(poses.size());
thrust::fill(thrust::device, score_bins.begin(), score_bins.end(), 0.0);
hipDeviceSynchronize();
int map_size = map.size();
int scan_size = scan.size();
thrust::device_vector<Eigen::Vector4f> dev_map = map;
thrust::sort(thrust::device, dev_map.begin(), dev_map.end(), sort_map_point());
hipDeviceSynchronize();
// std::cout<<"Number of points in scan: "<<scan_size<<std::endl;
// std::cout<<"Number of points in map: "<<map_size<<std::endl;
for(int i = 0 ; i < scan.size(); i++)
{
thrust::transform(thrust::device, transforms.begin(), transforms.end(), trans_point.begin(),
point_transform(scan[i], map_resolution));
hipDeviceSynchronize();
thrust::transform(thrust::device, trans_point.begin(), trans_point.end(), score_tile.begin(),
match(thrust::raw_pointer_cast(&dev_map[0]), map_size));
hipDeviceSynchronize();
thrust::transform(thrust::device, score_bins.begin(), score_bins.end(), score_tile.begin(), score_bins.begin(), thrust::plus<float>());
hipDeviceSynchronize();
}
thrust::transform(thrust::device, score_bins.begin(), score_bins.end(), score_bins.begin(), compute_score(scan_size));
hipDeviceSynchronize();
thrust::device_vector<float>::iterator max_element_iter = thrust::max_element(score_bins.begin(), score_bins.end());
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// printf("Time to compute optimal pose: %3.1f ms \n", time);
int opt_pose_idx = max_element_iter - score_bins.begin();
// std::cout<<"Optimal Pose Index: "<<opt_pose_idx<<std::endl;
std::cout<<"Optimal Pose Score: "<<score_bins[opt_pose_idx]<<std::endl;
// thrust::host_vector<Eigen::Matrix<float, 6, 1> > host_poses = poses;
// std::cout<<"Optimal Pose: (roll)"<<host_poses[opt_pose_idx][0]<<" rad, (pitch)"
// <<host_poses[opt_pose_idx][1]<<" rad, (yaw)"
// <<host_poses[opt_pose_idx][2]<<" rad, (x)"
// <<host_poses[opt_pose_idx][3]<<" m, (y)"
// <<host_poses[opt_pose_idx][4]<<" m, (z)"
// <<host_poses[opt_pose_idx][5]<<" m"<<std::endl;
}
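// ComputeOptimalPoseV2 moves the per-point loop onto the device: a single
// transform over all candidate poses, where each pose's score is a nested
// transform_reduce over the whole scan (compute_cloud_score), effectively
// carried out from within the thread handling that pose.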
void ComputeOptimalPoseV2(const std::vector<Eigen::Vector3f>& scan, const std::vector<Eigen::Vector4f>& map,
const Eigen::Vector3f& angular_init_pose, const int& angular_window_size, const float& angular_step_size,
const Eigen::Vector3f& linear_init_pose, const int& linear_window_size, const float& linear_step_size,
float& map_resolution)
{
thrust::device_vector<Eigen::Matrix<float, 6, 1> > poses = GeneratePoses(angular_init_pose, angular_window_size, angular_step_size, linear_init_pose, linear_window_size, linear_step_size);
int pose_num = poses.size();
float time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
thrust::device_vector<Eigen::Matrix4f> transforms(pose_num);
thrust::transform(thrust::device, poses.begin(), poses.end(), transforms.begin(), get_transform());
hipDeviceSynchronize();
// std::cout<<"Number of generated poses: "<<transforms.size()<<std::endl;
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// printf("Time to generate transforms: %3.1f ms \n", time);
thrust::device_vector<Eigen::Vector3f> dev_scan = scan;
int scan_size = scan.size();
thrust::device_vector<Eigen::Vector4f> dev_map = map;
int map_size = map.size();
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
thrust::sort(thrust::device, dev_map.begin(), dev_map.end(), sort_map_point());
hipDeviceSynchronize();
// std::cout<<"Number of points in scan: "<<scan_size<<std::endl;
// std::cout<<"Number of points in map: "<<map_size<<std::endl;
thrust::device_vector<float> scores(pose_num);
thrust::transform(thrust::device, transforms.begin(), transforms.end(), scores.begin(), compute_cloud_score(thrust::raw_pointer_cast(dev_scan.data()), scan_size,
thrust::raw_pointer_cast(dev_map.data()), map_size, map_resolution));
hipDeviceSynchronize();
thrust::device_vector<float>::iterator max_element_iter = thrust::max_element(scores.begin(), scores.end());
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// printf("Time to compute optimal pose: %3.1f ms \n", time);
int opt_pose_idx = max_element_iter - scores.begin();
// std::cout<<"Optimal Pose Index: "<<opt_pose_idx<<std::endl;
std::cout<<"Optimal Pose Score: "<<scores[opt_pose_idx]<<std::endl;
// thrust::host_vector<Eigen::Matrix<float, 6, 1> > host_poses = poses;
// std::cout<<"Optimal Pose: (roll)"<<host_poses[opt_pose_idx][0]<<" rad, (pitch)"
// <<host_poses[opt_pose_idx][1]<<" rad, (yaw)"
// <<host_poses[opt_pose_idx][2]<<" rad, (x)"
// <<host_poses[opt_pose_idx][3]<<" m, (y)"
// <<host_poses[opt_pose_idx][4]<<" m, (z)"
// <<host_poses[opt_pose_idx][5]<<" m"<<std::endl;
}
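// ComputeOptimalPoseV3: same per-pose structure as V2, but the sorted-map
// binary search is replaced by the dense 1000^3 odds grid (roughly 4 GB of
// floats), populated once by assign_value_ before scoring.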
void ComputeOptimalPoseV3(const std::vector<Eigen::Vector3f>& scan, const std::vector<Eigen::Vector4f>& map,
const Eigen::Vector3f& angular_init_pose, const int& angular_window_size, const float& angular_step_size,
const Eigen::Vector3f& linear_init_pose, const int& linear_window_size, const float& linear_step_size,
float& map_resolution)
{
thrust::device_vector<Eigen::Matrix<float, 6, 1> > poses = GeneratePoses(angular_init_pose, angular_window_size, angular_step_size, linear_init_pose, linear_window_size, linear_step_size);
int pose_num = poses.size();
float time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
thrust::device_vector<Eigen::Matrix4f> transforms(pose_num);
thrust::transform(thrust::device, poses.begin(), poses.end(), transforms.begin(), get_transform());
hipDeviceSynchronize();
// std::cout<<"Number of generated poses: "<<transforms.size()<<std::endl;
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// printf("Generate transformations: %3.1f ms \n", time);
thrust::device_vector<Eigen::Vector3f> dev_scan = scan;
int scan_size = scan.size();
thrust::device_vector<Eigen::Vector4f> dev_map = map;
int map_size = map.size();
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
    int map_odds_size = 1000*1000*1000;
thrust::device_vector<float> map_odds(map_odds_size);
thrust::fill(thrust::device, map_odds.begin(), map_odds.end(), NULL_ODDS);
hipDeviceSynchronize();
// method 1
// thrust::device_vector<int> indices(map.size());
// thrust::transform(thrust::device, dev_map.begin(), dev_map.end(), indices.begin(), get_key());
// hipDeviceSynchronize();
// thrust::permutation_iterator<thrust::device_vector<float>::iterator, thrust::device_vector<int>::iterator> iter(map_odds.begin(), indices.begin());
// thrust::transform(thrust::device, dev_map.begin(), dev_map.end(), iter, assign_value());
// hipDeviceSynchronize();
// method 2
thrust::for_each(thrust::device, dev_map.begin(), dev_map.end(), assign_value_(thrust::raw_pointer_cast(map_odds.data())));
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// printf("Generate hashmap: %3.1f ms \n", time);
// std::cout<<"Number of points in scan: "<<scan_size<<std::endl;
// std::cout<<"Number of points in map: "<<map_size<<std::endl;
// create thrust vector of thrust vector
// thrust::device_vector<Eigen::Vector3f> trans_scans[pose_num];
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
thrust::device_vector<float> scores(pose_num);
thrust::transform(thrust::device, transforms.begin(), transforms.end(), scores.begin(), faster_compute_cloud_score(thrust::raw_pointer_cast(dev_scan.data()), scan_size,
thrust::raw_pointer_cast(map_odds.data()), map_size, map_resolution));
hipDeviceSynchronize();
thrust::device_vector<float>::iterator max_element_iter = thrust::max_element(scores.begin(), scores.end());
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// printf("Calculate optimal pose: %3.1f ms \n", time);
int opt_pose_idx = max_element_iter - scores.begin();
// std::cout<<"Optimal Pose Index: "<<opt_pose_idx<<std::endl;
std::cout<<"Optimal Pose Score: "<<scores[opt_pose_idx]<<std::endl;
// thrust::host_vector<Eigen::Matrix<float, 6, 1> > host_poses = poses;
// std::cout<<"Optimal Pose: (roll)"<<host_poses[opt_pose_idx][0]<<" rad, (pitch)"
// <<host_poses[opt_pose_idx][1]<<" rad, (yaw)"
// <<host_poses[opt_pose_idx][2]<<" rad, (x)"
// <<host_poses[opt_pose_idx][3]<<" m, (y)"
// <<host_poses[opt_pose_idx][4]<<" m, (z)"
// <<host_poses[opt_pose_idx][5]<<" m"<<std::endl;
}
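// ComputeOptimalPoseV4: transforms come straight from GenerateTransform, and
// the odds grid is shrunk to the map's bounding box (MinMaxXYZ), with explicit
// bounds checks in faster_compute_point_scoreV2 for scan points that fall
// outside the map.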
void ComputeOptimalPoseV4(const std::vector<Eigen::Vector3f>& scan, const std::vector<Eigen::Vector4f>& map,
const Eigen::Vector3f& angular_init_pose, const int& angular_window_size, const float& angular_step_size,
const Eigen::Vector3f& linear_init_pose, const int& linear_window_size, const float& linear_step_size,
float& map_resolution)
{
float time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
thrust::device_vector<Eigen::Matrix4f> transforms = GenerateTransform(angular_init_pose, angular_window_size, angular_step_size,
linear_init_pose, linear_window_size, linear_step_size);
// std::cout<<"Number of generated poses: "<<transforms.size()<<std::endl;
int pose_num = transforms.size();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// printf("Time to generate transforms: %3.1f ms \n", time);
thrust::device_vector<Eigen::Vector3f> dev_scan = scan;
int scan_size = scan.size();
thrust::device_vector<Eigen::Vector4f> dev_map = map;
int map_size = map.size();
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
std::vector<int> minmax = MinMaxXYZ(dev_map, map_size);
thrust::device_vector<int> dev_offset(3);
dev_offset[0] = -minmax[0];
dev_offset[1] = -minmax[1];
dev_offset[2] = -minmax[2];
thrust::device_vector<int> dev_maplength(3);
dev_maplength[0] = minmax[3]-minmax[0]+1;
dev_maplength[1] = minmax[4]-minmax[1]+1;
dev_maplength[2] = minmax[5]-minmax[2]+1;
int map_odds_size = dev_maplength[0]*dev_maplength[1]*dev_maplength[2];
thrust::device_vector<float> map_odds(map_odds_size);
thrust::fill(thrust::device, map_odds.begin(), map_odds.end(), NULL_ODDS);
hipDeviceSynchronize();
thrust::for_each(thrust::device, dev_map.begin(), dev_map.end(), assign_valueV2_(thrust::raw_pointer_cast(map_odds.data()),
thrust::raw_pointer_cast(dev_offset.data()), thrust::raw_pointer_cast(dev_maplength.data())));
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// printf("Time to generate hashmap: %3.1f ms \n", time);
// std::cout<<"Number of points in scan: "<<scan_size<<std::endl;
// std::cout<<"Number of points in map: "<<map_size<<std::endl;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
thrust::device_vector<float> scores(pose_num);
thrust::transform(thrust::device, transforms.begin(), transforms.end(), scores.begin(), faster_compute_cloud_scoreV2(
thrust::raw_pointer_cast(dev_scan.data()), scan_size,thrust::raw_pointer_cast(map_odds.data()), map_size, map_resolution,
thrust::raw_pointer_cast(dev_offset.data()), thrust::raw_pointer_cast(dev_maplength.data()),map_odds_size));
hipDeviceSynchronize();
thrust::device_vector<float>::iterator max_element_iter = thrust::max_element(scores.begin(), scores.end());
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// printf("Time to compute optimal pose: %3.1f ms \n", time);
int opt_pose_idx = max_element_iter - scores.begin();
// std::cout<<"Optimal Pose Index: "<<opt_pose_idx<<std::endl;
std::cout<<"Optimal Pose Score: "<<scores[opt_pose_idx]<<std::endl;
}
| 3cc93560015e2e11d46d2274d732934d50704667.cu | #include "utils.cuh"
const float NULL_ODDS = 10.0;
const float ODDS_LIMIT = 5.0;
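// Grid cells are initialized to NULL_ODDS; since NULL_ODDS exceeds ODDS_LIMIT,
// cells that are never overwritten with real map odds contribute a score of 0
// in the scoring functors below.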
//V1
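// get_pose maps a window index i in [0, 2*offset] to the pose component
// (i - offset) * resolution + init_pose, i.e. a symmetric search grid centered
// on the initial estimate.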
struct get_pose
{
const int _offset;
const float _resolution;
const float _init_pose;
explicit get_pose(const int& offset, const float& resolution, const float& init_pose):
_offset(offset), _resolution(resolution), _init_pose(init_pose){}
__host__ __device__
float operator()(int index)
{
return float((index-_offset)*_resolution+_init_pose);
}
};
struct get_6dof
{
const int _loop_size_rpyxyz;
const int _loop_size_pyxyz;
const int _loop_size_yxyz;
const int _loop_size_xyz;
const int _loop_size_yz;
const int _loop_size_z;
const float* _roll_angles;
const float* _pitch_angles;
const float* _yaw_angles;
const float* _x_displacements;
const float* _y_displacements;
const float* _z_displacements;
explicit get_6dof(const int& loop_size_rpyxyz,
const int& loop_size_pyxyz,
const int& loop_size_yxyz,
const int& loop_size_xyz,
const int& loop_size_yz,
const int& loop_size_z,
const float* roll_angles,
const float* pitch_angles,
const float* yaw_angles,
const float* x_displacements,
const float* y_displacements,
const float* z_displacements
):
_loop_size_rpyxyz(loop_size_rpyxyz),
_loop_size_pyxyz(loop_size_pyxyz),
_loop_size_yxyz(loop_size_yxyz),
_loop_size_xyz(loop_size_xyz),
_loop_size_yz(loop_size_yz),
_loop_size_z(loop_size_z),
_roll_angles(roll_angles),
_pitch_angles(pitch_angles),
_yaw_angles(yaw_angles),
_x_displacements(x_displacements),
_y_displacements(y_displacements),
_z_displacements(z_displacements)
{}
__host__ __device__
Eigen::Matrix<float, 6, 1> operator()(int pose_index)
{
Eigen::Matrix<float, 6, 1> pose;
pose(0, 0) = _roll_angles[int(pose_index/_loop_size_pyxyz)];
pose(1, 0) = _pitch_angles[int(pose_index%_loop_size_pyxyz/_loop_size_yxyz)];
pose(2, 0) = _yaw_angles[int(pose_index%_loop_size_pyxyz%_loop_size_yxyz/_loop_size_xyz)];
pose(3, 0) = _x_displacements[int(pose_index%_loop_size_pyxyz%_loop_size_yxyz%_loop_size_xyz/_loop_size_yz)];
pose(4, 0) = _y_displacements[int(pose_index%_loop_size_pyxyz%_loop_size_yxyz%_loop_size_xyz%_loop_size_yz/_loop_size_z)];
pose(5, 0) = _z_displacements[int(pose_index%_loop_size_pyxyz%_loop_size_yxyz%_loop_size_xyz%_loop_size_yz%_loop_size_z)];
return pose;
}
};
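// GeneratePoses enumerates the full 6-DoF search grid: (2*angular_winsize+1)
// samples per angular axis and (2*linear_winsize+1) samples per linear axis,
// with get_6dof decoding a flat pose index via the precomputed loop sizes.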
thrust::device_vector<Eigen::Matrix<float, 6, 1> > GeneratePoses(const Eigen::Vector3f& angular_init_pose,
const int& angular_winsize, const float& angular_step, const Eigen::Vector3f& linear_init_pose,
const int& linear_winsize, const float& linear_step)
{
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
int angular_space_size = 2*angular_winsize+1;
thrust::device_vector<int> angular_indices(angular_space_size);
thrust::sequence(angular_indices.begin(), angular_indices.end());
cudaDeviceSynchronize();
float roll = angular_init_pose[0];
thrust::device_vector<float> roll_angles(angular_space_size);
thrust::transform(angular_indices.begin(), angular_indices.end(), roll_angles.begin(),
get_pose(angular_winsize, angular_step, roll));
cudaDeviceSynchronize();
float pitch = angular_init_pose[1];
thrust::device_vector<float> pitch_angles(angular_space_size);
thrust::transform(angular_indices.begin(), angular_indices.end(), pitch_angles.begin(),
get_pose(angular_winsize, angular_step, pitch));
cudaDeviceSynchronize();
float yaw = angular_init_pose[2];
thrust::device_vector<float> yaw_angles(angular_space_size);
thrust::transform(angular_indices.begin(), angular_indices.end(), yaw_angles.begin(),
get_pose(angular_winsize, angular_step, yaw));
cudaDeviceSynchronize();
int linear_space_size = 2*linear_winsize+1;
thrust::device_vector<int> linear_indices(linear_space_size);
thrust::sequence(linear_indices.begin(), linear_indices.end());
cudaDeviceSynchronize();
float x = linear_init_pose[0];
thrust::device_vector<float> x_displacements(linear_space_size);
thrust::transform(linear_indices.begin(), linear_indices.end(), x_displacements.begin(),
get_pose(linear_winsize, linear_step, x));
cudaDeviceSynchronize();
float y = linear_init_pose[1];
thrust::device_vector<float> y_displacements(linear_space_size);
thrust::transform(linear_indices.begin(), linear_indices.end(), y_displacements.begin(),
get_pose(linear_winsize, linear_step, y));
cudaDeviceSynchronize();
float z = linear_init_pose[2];
thrust::device_vector<float> z_displacements(linear_space_size);
thrust::transform(linear_indices.begin(), linear_indices.end(), z_displacements.begin(),
get_pose(linear_winsize, linear_step, z));
cudaDeviceSynchronize();
int pose_num = int(pow(angular_space_size,3)*pow(linear_space_size, 3));
thrust::device_vector<Eigen::Matrix<float, 6, 1> > poses(pose_num);
thrust::device_vector<int> pose_indices(pose_num);
thrust::sequence(pose_indices.begin(), pose_indices.end());
cudaDeviceSynchronize();
int loop_size_rpyxyz = int(pow(angular_space_size,3)*pow(linear_space_size, 3));
int loop_size_pyxyz = int(pow(angular_space_size,2)*pow(linear_space_size, 3));
int loop_size_yxyz = int(angular_space_size*pow(linear_space_size, 3));
int loop_size_xyz = int(pow(linear_space_size, 3));
int loop_size_yz = int(pow(linear_space_size, 2));
int loop_size_z = int(linear_space_size);
thrust::transform(thrust::device, pose_indices.begin(), pose_indices.end(), poses.begin(),
get_6dof(loop_size_rpyxyz, loop_size_pyxyz, loop_size_yxyz,
loop_size_xyz, loop_size_yz, loop_size_z,
thrust::raw_pointer_cast(&roll_angles[0]),
thrust::raw_pointer_cast(&pitch_angles[0]),
thrust::raw_pointer_cast(&yaw_angles[0]),
thrust::raw_pointer_cast(&x_displacements[0]),
thrust::raw_pointer_cast(&y_displacements[0]),
thrust::raw_pointer_cast(&z_displacements[0])
));
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// printf("Time to generate pose: %3.1f ms \n", time);
return poses;
}
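// get_transform converts a 6-DoF pose (roll, pitch, yaw, x, y, z) into a 4x4
// homogeneous transform using the ZYX Euler convention below (an XYZ variant
// is kept commented out).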
struct get_transform
{
__host__ __device__
Eigen::Matrix4f operator()(const Eigen::Matrix<float, 6, 1>& pose)
{
float alpha = pose[0];
float beta = pose[1];
float gamma = pose[2];
float x = pose[3];
float y = pose[4];
float z = pose[5];
Eigen::Matrix4f transform;
// ZYX order
transform << cosf(beta)*cosf(gamma), -cosf(beta)*sinf(gamma), sinf(beta), x,
sinf(alpha)*sinf(beta)*cosf(gamma)+cosf(alpha)*sinf(gamma), -sinf(alpha)*sinf(beta)*sinf(gamma)+cosf(alpha)*cosf(gamma), -sinf(alpha)*cosf(beta), y,
-cosf(alpha)*sinf(beta)*cosf(gamma)+sinf(alpha)*sinf(gamma), cosf(alpha)*sinf(beta)*sinf(gamma)+sinf(alpha)*cosf(gamma), cosf(alpha)*cosf(beta), z,
0.0, 0.0, 0.0, 1.0;
// // XYZ order
// transform << cosf(beta)*cosf(alpha), sinf(gamma)*sinf(beta)*cosf(alpha)-cosf(gamma)*sinf(alpha), cosf(gamma)*sinf(beta)*cosf(alpha)+sinf(gamma)*sinf(alpha), x,
// cosf(beta)*sinf(alpha), sinf(gamma)*sinf(beta)*sinf(alpha)+cosf(gamma)*cosf(alpha), cosf(gamma)*sinf(beta)*sinf(alpha)-sinf(gamma)*cosf(alpha), y,
// -sinf(beta), sinf(gamma)*cosf(beta), cosf(gamma)*cosf(beta), z,
// 0.0, 0.0, 0.0, 1.0;
return transform;
}
};
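// sort_map_point orders map points lexicographically by (x, y, z, odds) so that
// BinarySearchRecursive and compute_point_score can locate a cell by its first
// three components.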
struct sort_map_point
{
__host__ __device__
bool operator()(const Eigen::Vector4f& lhs, const Eigen::Vector4f& rhs)
{
return (lhs[0]<rhs[0])||
(fabs(lhs[0]-rhs[0])<1e-6&&lhs[1]<rhs[1])||
(fabs(lhs[0]-rhs[0])<1e-6&&fabs(lhs[1]-rhs[1])<1e-6&&lhs[2]<rhs[2])||
(fabs(lhs[0]-rhs[0])<1e-6&&fabs(lhs[1]-rhs[1])<1e-6&&fabs(lhs[2]-rhs[2])<1e-6&&lhs[3]<rhs[3]);
}
};
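// point_transform applies one candidate transform to a fixed scan point and
// quantizes the result to map-cell coordinates (rounded to the nearest cell).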
struct point_transform
{
const Eigen::Vector3f _point;
const float _resolution;
explicit point_transform(const Eigen::Vector3f& point, const float& resolution):
_point(point), _resolution(resolution){}
__host__ __device__
Eigen::Vector3f operator()(const Eigen::Matrix4f& transform)
{
Eigen::Vector4f homo_point;
homo_point << _point[0], _point[1], _point[2], 1.0;
Eigen::Vector4f homo_transformed_point = homo_point[0] * transform.col(0) +
homo_point[1] * transform.col(1) +
homo_point[2] * transform.col(2) +
homo_point[3] * transform.col(3);
Eigen::Vector3f transformed_point;
transformed_point << roundf(homo_transformed_point[0]/_resolution),
roundf(homo_transformed_point[1]/_resolution),
roundf(homo_transformed_point[2]/_resolution);
return transformed_point;
}
};
__host__ __device__ bool operator==(const Eigen::Vector3f& lhs, const Eigen::Vector3f& rhs)
{
return fabs(lhs[0]-rhs[0])<1e-6&&fabs(lhs[1]-rhs[1])<1e-6&&fabs(lhs[2]-rhs[2])<1e-6;
}
__host__ __device__ bool operator>(const Eigen::Vector3f& lhs, const Eigen::Vector3f& rhs)
{
return (lhs[0]>rhs[0])||(fabs(lhs[0]-rhs[0])<1e-6&&lhs[1]>rhs[1])||(fabs(lhs[0]-rhs[0])<1e-6&&fabs(lhs[1]-rhs[1])<1e-6&&lhs[2]>rhs[2]);
}
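// The Vector3f comparisons above use a 1e-6 tolerance so that quantized cell
// coordinates compare reliably; BinarySearchRecursive assumes the map is sorted
// with sort_map_point (it ignores the odds component).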
__host__ __device__ int BinarySearchRecursive(const Eigen::Vector4f* points, int low, int high, Eigen::Vector3f point)
{
if (low > high)
return -1;
int mid = low + (high - low) / 2;
Eigen::Vector3f mid_point;
mid_point << points[mid][0], points[mid][1], points[mid][2];
if (mid_point == point)
return mid;
else if (mid_point > point)
return BinarySearchRecursive(points, low, mid - 1, point);
else
return BinarySearchRecursive(points, mid + 1, high, point);
}
struct match
{
const Eigen::Vector4f* _map;
const int _size;
explicit match(Eigen::Vector4f* map, int size):_map(map), _size(size){}
__host__ __device__
float operator()(const Eigen::Vector3f& point)
{
int idx = BinarySearchRecursive(_map, 0, _size-1, point);
if(idx < 0)
{
return 0.0;
}
else
{
return _map[idx][3];
}
}
};
struct compute_score
{
const int _size;
explicit compute_score(int size):_size(size){}
__host__ __device__
float operator()(const float& sum)
{
return float(sum/float(_size));
}
};
//V2
struct compute_point_score:public thrust::unary_function<Eigen::Vector3f, float> // <arg, result>
{
Eigen::Matrix4f _transform;
int _map_size;
float _map_resolution;
Eigen::Vector4f* _map;
explicit compute_point_score(Eigen::Matrix4f transform, Eigen::Vector4f* map, int& map_size, float& map_resolution):_transform(transform), _map(map), _map_size(map_size), _map_resolution(map_resolution){}
__host__ __device__
float operator()(Eigen::Vector3f& point)
{
float score;
Eigen::Vector4f homo_point;
homo_point << point[0], point[1], point[2], 1.0;
Eigen::Vector4f homo_transformed_point = homo_point[0] * _transform.col(0) + homo_point[1] * _transform.col(1) + homo_point[2] * _transform.col(2) + homo_point[3] * _transform.col(3);
Eigen::Vector3f transformed_point;
transformed_point << roundf(homo_transformed_point[0]/_map_resolution), roundf(homo_transformed_point[1]/_map_resolution), roundf(homo_transformed_point[2]/_map_resolution);
Eigen::Vector3f mid_point;
if (_map_size <= 0)
{
return 0.0;
}
else
{
int low = 0;
int high = _map_size - 1;
while (low <= high)
{
int mid = low + (high - low) / 2;
mid_point << _map[mid][0], _map[mid][1], _map[mid][2];
if (mid_point == transformed_point)
{
score = _map[mid][3];
return score;
}
else if (mid_point > transformed_point)
{
high = mid - 1;
}
else
{
low = mid + 1;
}
}
return 0.0;
}
}
};
struct compute_cloud_score:public thrust::unary_function<Eigen::Matrix4f, float> // <arg, result>
{
Eigen::Vector3f* _scan;
int _scan_size;
Eigen::Vector4f* _map;
int _map_size;
float _map_resolution;
compute_cloud_score(Eigen::Vector3f* scan, int& scan_size, Eigen::Vector4f* map, int& map_size, float& map_resolution):_scan(scan), _scan_size(scan_size), _map(map), _map_size(map_size), _map_resolution(map_resolution){}
__host__ __device__
float operator()(const Eigen::Matrix4f& transform)
{
thrust::device_ptr<Eigen::Vector3f> dev_scan = thrust::device_pointer_cast(_scan);
float sum = thrust::transform_reduce(thrust::device, dev_scan, dev_scan+_scan_size, compute_point_score(transform, thrust::raw_pointer_cast(_map), _map_size, _map_resolution), 0.0, thrust::plus<float>());
float score = float(sum/_scan_size);
return score;
}
};
//V3
struct assign_value_
{
float* _map_odds;
explicit assign_value_(float* map_odds):_map_odds(map_odds){}
__host__ __device__
void operator()(Eigen::Vector4f map_element)
{
int key = (int(map_element[0])+500)*1000*1000+(int(map_element[1])+500)*1000+(int(map_element[2])+500);
_map_odds[key] = map_element[3];
}
};
struct get_key
{
__host__ __device__
int operator()(Eigen::Vector4f map_element)
{
int key = (int(map_element[0])+500)*1000*1000+(int(map_element[1])+500)*1000+(int(map_element[2])+500);
return key;
}
};
struct faster_compute_point_score:public thrust::unary_function<Eigen::Vector3f, float> // <arg, result>
{
Eigen::Matrix4f _transform;
int _map_size;
float _map_resolution;
float* _map_odds;
explicit faster_compute_point_score(Eigen::Matrix4f transform, float* map_odds, int& map_size, float& map_resolution):_transform(transform), _map_odds(map_odds), _map_size(map_size), _map_resolution(map_resolution){}
__host__ __device__
float operator()(Eigen::Vector3f& point)
{
float score;
Eigen::Vector4f homo_point;
homo_point << point[0], point[1], point[2], 1.0;
Eigen::Vector4f homo_transformed_point = homo_point[0] * _transform.col(0) + homo_point[1] * _transform.col(1) + homo_point[2] * _transform.col(2) + homo_point[3] * _transform.col(3);
Eigen::Vector3f transformed_point;
transformed_point << roundf(homo_transformed_point[0]/_map_resolution), roundf(homo_transformed_point[1]/_map_resolution), roundf(homo_transformed_point[2]/_map_resolution);
int key = (int(transformed_point[0])+500)*1000000+(int(transformed_point[1])+500)*1000+(int(transformed_point[2])+500);
if(_map_odds[key] > ODDS_LIMIT)
{
return 0.0;
}
else
{
return _map_odds[key];
}
}
};
struct faster_compute_cloud_score:public thrust::unary_function<Eigen::Matrix4f, float> // <arg, result>
{
Eigen::Vector3f* _scan;
int _scan_size;
float* _map_odds;
int _map_size;
float _map_resolution;
explicit faster_compute_cloud_score(Eigen::Vector3f* scan, int& scan_size, float* map_odds, int& map_size, float& map_resolution):_scan(scan), _scan_size(scan_size), _map_odds(map_odds), _map_size(map_size), _map_resolution(map_resolution){}
__host__ __device__
float operator()(const Eigen::Matrix4f& transform)
{
thrust::device_ptr<Eigen::Vector3f> dev_scan = thrust::device_pointer_cast(_scan);
float sum = thrust::transform_reduce(thrust::device, dev_scan, dev_scan+_scan_size, faster_compute_point_score(transform, thrust::raw_pointer_cast(_map_odds), _map_size, _map_resolution), 0.0, thrust::plus<float>());
float score = float(sum/_scan_size);
return score;
}
};
//V4
struct cal_transform{
int* _loop_size;
Eigen::Vector3f _angular_init_pose;
int _angular_winsize;
float _angular_step;
Eigen::Vector3f _linear_init_pose;
int _linear_winsize;
float _linear_step;
explicit cal_transform(int* loop_size,const Eigen::Vector3f& angular_init_pose, const int& angular_winsize, const float& angular_step,
const Eigen::Vector3f& linear_init_pose, const int& linear_winsize, const float& linear_step):
_loop_size(loop_size),_angular_init_pose(angular_init_pose),_angular_winsize(angular_winsize),_angular_step(angular_step),
_linear_init_pose(linear_init_pose),_linear_winsize(linear_winsize),_linear_step(linear_step){}
__host__ __device__
Eigen::Matrix4f operator()(const int& pose_seq){
Eigen::Matrix4f transform;
int roll = pose_seq/_loop_size[0];
int pit = pose_seq%_loop_size[0]/_loop_size[1];
int yaw = pose_seq%_loop_size[0]%_loop_size[1]/_loop_size[2];
int x1 = pose_seq%_loop_size[0]%_loop_size[1]%_loop_size[2]/_loop_size[3];
int y1 = pose_seq%_loop_size[0]%_loop_size[1]%_loop_size[2]%_loop_size[3]/_loop_size[4];
int z1 = pose_seq%_loop_size[0]%_loop_size[1]%_loop_size[2]%_loop_size[3]%_loop_size[4];
float alpha = (roll-_angular_winsize)*_angular_step + _angular_init_pose[0];
float beta = (pit-_angular_winsize)*_angular_step + _angular_init_pose[1];
float gamma = (yaw-_angular_winsize)*_angular_step + _angular_init_pose[2];
float x = (x1-_linear_winsize)*_linear_step + _linear_init_pose[0];
float y = (y1-_linear_winsize)*_linear_step + _linear_init_pose[1];
float z = (z1-_linear_winsize)*_linear_step + _linear_init_pose[2];
transform << cosf(beta)*cosf(gamma), -cosf(beta)*sinf(gamma), sinf(beta), x,
sinf(alpha)*sinf(beta)*cosf(gamma)+cosf(alpha)*sinf(gamma), -sinf(alpha)*sinf(beta)*sinf(gamma)+cosf(alpha)*cosf(gamma), -sinf(alpha)*cosf(beta), y,
-cosf(alpha)*sinf(beta)*cosf(gamma)+sinf(alpha)*sinf(gamma), cosf(alpha)*sinf(beta)*sinf(gamma)+sinf(alpha)*cosf(gamma), cosf(alpha)*cosf(beta), z,
0.0, 0.0, 0.0, 1.0;
// // XYZ order
// transform << cosf(beta)*cosf(alpha), sinf(gamma)*sinf(beta)*cosf(alpha)-cosf(gamma)*sinf(alpha), cosf(gamma)*sinf(beta)*cosf(alpha)+sinf(gamma)*sinf(alpha), x,
// cosf(beta)*sinf(alpha), sinf(gamma)*sinf(beta)*sinf(alpha)+cosf(gamma)*cosf(alpha), cosf(gamma)*sinf(beta)*sinf(alpha)-sinf(gamma)*cosf(alpha), y,
// -sinf(beta), sinf(gamma)*cosf(beta), cosf(gamma)*cosf(beta), z,
// 0.0, 0.0, 0.0, 1.0;
return transform;
}
};
thrust::device_vector<Eigen::Matrix4f> GenerateTransform(const Eigen::Vector3f& angular_init_pose, const int& angular_window_size, const float& angular_step_size,
const Eigen::Vector3f& linear_init_pose, const int& linear_window_size, const float& linear_step_size){
int angular_space_size = 2*angular_window_size+1;
int linear_space_size = 2*linear_window_size+1;
int pose_num = int(pow(angular_space_size,3)*pow(linear_space_size, 3));
thrust::device_vector<Eigen::Matrix4f> transforms(pose_num);
int loop_size_rpyxyz = pose_num;
int loop_size_pyxyz = loop_size_rpyxyz/angular_space_size;
int loop_size_yxyz = loop_size_pyxyz/angular_space_size;
int loop_size_xyz = loop_size_yxyz/angular_space_size;
int loop_size_yz = loop_size_xyz/linear_space_size;
int loop_size_z = loop_size_yz/linear_space_size;
thrust::device_vector<int> loop_gap(6);
loop_gap[0] = loop_size_pyxyz;
loop_gap[1] = loop_size_yxyz;
    loop_gap[2] = loop_size_xyz;
    loop_gap[3] = loop_size_yz;
    loop_gap[4] = loop_size_z;
thrust::device_vector<int> pose_seq(pose_num);
thrust::sequence(pose_seq.begin(), pose_seq.end());
cudaDeviceSynchronize();
thrust::transform(thrust::device, pose_seq.begin(), pose_seq.end(), transforms.begin(), cal_transform(
thrust::raw_pointer_cast(loop_gap.data()),angular_init_pose,angular_window_size,angular_step_size,
linear_init_pose, linear_window_size, linear_step_size));
cudaDeviceSynchronize();
return transforms;
}
struct get_element{
int x_;
explicit get_element(int x):x_(x){}
__host__ __device__
    int operator()(const Eigen::Vector4f& point){
return int(point[x_]);
}
};
std::vector<int> MinMaxXYZ(thrust::device_vector<Eigen::Vector4f>& dev_map, int map_size){
thrust::device_vector<int> point_x(map_size);
thrust::device_vector<int> point_y(map_size);
thrust::device_vector<int> point_z(map_size);
thrust::transform(thrust::device, dev_map.begin(), dev_map.end(), point_x.begin(), get_element(0));
thrust::transform(thrust::device, dev_map.begin(), dev_map.end(), point_y.begin(), get_element(1));
thrust::transform(thrust::device, dev_map.begin(), dev_map.end(), point_z.begin(), get_element(2));
cudaDeviceSynchronize();
thrust::pair<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator> resx = thrust::minmax_element(thrust::device, point_x.begin(), point_x.end());
thrust::pair<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator> resy = thrust::minmax_element(thrust::device, point_y.begin(), point_y.end());
thrust::pair<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator> resz = thrust::minmax_element(thrust::device, point_z.begin(), point_z.end());
cudaDeviceSynchronize();
// int min_x = *(thrust::min_element(thrust::device, point_x.begin(), point_x.end()));
// int max_x = *(thrust::max_element(thrust::device, point_x.begin(), point_x.end()));
// int min_y = *(thrust::min_element(thrust::device, point_y.begin(), point_y.end()));
// int max_y = *(thrust::max_element(thrust::device, point_y.begin(), point_y.end()));
// int min_z = *(thrust::min_element(thrust::device, point_z.begin(), point_z.end()));
// int max_z = *(thrust::max_element(thrust::device, point_z.begin(), point_z.end()));
std::vector<int> result;
result.push_back(*resx.first);
result.push_back(*resy.first);
result.push_back(*resz.first);
result.push_back(*resx.second);
result.push_back(*resy.second);
result.push_back(*resz.second);
return result;
}
struct assign_valueV2_
{
float* _map_odds;
int* _offset;
int* _map_length;
explicit assign_valueV2_(float* map_odds, int* offset, int* map_length):_map_odds(map_odds),_offset(offset),_map_length(map_length){}
__host__ __device__
void operator()(Eigen::Vector4f map_element)
{
int key = (int(map_element[0])+_offset[0])*_map_length[1]*_map_length[2]+(int(map_element[1])+_offset[1])*_map_length[2]+(int(map_element[2])+_offset[2]);
_map_odds[key] = map_element[3];
}
};
struct faster_compute_point_scoreV2:public thrust::unary_function<Eigen::Vector3f, float> // <arg, result>
{
Eigen::Matrix4f _transform;
int _map_size;
float _map_resolution;
float* _map_odds;
int* _offset;
int* _map_length;
int _map_odds_size;
explicit faster_compute_point_scoreV2(Eigen::Matrix4f transform, float* map_odds, int& map_size, float& map_resolution, int* offset, int* map_length, int map_odds_size):
_transform(transform), _map_odds(map_odds), _map_size(map_size), _map_resolution(map_resolution), _offset(offset), _map_length(map_length),_map_odds_size(map_odds_size){}
__host__ __device__
float operator()(Eigen::Vector3f& point)
{
float score;
Eigen::Vector4f homo_point;
homo_point << point[0], point[1], point[2], 1.0;
Eigen::Vector4f homo_transformed_point = homo_point[0] * _transform.col(0) + homo_point[1] * _transform.col(1) + homo_point[2] * _transform.col(2) + homo_point[3] * _transform.col(3);
Eigen::Vector3f transformed_point;
transformed_point << roundf(homo_transformed_point[0]/_map_resolution), roundf(homo_transformed_point[1]/_map_resolution), roundf(homo_transformed_point[2]/_map_resolution);
int tp_x = int(transformed_point[0])+_offset[0];
int tp_y = int(transformed_point[1])+_offset[1];
int tp_z = int(transformed_point[2])+_offset[2];
if(tp_x<0 || tp_x>=_map_length[0] || tp_y<0 || tp_y>=_map_length[1] || tp_z<0 || tp_z>=_map_length[2] ){
return 0.0;
}
int key = tp_x*_map_length[1]*_map_length[2]+tp_y*_map_length[2]+tp_z;
if(_map_odds[key] > ODDS_LIMIT)
{
return 0.0;
}
else
{
return _map_odds[key];
}
}
};
struct faster_compute_cloud_scoreV2:public thrust::unary_function<Eigen::Matrix4f, float> // <arg, result>
{
Eigen::Vector3f* _scan;
int _scan_size;
float* _map_odds;
int _map_size;
float _map_resolution;
int* _offset;
int* _map_length;
int _map_odds_size;
explicit faster_compute_cloud_scoreV2(Eigen::Vector3f* scan, int& scan_size, float* map_odds, int& map_size, float& map_resolution, int* offset, int* map_length, int map_odds_size):
_scan(scan), _scan_size(scan_size), _map_odds(map_odds), _map_size(map_size), _map_resolution(map_resolution),_offset(offset),_map_length(map_length),_map_odds_size(map_odds_size){}
__host__ __device__
float operator()(const Eigen::Matrix4f& transform)
{
thrust::device_ptr<Eigen::Vector3f> dev_scan = thrust::device_pointer_cast(_scan);
// float sum = thrust::transform_reduce(thrust::device, dev_scan, dev_scan+_scan_size, faster_compute_point_score(transform, thrust::raw_pointer_cast(_map_odds), _map_size, _map_resolution), 0.0, thrust::plus<float>());
float sum = thrust::transform_reduce(thrust::device, dev_scan, dev_scan+_scan_size, faster_compute_point_scoreV2(
transform, thrust::raw_pointer_cast(_map_odds), _map_size, _map_resolution,thrust::raw_pointer_cast(_offset),thrust::raw_pointer_cast(_map_length),_map_odds_size), 0.0, thrust::plus<float>());
float score = float(sum/_scan_size);
return score;
}
};
//__host__ __device__ bool operator==(const Eigen::Vector4f& lhs, const Eigen::Vector4f& rhs)
//{
// return fabs(lhs[0]-rhs[0])<1e-6&&fabs(lhs[1]-rhs[1])<1e-6&&fabs(lhs[2]-rhs[2])<1e-6;
//}
//
//__host__ __device__ bool operator>(const Eigen::Vector4f& lhs, const Eigen::Vector4f& rhs)
//{
// return (lhs[0]>rhs[0])||(fabs(lhs[0]-rhs[0])<1e-6&&lhs[1]>rhs[1])||(fabs(lhs[0]-rhs[0])<1e-6&&fabs(lhs[1]-rhs[1])<1e-6&&lhs[2]>rhs[2])||(fabs(lhs[0]-rhs[0])<1e-6&&fabs(lhs[1]-rhs[1])<1e-6&&fabs(lhs[2]-rhs[2])<1e-6&&lhs[3]>rhs[3]);
//}
void ComputeOptimalPoseV1(const std::vector<Eigen::Vector3f>& scan, const std::vector<Eigen::Vector4f>& map,
const Eigen::Vector3f& angular_init_pose, const int& angular_window_size, const float& angular_step_size,
const Eigen::Vector3f& linear_init_pose, const int& linear_window_size, const float& linear_step_size,
const float& map_resolution)
{
thrust::device_vector<Eigen::Matrix<float, 6, 1> > poses = GeneratePoses(
angular_init_pose, angular_window_size, angular_step_size,
linear_init_pose, linear_window_size, linear_step_size);
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
thrust::device_vector<Eigen::Matrix4f> transforms(poses.size());
thrust::transform(thrust::device, poses.begin(), poses.end(), transforms.begin(), get_transform());
cudaDeviceSynchronize();
// std::cout<<"Number of generated poses: "<<transforms.size()<<std::endl;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// printf("Time to generate transforms: %3.1f ms \n", time);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
thrust::device_vector<Eigen::Vector3f> trans_point(poses.size());
thrust::device_vector<float> score_tile(poses.size());
thrust::device_vector<float> score_bins(poses.size());
thrust::fill(thrust::device, score_bins.begin(), score_bins.end(), 0.0);
cudaDeviceSynchronize();
int map_size = map.size();
int scan_size = scan.size();
thrust::device_vector<Eigen::Vector4f> dev_map = map;
thrust::sort(thrust::device, dev_map.begin(), dev_map.end(), sort_map_point());
cudaDeviceSynchronize();
// std::cout<<"Number of points in scan: "<<scan_size<<std::endl;
// std::cout<<"Number of points in map: "<<map_size<<std::endl;
for(int i = 0 ; i < scan.size(); i++)
{
thrust::transform(thrust::device, transforms.begin(), transforms.end(), trans_point.begin(),
point_transform(scan[i], map_resolution));
cudaDeviceSynchronize();
thrust::transform(thrust::device, trans_point.begin(), trans_point.end(), score_tile.begin(),
match(thrust::raw_pointer_cast(&dev_map[0]), map_size));
cudaDeviceSynchronize();
thrust::transform(thrust::device, score_bins.begin(), score_bins.end(), score_tile.begin(), score_bins.begin(), thrust::plus<float>());
cudaDeviceSynchronize();
}
thrust::transform(thrust::device, score_bins.begin(), score_bins.end(), score_bins.begin(), compute_score(scan_size));
cudaDeviceSynchronize();
thrust::device_vector<float>::iterator max_element_iter = thrust::max_element(score_bins.begin(), score_bins.end());
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// printf("Time to compute optimal pose: %3.1f ms \n", time);
int opt_pose_idx = max_element_iter - score_bins.begin();
// std::cout<<"Optimal Pose Index: "<<opt_pose_idx<<std::endl;
std::cout<<"Optimal Pose Score: "<<score_bins[opt_pose_idx]<<std::endl;
// thrust::host_vector<Eigen::Matrix<float, 6, 1> > host_poses = poses;
// std::cout<<"Optimal Pose: (roll)"<<host_poses[opt_pose_idx][0]<<" rad, (pitch)"
// <<host_poses[opt_pose_idx][1]<<" rad, (yaw)"
// <<host_poses[opt_pose_idx][2]<<" rad, (x)"
// <<host_poses[opt_pose_idx][3]<<" m, (y)"
// <<host_poses[opt_pose_idx][4]<<" m, (z)"
// <<host_poses[opt_pose_idx][5]<<" m"<<std::endl;
}
void ComputeOptimalPoseV2(const std::vector<Eigen::Vector3f>& scan, const std::vector<Eigen::Vector4f>& map,
const Eigen::Vector3f& angular_init_pose, const int& angular_window_size, const float& angular_step_size,
const Eigen::Vector3f& linear_init_pose, const int& linear_window_size, const float& linear_step_size,
float& map_resolution)
{
thrust::device_vector<Eigen::Matrix<float, 6, 1> > poses = GeneratePoses(angular_init_pose, angular_window_size, angular_step_size, linear_init_pose, linear_window_size, linear_step_size);
int pose_num = poses.size();
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
thrust::device_vector<Eigen::Matrix4f> transforms(pose_num);
thrust::transform(thrust::device, poses.begin(), poses.end(), transforms.begin(), get_transform());
cudaDeviceSynchronize();
// std::cout<<"Number of generated poses: "<<transforms.size()<<std::endl;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// printf("Time to generate transforms: %3.1f ms \n", time);
thrust::device_vector<Eigen::Vector3f> dev_scan = scan;
int scan_size = scan.size();
thrust::device_vector<Eigen::Vector4f> dev_map = map;
int map_size = map.size();
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
thrust::sort(thrust::device, dev_map.begin(), dev_map.end(), sort_map_point());
cudaDeviceSynchronize();
// std::cout<<"Number of points in scan: "<<scan_size<<std::endl;
// std::cout<<"Number of points in map: "<<map_size<<std::endl;
thrust::device_vector<float> scores(pose_num);
thrust::transform(thrust::device, transforms.begin(), transforms.end(), scores.begin(), compute_cloud_score(thrust::raw_pointer_cast(dev_scan.data()), scan_size,
thrust::raw_pointer_cast(dev_map.data()), map_size, map_resolution));
cudaDeviceSynchronize();
thrust::device_vector<float>::iterator max_element_iter = thrust::max_element(scores.begin(), scores.end());
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// printf("Time to compute optimal pose: %3.1f ms \n", time);
int opt_pose_idx = max_element_iter - scores.begin();
// std::cout<<"Optimal Pose Index: "<<opt_pose_idx<<std::endl;
std::cout<<"Optimal Pose Score: "<<scores[opt_pose_idx]<<std::endl;
// thrust::host_vector<Eigen::Matrix<float, 6, 1> > host_poses = poses;
// std::cout<<"Optimal Pose: (roll)"<<host_poses[opt_pose_idx][0]<<" rad, (pitch)"
// <<host_poses[opt_pose_idx][1]<<" rad, (yaw)"
// <<host_poses[opt_pose_idx][2]<<" rad, (x)"
// <<host_poses[opt_pose_idx][3]<<" m, (y)"
// <<host_poses[opt_pose_idx][4]<<" m, (z)"
// <<host_poses[opt_pose_idx][5]<<" m"<<std::endl;
}
void ComputeOptimalPoseV3(const std::vector<Eigen::Vector3f>& scan, const std::vector<Eigen::Vector4f>& map,
const Eigen::Vector3f& angular_init_pose, const int& angular_window_size, const float& angular_step_size,
const Eigen::Vector3f& linear_init_pose, const int& linear_window_size, const float& linear_step_size,
float& map_resolution)
{
thrust::device_vector<Eigen::Matrix<float, 6, 1> > poses = GeneratePoses(angular_init_pose, angular_window_size, angular_step_size, linear_init_pose, linear_window_size, linear_step_size);
int pose_num = poses.size();
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
thrust::device_vector<Eigen::Matrix4f> transforms(pose_num);
thrust::transform(thrust::device, poses.begin(), poses.end(), transforms.begin(), get_transform());
cudaDeviceSynchronize();
// std::cout<<"Number of generated poses: "<<transforms.size()<<std::endl;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// printf("Generate transformations: %3.1f ms \n", time);
thrust::device_vector<Eigen::Vector3f> dev_scan = scan;
int scan_size = scan.size();
thrust::device_vector<Eigen::Vector4f> dev_map = map;
int map_size = map.size();
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
    int map_odds_size = 1000*1000*1000;
thrust::device_vector<float> map_odds(map_odds_size);
thrust::fill(thrust::device, map_odds.begin(), map_odds.end(), NULL_ODDS);
cudaDeviceSynchronize();
// method 1
// thrust::device_vector<int> indices(map.size());
// thrust::transform(thrust::device, dev_map.begin(), dev_map.end(), indices.begin(), get_key());
// cudaDeviceSynchronize();
// thrust::permutation_iterator<thrust::device_vector<float>::iterator, thrust::device_vector<int>::iterator> iter(map_odds.begin(), indices.begin());
// thrust::transform(thrust::device, dev_map.begin(), dev_map.end(), iter, assign_value());
// cudaDeviceSynchronize();
// method 2
thrust::for_each(thrust::device, dev_map.begin(), dev_map.end(), assign_value_(thrust::raw_pointer_cast(map_odds.data())));
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// printf("Generate hashmap: %3.1f ms \n", time);
// std::cout<<"Number of points in scan: "<<scan_size<<std::endl;
// std::cout<<"Number of points in map: "<<map_size<<std::endl;
// create thrust vector of thrust vector
// thrust::device_vector<Eigen::Vector3f> trans_scans[pose_num];
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
thrust::device_vector<float> scores(pose_num);
thrust::transform(thrust::device, transforms.begin(), transforms.end(), scores.begin(), faster_compute_cloud_score(thrust::raw_pointer_cast(dev_scan.data()), scan_size,
thrust::raw_pointer_cast(map_odds.data()), map_size, map_resolution));
cudaDeviceSynchronize();
thrust::device_vector<float>::iterator max_element_iter = thrust::max_element(scores.begin(), scores.end());
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// printf("Calculate optimal pose: %3.1f ms \n", time);
int opt_pose_idx = max_element_iter - scores.begin();
// std::cout<<"Optimal Pose Index: "<<opt_pose_idx<<std::endl;
std::cout<<"Optimal Pose Score: "<<scores[opt_pose_idx]<<std::endl;
// thrust::host_vector<Eigen::Matrix<float, 6, 1> > host_poses = poses;
// std::cout<<"Optimal Pose: (roll)"<<host_poses[opt_pose_idx][0]<<" rad, (pitch)"
// <<host_poses[opt_pose_idx][1]<<" rad, (yaw)"
// <<host_poses[opt_pose_idx][2]<<" rad, (x)"
// <<host_poses[opt_pose_idx][3]<<" m, (y)"
// <<host_poses[opt_pose_idx][4]<<" m, (z)"
// <<host_poses[opt_pose_idx][5]<<" m"<<std::endl;
}
void ComputeOptimalPoseV4(const std::vector<Eigen::Vector3f>& scan, const std::vector<Eigen::Vector4f>& map,
const Eigen::Vector3f& angular_init_pose, const int& angular_window_size, const float& angular_step_size,
const Eigen::Vector3f& linear_init_pose, const int& linear_window_size, const float& linear_step_size,
float& map_resolution)
{
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
thrust::device_vector<Eigen::Matrix4f> transforms = GenerateTransform(angular_init_pose, angular_window_size, angular_step_size,
linear_init_pose, linear_window_size, linear_step_size);
// std::cout<<"Number of generated poses: "<<transforms.size()<<std::endl;
int pose_num = transforms.size();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// printf("Time to generate transforms: %3.1f ms \n", time);
thrust::device_vector<Eigen::Vector3f> dev_scan = scan;
int scan_size = scan.size();
thrust::device_vector<Eigen::Vector4f> dev_map = map;
int map_size = map.size();
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
std::vector<int> minmax = MinMaxXYZ(dev_map, map_size);
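// shift voxel indices by the map minimum so they are non-negative, and size the
// odds grid to the occupied bounding box rather than a fixed worst-case volume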
thrust::device_vector<int> dev_offset(3);
dev_offset[0] = -minmax[0];
dev_offset[1] = -minmax[1];
dev_offset[2] = -minmax[2];
thrust::device_vector<int> dev_maplength(3);
dev_maplength[0] = minmax[3]-minmax[0]+1;
dev_maplength[1] = minmax[4]-minmax[1]+1;
dev_maplength[2] = minmax[5]-minmax[2]+1;
int map_odds_size = dev_maplength[0]*dev_maplength[1]*dev_maplength[2];
thrust::device_vector<float> map_odds(map_odds_size);
thrust::fill(thrust::device, map_odds.begin(), map_odds.end(), NULL_ODDS);
cudaDeviceSynchronize();
thrust::for_each(thrust::device, dev_map.begin(), dev_map.end(), assign_valueV2_(thrust::raw_pointer_cast(map_odds.data()),
thrust::raw_pointer_cast(dev_offset.data()), thrust::raw_pointer_cast(dev_maplength.data())));
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// printf("Time to generate hashmap: %3.1f ms \n", time);
// std::cout<<"Number of points in scan: "<<scan_size<<std::endl;
// std::cout<<"Number of points in map: "<<map_size<<std::endl;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
thrust::device_vector<float> scores(pose_num);
thrust::transform(thrust::device, transforms.begin(), transforms.end(), scores.begin(), faster_compute_cloud_scoreV2(
thrust::raw_pointer_cast(dev_scan.data()), scan_size,thrust::raw_pointer_cast(map_odds.data()), map_size, map_resolution,
thrust::raw_pointer_cast(dev_offset.data()), thrust::raw_pointer_cast(dev_maplength.data()),map_odds_size));
cudaDeviceSynchronize();
thrust::device_vector<float>::iterator max_element_iter = thrust::max_element(scores.begin(), scores.end());
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// printf("Time to compute optimal pose: %3.1f ms \n", time);
int opt_pose_idx = max_element_iter - scores.begin();
// std::cout<<"Optimal Pose Index: "<<opt_pose_idx<<std::endl;
std::cout<<"Optimal Pose Score: "<<scores[opt_pose_idx]<<std::endl;
}
|
a447cfb4583ba1aa55356d011a8f1f471594cb20.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_process.cuh"
#include "cuda_module.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "helper_functions.h"
#include "helper_cuda.h"
#include <stdio.h>
#include <iostream>
#include <hipfft.h>
#include <complex>
//int main(int argc, char **argv){}
//The cufft must be invoked by the host, not as part of a kernel.
//num_wins is the total number of windows, without averaging. Therefore total windows should be number of output frames * averaging to ensure no wasted samples.
float* dothething_overlap(std::complex<short>* h_samp_arry, const int averaging, float* h_out, const int num_wins, const int overlap) {
bool error = false;
/*
if (num_wins == 0) {
std::cout << "AMG NO WINS!\n";
num_wins = sizeof(h_samp_arry) / (2 * NUM_SAMPS);
std::cout << "Number of windows: " << num_wins << std::endl;
} */
//const int num_wins = 1;
//hipComplex* samp[NUM_SAMPS];
//std::complex<short>* d_samp;
hipError_t cudaStatus;
// for outputting of averaged and processed samples /
h_out = (float*)malloc((sizeof(float) * NUM_SAMPS * (num_wins*overlap)/averaging));
//h_out = (float*)calloc(NUM_SAMPS * num_wins / averaging, sizeof(float));
if (h_out == NULL) {
fprintf(stderr, "h_out Malloc failed!");
goto Error;
}
float* d_out;
cuComplexShort* h_samp_ptr = (cuComplexShort*)&h_samp_arry[0];
//std::cout << h_samp_arry[0].real() << "," << h_samp_arry[0].imag() << " cuCmplx" << h_samp_ptr[0].x << "," << h_samp_ptr[0].y << std::endl;
float h_win[NUM_SAMPS];
float* d_win;
cuComplexShort* d_samp;
hipComplex* d_fftbuff;
float win_power = 0;
int rx_gain = 30;
//Create coefficient array and x axis index for plotting
for (int i = 0; i < NUM_SAMPS; i++) {
h_win[i] = 0.35875 - 0.48829*cos(2 * pi*i / (NUM_SAMPS - 1)) + 0.14128*cos(4 * pi*i / (NUM_SAMPS - 1)) - 0.01168*cos(6 * pi*i / (NUM_SAMPS - 1)); //blackmann harris window
win_power += (h_win[i] * h_win[i]); //this computes the total window power and normalises it to account for DC gain due to the window.
}
win_power /= NUM_SAMPS; //normalise the total window power across each sample.
const float offset = -10 - rx_gain + 10 * std::log10(win_power); //-10 is the MAX power detected by the ADC and take into account the gain of the frontend.
//printf("GPU Offset: %f", offset);
//allocate the memory for the GPU
cudaStatus = hipMalloc((float**)&d_out, (sizeof(float)*NUM_SAMPS * (num_wins*overlap) / averaging));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc d_out failed!");
error = true;
goto Error;
}
hipMemset(d_out, 0, (sizeof(float)*NUM_SAMPS * (num_wins*overlap) / averaging)); //initialise to zero
cudaStatus = hipMalloc((cuComplexShort**)&d_samp, sizeof(cuComplexShort)*NUM_SAMPS*num_wins);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc d_samp failed!");
error = true;
goto Error;
}
cudaStatus = hipMalloc((float**)&d_win, sizeof(float)*NUM_SAMPS);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc d_win failed!");
error = true;
goto Error;
}
cudaStatus = hipMalloc((hipComplex**)&d_fftbuff, sizeof(hipComplex)*NUM_SAMPS*num_wins*overlap);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc d_fftbuff failed!");
error = true;
goto Error;
}
//Transfer data to GPU
cudaStatus = hipMemcpy(d_win, h_win, sizeof(float)*NUM_SAMPS, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy win to Device failed!");
error = true;
goto Error;
}
cudaStatus = hipMemcpy(d_samp, h_samp_ptr, sizeof(cuComplexShort)*NUM_SAMPS*num_wins, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy samp to Device failed!");
error = true;
goto Error;
}
//Create cufft plan, turns out cufft handles its own memory transfers, so we must use callbacks in order to avoid numerous reads and writes in the device
//Will however use multiple kernels initially, then see what the performance improvement is with callbacks at a later stage. n.n
hipfftHandle plan;
hipfftPlan1d(&plan, NUM_SAMPS, HIPFFT_C2C, num_wins*overlap);
//printf("h_samp[%d]=%f,%f ", 0, s_ptr[0].x, s_ptr[0].y);
//printf("d_samp[%d]=%f,%f\n", 0,d_samp[0].x,d_samp[0].y);
// Kernel calls lah <<blocks,threads>>
hipLaunchKernelGGL(( cufft_prep_overlap), dim3((NUM_SAMPS*num_wins) / CU_THD), dim3(CU_THD) , 0, 0, d_fftbuff, d_samp, d_win, num_wins, overlap); //This will create (NUM_SAMPS*num_wins)/CU_THD blocks, with CU_THD threads per block
//inplace fft
if (hipfftExecC2C(plan, d_fftbuff, d_fftbuff, HIPFFT_FORWARD)){
fprintf(stderr, "CUFFT error: ExecC2C Forward failed");
error = true;
goto Error;
}
//Do something with the fft'd samples, like average them, then output them to the host, where the host can perform detection.
hipLaunchKernelGGL(( avg_out_overlap) , dim3(NUM_SAMPS / CU_THD), dim3(CU_THD) , 0, 0, d_out, d_fftbuff, num_wins, averaging, offset, overlap);
cudaStatus = hipMemcpy(h_out, d_out, sizeof(float)*NUM_SAMPS * ((num_wins*overlap)/averaging), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy out to Host failed!");
error = true;
goto Error;
}
/*
std::cout << "GPU: ";
for (int i = 0; i < NUM_SAMPS; i++) {
std::cout << h_win[i] << ",";
}
std::cout << "Please note these are not flipped around samples/2 correctly" << std::endl;
*/
Error:
hipfftDestroy(plan);
checkCudaErrors(hipFree(d_out));
checkCudaErrors(hipFree(d_samp));
checkCudaErrors(hipFree(d_win));
checkCudaErrors(hipFree(d_fftbuff));
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
error = true;
}
if (error) {
fprintf(stderr, "\nGPU ERROPR!\n");
system("pause");
}
return h_out;
}
//Kernel Call
//https://devblogs.nvidia.com/parallelforall/cuda-pro-tip-use-cufft-callbacks-custom-data-processing/ for inspiration
static __global__ void cufft_prep_overlap(hipComplex* d_fft, cuComplexShort* d_s, float* d_w, const int num_wins, int overlap) {
int idx = threadIdx.x;
//blockDim = number of threads in a block
//This will take an array of complex shorts (14b samples) an array of hipComplex and a window array, will convert the com_short to hipComplex (com_float), correctly scale the samples and apply the appropriate window prepping it for fft
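	//i walks the expanded (overlapped) FFT buffer while j walks the raw capture; j advances at 1/overlap of the rate of i, so each input sample is windowed into several overlapping FFT frames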
	for (int i = blockIdx.x * blockDim.x + idx, j = blockIdx.x * blockDim.x + idx; i < NUM_SAMPS*num_wins*overlap; i += blockDim.x * gridDim.x, j += (blockDim.x * gridDim.x) / overlap){
d_fft[i].x = (d_s[j].x*1.0f / 32767.0f) * d_w[i%NUM_SAMPS];
d_fft[i].y = (d_s[j].y*1.0f / 32767.0f) * d_w[i%NUM_SAMPS];
}
//if(idx == 0) printf("d_s[%d]: %f,%f fftbuff %f,%f\n", idx, d_s[idx].x, d_s[idx].y, d_s[idx].x, d_s[idx].x);
}
static __global__ void avg_out_overlap(float* out, hipComplex* d_fft, const int num_wins, const int averaging, const float offset, int overlap) {
int idx = threadIdx.x;
float* out_ptr = &out[0];
hipComplex* d_fft_ptr = &d_fft[0];
const float threshold = -113;
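	//each output frame accumulates 'averaging' FFT windows; the (NUM_SAMPS/2 + i) % NUM_SAMPS index applies an fftshift so DC sits at the centre of the frame, and the final line maps the averaged dB value (plus the calibration offset) to 1 if it is at or below the threshold, else 0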
for (int j = 0; j < (num_wins*overlap)/averaging; j++){
for (int i = blockIdx.x * blockDim.x + idx; i < NUM_SAMPS*averaging; i += blockDim.x * gridDim.x){
out_ptr[((NUM_SAMPS/2)+i)%NUM_SAMPS] += (
10 * log10(abs(d_fft_ptr[i].x * d_fft_ptr[i].x + d_fft_ptr[i].y * d_fft_ptr[i].y) / NUM_SAMPS) //DFT bin magnitude
);
}
// __syncthreads();
out_ptr[(NUM_SAMPS / 2 + blockIdx.x * blockDim.x + idx) % NUM_SAMPS] = ((out_ptr[(NUM_SAMPS / 2 + blockIdx.x * blockDim.x + idx) % NUM_SAMPS] / averaging + offset) <= threshold) ? 1 : 0;
//out_ptr[(NUM_SAMPS / 2 + blockIdx.x * blockDim.x + idx) % NUM_SAMPS] = (out_ptr[(NUM_SAMPS / 2 + blockIdx.x * blockDim.x + idx) % NUM_SAMPS] / averaging + offset);// <= threshold) ? 1 : 0;
// if (out_ptr[blockIdx.x * blockDim.x + idx] <= threshold) out_ptr[blockIdx.x * blockDim.x + idx] = 1;
// else out_ptr[blockIdx.x * blockDim.x + idx] = 0;
out_ptr += NUM_SAMPS; //increment out_ptr by one frame of averages
d_fft_ptr += NUM_SAMPS*averaging; //increment d_fft_ptr by number of frames averaged
}
}
/* BACKUP LOL
static __global__ void avg_out(float* out, hipComplex* d_fft, const int num_wins, const int averaging) {
int idx = threadIdx.x;
float* out_ptr = &out[0];
hipComplex* d_fft_ptr = &d_fft[0];
for (int j = 0; j < num_wins / averaging; j++){
for (int i = blockIdx.x * blockDim.x + idx; i < NUM_SAMPS*averaging; i += blockDim.x * gridDim.x){
out_ptr[i%NUM_SAMPS] += (
10 * log10(abs(d_fft_ptr[i].x * d_fft_ptr[i].x + d_fft_ptr[i].y * d_fft_ptr[i].y) / NUM_SAMPS) //DFT bin magnitude
);
}
out_ptr += NUM_SAMPS; //increment out_ptr by one frame of averages
d_fft_ptr += NUM_SAMPS*averaging; //increment d_fft_ptr by number of frames averaged
}
}*/ | a447cfb4583ba1aa55356d011a8f1f471594cb20.cu | #include "cuda_process.cuh"
#include "cuda_module.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "helper_functions.h"
#include "helper_cuda.h"
#include <stdio.h>
#include <iostream>
#include <cufft.h>
#include <complex>
//int main(int argc, char **argv){}
//The cufft must be invoked by the host, not as part of a kernel.
//num_wins is the total number of windows, without averaging. Therefore total windows should be number of output frames * averaging to ensure no wasted samples.
float* dothething_overlap(std::complex<short>* h_samp_arry, const int averaging, float* h_out, const int num_wins, const int overlap) {
bool error = false;
/*
if (num_wins == 0) {
std::cout << "AMG NO WINS!\n";
num_wins = sizeof(h_samp_arry) / (2 * NUM_SAMPS);
std::cout << "Number of windows: " << num_wins << std::endl;
} */
//const int num_wins = 1;
//cuComplex* samp[NUM_SAMPS];
//std::complex<short>* d_samp;
cudaError_t cudaStatus;
// for outputting of averaged and processed samples /
h_out = (float*)malloc((sizeof(float) * NUM_SAMPS * (num_wins*overlap)/averaging));
//h_out = (float*)calloc(NUM_SAMPS * num_wins / averaging, sizeof(float));
if (h_out == NULL) {
fprintf(stderr, "h_out Malloc failed!");
goto Error;
}
float* d_out;
cuComplexShort* h_samp_ptr = (cuComplexShort*)&h_samp_arry[0];
//std::cout << h_samp_arry[0].real() << "," << h_samp_arry[0].imag() << " cuCmplx" << h_samp_ptr[0].x << "," << h_samp_ptr[0].y << std::endl;
float h_win[NUM_SAMPS];
float* d_win;
cuComplexShort* d_samp;
cuComplex* d_fftbuff;
float win_power = 0;
int rx_gain = 30;
//Create coefficient array and x axis index for plotting
for (int i = 0; i < NUM_SAMPS; i++) {
h_win[i] = 0.35875 - 0.48829*cos(2 * pi*i / (NUM_SAMPS - 1)) + 0.14128*cos(4 * pi*i / (NUM_SAMPS - 1)) - 0.01168*cos(6 * pi*i / (NUM_SAMPS - 1)); //blackmann harris window
win_power += (h_win[i] * h_win[i]); //this computes the total window power and normalises it to account for DC gain due to the window.
}
win_power /= NUM_SAMPS; //normalise the total window power across each sample.
const float offset = -10 - rx_gain + 10 * std::log10(win_power); //-10 is the MAX power detected by the ADC and take into account the gain of the frontend.
//printf("GPU Offset: %f", offset);
//allocate the memory for the GPU
cudaStatus = cudaMalloc((float**)&d_out, (sizeof(float)*NUM_SAMPS * (num_wins*overlap) / averaging));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc d_out failed!");
error = true;
goto Error;
}
cudaMemset(d_out, 0, (sizeof(float)*NUM_SAMPS * (num_wins*overlap) / averaging)); //initialise to zero
cudaStatus = cudaMalloc((cuComplexShort**)&d_samp, sizeof(cuComplexShort)*NUM_SAMPS*num_wins);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc d_samp failed!");
error = true;
goto Error;
}
cudaStatus = cudaMalloc((float**)&d_win, sizeof(float)*NUM_SAMPS);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc d_win failed!");
error = true;
goto Error;
}
cudaStatus = cudaMalloc((cuComplex**)&d_fftbuff, sizeof(cuComplex)*NUM_SAMPS*num_wins*overlap);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc d_fftbuff failed!");
error = true;
goto Error;
}
//Transfer data to GPU
cudaStatus = cudaMemcpy(d_win, h_win, sizeof(float)*NUM_SAMPS, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy win to Device failed!");
error = true;
goto Error;
}
cudaStatus = cudaMemcpy(d_samp, h_samp_ptr, sizeof(cuComplexShort)*NUM_SAMPS*num_wins, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy samp to Device failed!");
error = true;
goto Error;
}
//Create cufft plan, turns out cufft handles its own memory transfers, so we must use callbacks in order to avoid numerous reads and writes in the device
//Will however use multiple kernels initially, then see what the performance improvement is with callbacks at a later stage. n.n
cufftHandle plan;
cufftPlan1d(&plan, NUM_SAMPS, CUFFT_C2C, num_wins*overlap);
//printf("h_samp[%d]=%f,%f ", 0, s_ptr[0].x, s_ptr[0].y);
//printf("d_samp[%d]=%f,%f\n", 0,d_samp[0].x,d_samp[0].y);
// Kernel calls lah <<blocks,threads>>
cufft_prep_overlap<<<(NUM_SAMPS*num_wins) / CU_THD, CU_THD >>>(d_fftbuff, d_samp, d_win, num_wins, overlap); //This will create (NUM_SAMPS*num_wins)/CU_THD blocks, with CU_THD threads per block
//inplace fft
if (cufftExecC2C(plan, d_fftbuff, d_fftbuff, CUFFT_FORWARD)){
fprintf(stderr, "CUFFT error: ExecC2C Forward failed");
error = true;
goto Error;
}
//Do something with the fft'd samples, like average them, then output them to the host, where the host can perform detection.
avg_out_overlap <<<NUM_SAMPS / CU_THD, CU_THD >>>(d_out, d_fftbuff, num_wins, averaging, offset, overlap);
cudaStatus = cudaMemcpy(h_out, d_out, sizeof(float)*NUM_SAMPS * ((num_wins*overlap)/averaging), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy out to Host failed!");
error = true;
goto Error;
}
/*
std::cout << "GPU: ";
for (int i = 0; i < NUM_SAMPS; i++) {
std::cout << h_win[i] << ",";
}
std::cout << "Please note these are not flipped around samples/2 correctly" << std::endl;
*/
Error:
cufftDestroy(plan);
checkCudaErrors(cudaFree(d_out));
checkCudaErrors(cudaFree(d_samp));
checkCudaErrors(cudaFree(d_win));
checkCudaErrors(cudaFree(d_fftbuff));
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
error = true;
}
if (error) {
fprintf(stderr, "\nGPU ERROPR!\n");
system("pause");
}
return h_out;
}
//Kernel Call
//https://devblogs.nvidia.com/parallelforall/cuda-pro-tip-use-cufft-callbacks-custom-data-processing/ for inspiration
static __global__ void cufft_prep_overlap(cuComplex* d_fft, cuComplexShort* d_s, float* d_w, const int num_wins, int overlap) {
int idx = threadIdx.x;
//blockDim = number of threads in a block
//This will take an array of complex shorts (14b samples) an array of cuComplex and a window array, will convert the com_short to cuComplex (com_float), correctly scale the samples and apply the appropriate window prepping it for fft
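	//i walks the expanded (overlapped) FFT buffer while j walks the raw capture; j advances at 1/overlap of the rate of i, so each input sample is windowed into several overlapping FFT frames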
	for (int i = blockIdx.x * blockDim.x + idx, j = blockIdx.x * blockDim.x + idx; i < NUM_SAMPS*num_wins*overlap; i += blockDim.x * gridDim.x, j += (blockDim.x * gridDim.x) / overlap){
d_fft[i].x = (d_s[j].x*1.0f / 32767.0f) * d_w[i%NUM_SAMPS];
d_fft[i].y = (d_s[j].y*1.0f / 32767.0f) * d_w[i%NUM_SAMPS];
}
//if(idx == 0) printf("d_s[%d]: %f,%f fftbuff %f,%f\n", idx, d_s[idx].x, d_s[idx].y, d_s[idx].x, d_s[idx].x);
}
static __global__ void avg_out_overlap(float* out, cuComplex* d_fft, const int num_wins, const int averaging, const float offset, int overlap) {
int idx = threadIdx.x;
float* out_ptr = &out[0];
cuComplex* d_fft_ptr = &d_fft[0];
const float threshold = -113;
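	//each output frame accumulates 'averaging' FFT windows; the (NUM_SAMPS/2 + i) % NUM_SAMPS index applies an fftshift so DC sits at the centre of the frame, and the final line maps the averaged dB value (plus the calibration offset) to 1 if it is at or below the threshold, else 0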
for (int j = 0; j < (num_wins*overlap)/averaging; j++){
for (int i = blockIdx.x * blockDim.x + idx; i < NUM_SAMPS*averaging; i += blockDim.x * gridDim.x){
out_ptr[((NUM_SAMPS/2)+i)%NUM_SAMPS] += (
10 * log10(abs(d_fft_ptr[i].x * d_fft_ptr[i].x + d_fft_ptr[i].y * d_fft_ptr[i].y) / NUM_SAMPS) //DFT bin magnitude
);
}
// __syncthreads();
out_ptr[(NUM_SAMPS / 2 + blockIdx.x * blockDim.x + idx) % NUM_SAMPS] = ((out_ptr[(NUM_SAMPS / 2 + blockIdx.x * blockDim.x + idx) % NUM_SAMPS] / averaging + offset) <= threshold) ? 1 : 0;
//out_ptr[(NUM_SAMPS / 2 + blockIdx.x * blockDim.x + idx) % NUM_SAMPS] = (out_ptr[(NUM_SAMPS / 2 + blockIdx.x * blockDim.x + idx) % NUM_SAMPS] / averaging + offset);// <= threshold) ? 1 : 0;
// if (out_ptr[blockIdx.x * blockDim.x + idx] <= threshold) out_ptr[blockIdx.x * blockDim.x + idx] = 1;
// else out_ptr[blockIdx.x * blockDim.x + idx] = 0;
out_ptr += NUM_SAMPS; //increment out_ptr by one frame of averages
d_fft_ptr += NUM_SAMPS*averaging; //increment d_fft_ptr by number of frames averaged
}
}
/* BACKUP LOL
static __global__ void avg_out(float* out, cuComplex* d_fft, const int num_wins, const int averaging) {
int idx = threadIdx.x;
float* out_ptr = &out[0];
cuComplex* d_fft_ptr = &d_fft[0];
for (int j = 0; j < num_wins / averaging; j++){
for (int i = blockIdx.x * blockDim.x + idx; i < NUM_SAMPS*averaging; i += blockDim.x * gridDim.x){
out_ptr[i%NUM_SAMPS] += (
10 * log10(abs(d_fft_ptr[i].x * d_fft_ptr[i].x + d_fft_ptr[i].y * d_fft_ptr[i].y) / NUM_SAMPS) //DFT bin magnitude
);
}
out_ptr += NUM_SAMPS; //increment out_ptr by one frame of averages
d_fft_ptr += NUM_SAMPS*averaging; //increment d_fft_ptr by number of frames averaged
}
}*/ |
098d2b64069fc8c9b545234273dd788d0790b6d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 3
* of the programming guide with some additions like error checking.
*
*/
// Includes
#include <stdio.h>
#include <cutil_inline.h>
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
// Functions
void Cleanup(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
// Device code
__global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
// Host code
int main(int argc, char** argv)
{
printf("Vector addition\n");
int N = 50000;
size_t size = N * sizeof(float);
ParseArguments(argc, argv);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) Cleanup();
h_B = (float*)malloc(size);
if (h_B == 0) Cleanup();
h_C = (float*)malloc(size);
if (h_C == 0) Cleanup();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
cutilSafeCall( hipMalloc((void**)&d_A, size) );
cutilSafeCall( hipMalloc((void**)&d_B, size) );
cutilSafeCall( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
cutilSafeCall( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
// Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N);
cutilCheckMsg("kernel launch failure");
#ifdef _DEBUG
cutilSafeCall( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
cutilSafeCall( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
// Verify result
int i;
for (i = 0; i < N; ++i) {
float sum = h_A[i] + h_B[i];
if (fabs(h_C[i] - sum) > 1e-5)
break;
}
printf("%s \n", (i == N) ? "PASSED" : "FAILED");
Cleanup();
}
void Cleanup(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
cutilSafeCall( hipDeviceReset() );
if (!noprompt) {
printf("\nPress ENTER to exit...\n");
fflush( stdout);
fflush( stderr);
getchar();
}
exit(0);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i)
data[i] = rand() / (float)RAND_MAX;
}
// Parse program arguments
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i)
if (strcmp(argv[i], "--noprompt") == 0 ||
strcmp(argv[i], "-noprompt") == 0)
{
noprompt = true;
break;
}
}
| 098d2b64069fc8c9b545234273dd788d0790b6d6.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 3
* of the programming guide with some additions like error checking.
*
*/
// Includes
#include <stdio.h>
#include <cutil_inline.h>
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
// Functions
void Cleanup(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
// Device code
__global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
// Host code
int main(int argc, char** argv)
{
printf("Vector addition\n");
int N = 50000;
size_t size = N * sizeof(float);
ParseArguments(argc, argv);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) Cleanup();
h_B = (float*)malloc(size);
if (h_B == 0) Cleanup();
h_C = (float*)malloc(size);
if (h_C == 0) Cleanup();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
cutilSafeCall( cudaMalloc((void**)&d_A, size) );
cutilSafeCall( cudaMalloc((void**)&d_B, size) );
cutilSafeCall( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
cutilSafeCall( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
// Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
cutilCheckMsg("kernel launch failure");
#ifdef _DEBUG
cutilSafeCall( cudaThreadSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
cutilSafeCall( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
// Verify result
int i;
for (i = 0; i < N; ++i) {
float sum = h_A[i] + h_B[i];
if (fabs(h_C[i] - sum) > 1e-5)
break;
}
printf("%s \n", (i == N) ? "PASSED" : "FAILED");
Cleanup();
}
void Cleanup(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
cutilSafeCall( cudaThreadExit() );
if (!noprompt) {
printf("\nPress ENTER to exit...\n");
fflush( stdout);
fflush( stderr);
getchar();
}
exit(0);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i)
data[i] = rand() / (float)RAND_MAX;
}
// Parse program arguments
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i)
if (strcmp(argv[i], "--noprompt") == 0 ||
strcmp(argv[i], "-noprompt") == 0)
{
noprompt = true;
break;
}
}
|
c91f35ff336803a02ca29517b75681ef9c5eefb4.hip | // !!! This is a file automatically generated by hipify!!!
/* Sushil Dubey, Shashi Dugad, TIFR, July 2017
*
* File Name: RawToClusterGPU.cu
* Description: It converts Raw data into Digi Format on GPU
 * Finally the Output of RawToDigi data is given to pixelClusterizer
**/
// C++ includes
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
// CUDA includes
#include <hip/hip_runtime.h>
// CMSSW includes
#include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h"
#include "CondFormats/SiPixelObjects/interface/SiPixelROCsStatusAndMapping.h"
#include "DataFormats/FEDRawData/interface/FEDNumbering.h"
#include "DataFormats/TrackerCommon/interface/TrackerTopology.h"
#include "DataFormats/SiPixelDigi/interface/SiPixelDigiConstants.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "HeterogeneousCore/CUDAUtilities/interface/host_unique_ptr.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuCalibPixel.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClustering.h"
// local includes
#include "SiPixelRawToClusterGPUKernel.h"
namespace pixelgpudetails {
SiPixelRawToClusterGPUKernel::WordFedAppender::WordFedAppender(uint32_t maxFedWords) {
word_ = cms::cuda::make_host_noncached_unique<unsigned int[]>(maxFedWords, hipHostMallocWriteCombined);
fedId_ = cms::cuda::make_host_noncached_unique<unsigned char[]>(maxFedWords, hipHostMallocWriteCombined);
}
void SiPixelRawToClusterGPUKernel::WordFedAppender::initializeWordFed(int fedId,
unsigned int wordCounterGPU,
const cms_uint32_t *src,
unsigned int length) {
std::memcpy(word_.get() + wordCounterGPU, src, sizeof(cms_uint32_t) * length);
std::memset(fedId_.get() + wordCounterGPU / 2, fedId - FEDNumbering::MINSiPixeluTCAFEDID, length / 2);
}
////////////////////
__device__ bool isBarrel(uint32_t rawId) {
return (PixelSubdetector::PixelBarrel == ((rawId >> DetId::kSubdetOffset) & DetId::kSubdetMask));
}
__device__ pixelgpudetails::DetIdGPU getRawId(const SiPixelROCsStatusAndMapping *cablingMap,
uint8_t fed,
uint32_t link,
uint32_t roc) {
uint32_t index = fed * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
pixelgpudetails::DetIdGPU detId = {
cablingMap->rawId[index], cablingMap->rocInDet[index], cablingMap->moduleId[index]};
return detId;
}
//reference http://cmsdoxygen.web.cern.ch/cmsdoxygen/CMSSW_9_2_0/doc/html/dd/d31/FrameConversion_8cc_source.html
//http://cmslxr.fnal.gov/source/CondFormats/SiPixelObjects/src/PixelROC.cc?v=CMSSW_9_2_0#0071
// Convert local pixel to pixelgpudetails::global pixel
__device__ pixelgpudetails::Pixel frameConversion(
bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, pixelgpudetails::Pixel local) {
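    // convert a ROC-local (row, col) into module-global pixel coordinates; slope and offset
    // encode the ROC's orientation (flipped or not) and its position within the module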
int slopeRow = 0, slopeCol = 0;
int rowOffset = 0, colOffset = 0;
if (bpix) {
if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
} // if roc
} else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1
if (rocIdInDetUnit < 8) {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = rocIdInDetUnit * pixelgpudetails::numColsInRoc;
} else {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (16 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
}
}
} else { // fpix
      if (side == -1) {  // panel 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
      } else {  // panel 2
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} // side
}
uint32_t gRow = rowOffset + slopeRow * local.row;
uint32_t gCol = colOffset + slopeCol * local.col;
// inside frameConversion row: gRow, column: gCol
pixelgpudetails::Pixel global = {gRow, gCol};
return global;
}
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint8_t conversionError(uint8_t fedId, uint8_t status, bool debug = false) {
uint8_t errorType = 0;
switch (status) {
case (1): {
if (debug)
printf("Error in Fed: %i, invalid channel Id (errorType = 35\n)", fedId);
errorType = 35;
break;
}
case (2): {
if (debug)
printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId);
errorType = 36;
break;
}
case (3): {
if (debug)
printf("Error in Fed: %i, invalid dcol/pixel value (errorType = 37)\n", fedId);
errorType = 37;
break;
}
case (4): {
if (debug)
printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId);
errorType = 38;
break;
}
default:
if (debug)
printf("Cabling check returned unexpected result, status = %i\n", status);
};
return errorType;
}
__device__ bool rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) {
/// row and column in ROC representation
return ((rocRow < pixelgpudetails::numRowsInRoc) & (rocCol < pixelgpudetails::numColsInRoc));
}
__device__ bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); }
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint8_t checkROC(uint32_t errorWord,
uint8_t fedId,
uint32_t link,
const SiPixelROCsStatusAndMapping *cablingMap,
bool debug = false) {
uint8_t errorType = (errorWord >> sipixelconstants::ROC_shift) & sipixelconstants::ERROR_mask;
if (errorType < 25)
return 0;
bool errorFound = false;
switch (errorType) {
case (25): {
errorFound = true;
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + 1;
if (index > 1 && index <= cablingMap->size) {
if (!(link == cablingMap->link[index] && 1 == cablingMap->roc[index]))
errorFound = false;
}
if (debug and errorFound)
printf("Invalid ROC = 25 found (errorType = 25)\n");
break;
}
case (26): {
if (debug)
printf("Gap word found (errorType = 26)\n");
errorFound = true;
break;
}
case (27): {
if (debug)
printf("Dummy word found (errorType = 27)\n");
errorFound = true;
break;
}
case (28): {
if (debug)
printf("Error fifo nearly full (errorType = 28)\n");
errorFound = true;
break;
}
case (29): {
if (debug)
printf("Timeout on a channel (errorType = 29)\n");
if ((errorWord >> sipixelconstants::OMIT_ERR_shift) & sipixelconstants::OMIT_ERR_mask) {
if (debug)
printf("...first errorType=29 error, this gets masked out\n");
}
errorFound = true;
break;
}
case (30): {
if (debug)
printf("TBM error trailer (errorType = 30)\n");
int stateMatch_bits = 4;
int stateMatch_shift = 8;
uint32_t stateMatch_mask = ~(~uint32_t(0) << stateMatch_bits);
int stateMatch = (errorWord >> stateMatch_shift) & stateMatch_mask;
if (stateMatch != 1 && stateMatch != 8) {
if (debug)
printf("FED error 30 with unexpected State Bits (errorType = 30)\n");
}
if (stateMatch == 1)
errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30
errorFound = true;
break;
}
case (31): {
if (debug)
printf("Event number error (errorType = 31)\n");
errorFound = true;
break;
}
default:
errorFound = false;
};
return errorFound ? errorType : 0;
}
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint32_t getErrRawID(uint8_t fedId,
uint32_t errWord,
uint32_t errorType,
const SiPixelROCsStatusAndMapping *cablingMap,
bool debug = false) {
uint32_t rID = 0xffffffff;
switch (errorType) {
case 25:
case 30:
case 31:
case 36:
case 40: {
uint32_t roc = 1;
uint32_t link = sipixelconstants::getLink(errWord);
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
case 29: {
int chanNmbr = 0;
const int DB0_shift = 0;
const int DB1_shift = DB0_shift + 1;
const int DB2_shift = DB1_shift + 1;
const int DB3_shift = DB2_shift + 1;
const int DB4_shift = DB3_shift + 1;
const uint32_t DataBit_mask = ~(~uint32_t(0) << 1);
int CH1 = (errWord >> DB0_shift) & DataBit_mask;
int CH2 = (errWord >> DB1_shift) & DataBit_mask;
int CH3 = (errWord >> DB2_shift) & DataBit_mask;
int CH4 = (errWord >> DB3_shift) & DataBit_mask;
int CH5 = (errWord >> DB4_shift) & DataBit_mask;
int BLOCK_bits = 3;
int BLOCK_shift = 8;
uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits);
int BLOCK = (errWord >> BLOCK_shift) & BLOCK_mask;
int localCH = 1 * CH1 + 2 * CH2 + 3 * CH3 + 4 * CH4 + 5 * CH5;
if (BLOCK % 2 == 0)
chanNmbr = (BLOCK / 2) * 9 + localCH;
else
chanNmbr = ((BLOCK - 1) / 2) * 9 + 4 + localCH;
if ((chanNmbr < 1) || (chanNmbr > 36))
break; // signifies unexpected result
uint32_t roc = 1;
uint32_t link = chanNmbr;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
case 37:
case 38: {
uint32_t roc = sipixelconstants::getROC(errWord);
uint32_t link = sipixelconstants::getLink(errWord);
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
default:
break;
};
return rID;
}
// Kernel to perform Raw to Digi conversion
__global__ void RawToDigi_kernel(const SiPixelROCsStatusAndMapping *cablingMap,
const unsigned char *modToUnp,
const uint32_t wordCounter,
const uint32_t *word,
const uint8_t *fedIds,
uint16_t *xx,
uint16_t *yy,
uint16_t *adc,
uint32_t *pdigi,
uint32_t *rawIdArr,
uint16_t *moduleId,
cms::cuda::SimpleVector<SiPixelErrorCompact> *err,
bool useQualityInfo,
bool includeErrors,
bool debug) {
//if (threadIdx.x==0) printf("Event: %u blockIdx.x: %u start: %u end: %u\n", eventno, blockIdx.x, begin, end);
int32_t first = threadIdx.x + blockIdx.x * blockDim.x;
for (int32_t iloop = first, nend = wordCounter; iloop < nend; iloop += blockDim.x * gridDim.x) {
auto gIndex = iloop;
xx[gIndex] = 0;
yy[gIndex] = 0;
adc[gIndex] = 0;
bool skipROC = false;
uint8_t fedId = fedIds[gIndex / 2]; // +1200;
      // initialize (too many continues below)
pdigi[gIndex] = 0;
rawIdArr[gIndex] = 0;
moduleId[gIndex] = gpuClustering::invalidModuleId;
uint32_t ww = word[gIndex]; // Array containing 32 bit raw data
if (ww == 0) {
// 0 is an indicator of a noise/dead channel, skip these pixels during clusterization
continue;
}
uint32_t link = sipixelconstants::getLink(ww); // Extract link
uint32_t roc = sipixelconstants::getROC(ww); // Extract Roc in link
pixelgpudetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc);
uint8_t errorType = checkROC(ww, fedId, link, cablingMap, debug);
skipROC = (roc < pixelgpudetails::maxROCIndex) ? false : (errorType != 0);
if (includeErrors and skipROC) {
uint32_t rID = getErrRawID(fedId, ww, errorType, cablingMap, debug);
err->push_back(SiPixelErrorCompact{rID, ww, errorType, fedId});
continue;
}
uint32_t rawId = detId.rawId;
uint32_t rocIdInDetUnit = detId.rocInDet;
bool barrel = isBarrel(rawId);
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
if (useQualityInfo) {
skipROC = cablingMap->badRocs[index];
if (skipROC)
continue;
}
skipROC = modToUnp[index];
if (skipROC)
continue;
uint32_t layer = 0;
int side = 0, panel = 0, module = 0;
if (barrel) {
layer = (rawId >> pixelgpudetails::layerStartBit) & pixelgpudetails::layerMask;
module = (rawId >> pixelgpudetails::moduleStartBit) & pixelgpudetails::moduleMask;
side = (module < 5) ? -1 : 1;
} else {
// endcap ids
layer = 0;
panel = (rawId >> pixelgpudetails::panelStartBit) & pixelgpudetails::panelMask;
side = (panel == 1) ? -1 : 1;
}
// ***special case of layer to 1 be handled here
pixelgpudetails::Pixel localPix;
if (layer == 1) {
uint32_t col = sipixelconstants::getCol(ww);
uint32_t row = sipixelconstants::getRow(ww);
localPix.row = row;
localPix.col = col;
if (includeErrors) {
if (not rocRowColIsValid(row, col)) {
uint8_t error = conversionError(fedId, 3, debug); //use the device function and fill the arrays
err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("BPIX1 Error status: %i\n", error);
continue;
}
}
} else {
// ***conversion rules for dcol and pxid
uint32_t dcol = sipixelconstants::getDCol(ww);
uint32_t pxid = sipixelconstants::getPxId(ww);
uint32_t row = pixelgpudetails::numRowsInRoc - pxid / 2;
uint32_t col = dcol * 2 + pxid % 2;
localPix.row = row;
localPix.col = col;
if (includeErrors and not dcolIsValid(dcol, pxid)) {
uint8_t error = conversionError(fedId, 3, debug);
err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc);
continue;
}
}
pixelgpudetails::Pixel globalPix = frameConversion(barrel, side, layer, rocIdInDetUnit, localPix);
xx[gIndex] = globalPix.row; // origin shifting by 1 0-159
yy[gIndex] = globalPix.col; // origin shifting by 1 0-415
adc[gIndex] = sipixelconstants::getADC(ww);
pdigi[gIndex] = pixelgpudetails::pack(globalPix.row, globalPix.col, adc[gIndex]);
moduleId[gIndex] = detId.moduleId;
rawIdArr[gIndex] = rawId;
} // end of loop (gIndex < end)
} // end of Raw to Digi kernel
__global__ void fillHitsModuleStart(uint32_t const *__restrict__ clusInModule,
uint32_t *__restrict__ moduleStart,
uint32_t const *__restrict__ nModules,
uint32_t *__restrict__ nModules_Clusters) {
assert(gpuClustering::maxNumModules < 2048); // easy to extend at least till 32*1024
assert(1 == gridDim.x);
assert(0 == blockIdx.x);
int first = threadIdx.x;
// limit to maxHitsInModule()
for (int i = first, iend = gpuClustering::maxNumModules; i < iend; i += blockDim.x) {
moduleStart[i + 1] = ::min(gpuClustering::maxHitsInModule(), clusInModule[i]);
}
__shared__ uint32_t ws[32];
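    // prefix-scan the per-module cluster counts in two chunks of at most 1024 entries,
    // then add the first chunk's total (moduleStart[1024]) to the second chunk so that
    // moduleStart becomes one cumulative sum over all modules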
cms::cuda::blockPrefixScan(moduleStart + 1, moduleStart + 1, 1024, ws);
cms::cuda::blockPrefixScan(moduleStart + 1025, moduleStart + 1025, gpuClustering::maxNumModules - 1024, ws);
for (int i = first + 1025, iend = gpuClustering::maxNumModules + 1; i < iend; i += blockDim.x) {
moduleStart[i] += moduleStart[1024];
}
__syncthreads();
if (threadIdx.x == 0) {
// copy the number of modules
nModules_Clusters[0] = *nModules;
// last element holds the number of all clusters
nModules_Clusters[1] = moduleStart[gpuClustering::maxNumModules];
// element 96 is the start of BPIX2 (i.e. the number of clusters in BPIX1)
nModules_Clusters[2] = moduleStart[96];
}
#ifdef GPU_DEBUG
assert(0 == moduleStart[0]);
auto c0 = ::min(gpuClustering::maxHitsInModule(), clusInModule[0]);
assert(c0 == moduleStart[1]);
assert(moduleStart[1024] >= moduleStart[1023]);
assert(moduleStart[1025] >= moduleStart[1024]);
assert(moduleStart[gpuClustering::maxNumModules] >= moduleStart[1025]);
for (int i = first, iend = gpuClustering::maxNumModules + 1; i < iend; i += blockDim.x) {
if (0 != i)
        assert(moduleStart[i] >= moduleStart[i - 1]);
// [BPX1, BPX2, BPX3, BPX4, FP1, FP2, FP3, FN1, FN2, FN3, LAST_VALID]
// [ 0, 96, 320, 672, 1184, 1296, 1408, 1520, 1632, 1744, 1856]
if (i == 96 || i == 1184 || i == 1744 || i == gpuClustering::maxNumModules)
printf("moduleStart %d %d\n", i, moduleStart[i]);
}
#endif
}
// Interface to outside
void SiPixelRawToClusterGPUKernel::makeClustersAsync(bool isRun2,
const SiPixelClusterThresholds clusterThresholds,
const SiPixelROCsStatusAndMapping *cablingMap,
const unsigned char *modToUnp,
const SiPixelGainForHLTonGPU *gains,
const WordFedAppender &wordFed,
SiPixelFormatterErrors &&errors,
const uint32_t wordCounter,
const uint32_t fedCounter,
const uint32_t maxFedWords,
bool useQualityInfo,
bool includeErrors,
bool debug,
hipStream_t stream) {
// we're not opting for calling this function in case of early events
assert(wordCounter != 0);
nDigis = wordCounter;
#ifdef GPU_DEBUG
std::cout << "decoding " << wordCounter << " digis. Max is " << maxFedWords << std::endl;
#endif
// since wordCounter != 0 we're not allocating 0 bytes,
digis_d = SiPixelDigisCUDA(wordCounter, stream);
if (includeErrors) {
digiErrors_d = SiPixelDigiErrorsCUDA(wordCounter, std::move(errors), stream);
}
clusters_d = SiPixelClustersCUDA(gpuClustering::maxNumModules, stream);
// Begin Raw2Digi block
{
const int threadsPerBlock = 512;
const int blocks = (wordCounter + threadsPerBlock - 1) / threadsPerBlock; // fill it all
assert(0 == wordCounter % 2);
      // wordCounter is the total no of words in each event to be transferred to the device
auto word_d = cms::cuda::make_device_unique<uint32_t[]>(wordCounter, stream);
auto fedId_d = cms::cuda::make_device_unique<uint8_t[]>(wordCounter, stream);
cudaCheck(
hipMemcpyAsync(word_d.get(), wordFed.word(), wordCounter * sizeof(uint32_t), hipMemcpyDefault, stream));
cudaCheck(hipMemcpyAsync(
fedId_d.get(), wordFed.fedId(), wordCounter * sizeof(uint8_t) / 2, hipMemcpyDefault, stream));
// Launch rawToDigi kernel
hipLaunchKernelGGL(( RawToDigi_kernel), dim3(blocks), dim3(threadsPerBlock), 0, stream,
cablingMap,
modToUnp,
wordCounter,
word_d.get(),
fedId_d.get(),
digis_d.view().xx(),
digis_d.view().yy(),
digis_d.view().adc(),
digis_d.view().pdigi(),
digis_d.view().rawIdArr(),
digis_d.view().moduleInd(),
digiErrors_d.error(), // returns nullptr if default-constructed
useQualityInfo,
includeErrors,
debug);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
if (includeErrors) {
digiErrors_d.copyErrorToHostAsync(stream);
}
}
// End of Raw2Digi and passing data for clustering
{
// clusterizer ...
using namespace gpuClustering;
int threadsPerBlock = 256;
int blocks =
(::max(int(wordCounter), int(gpuClustering::maxNumModules)) + threadsPerBlock - 1) / threadsPerBlock;
if (isRun2)
hipLaunchKernelGGL(( gpuCalibPixel::calibDigis<true>), dim3(blocks), dim3(threadsPerBlock), 0, stream, digis_d.view().moduleInd(),
digis_d.view().xx(),
digis_d.view().yy(),
digis_d.view().adc(),
gains,
wordCounter,
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.clusModuleStart());
else
hipLaunchKernelGGL(( gpuCalibPixel::calibDigis<false>), dim3(blocks), dim3(threadsPerBlock), 0, stream, digis_d.view().moduleInd(),
digis_d.view().xx(),
digis_d.view().yy(),
digis_d.view().adc(),
gains,
wordCounter,
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.clusModuleStart());
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
#ifdef GPU_DEBUG
std::cout << "CUDA countModules kernel launch with " << blocks << " blocks of " << threadsPerBlock
<< " threads\n";
#endif
hipLaunchKernelGGL(( countModules), dim3(blocks), dim3(threadsPerBlock), 0, stream,
digis_d.view().moduleInd(), clusters_d.moduleStart(), digis_d.view().clus(), wordCounter);
cudaCheck(hipGetLastError());
threadsPerBlock = 256 + 128; /// should be larger than 6000/16 aka (maxPixInModule/maxiter in the kernel)
blocks = maxNumModules;
#ifdef GPU_DEBUG
std::cout << "CUDA findClus kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n";
#endif
hipLaunchKernelGGL(( findClus), dim3(blocks), dim3(threadsPerBlock), 0, stream, digis_d.view().moduleInd(),
digis_d.view().xx(),
digis_d.view().yy(),
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.view().clus(),
wordCounter);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
// apply charge cut
hipLaunchKernelGGL(( clusterChargeCut), dim3(blocks), dim3(threadsPerBlock), 0, stream, clusterThresholds,
digis_d.view().moduleInd(),
digis_d.view().adc(),
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.view().clus(),
wordCounter);
cudaCheck(hipGetLastError());
// count the module start indices already here (instead of
// rechits) so that the number of clusters/hits can be made
// available in the rechit producer without additional points of
// synchronization/ExternalWork
auto nModules_Clusters_d = cms::cuda::make_device_unique<uint32_t[]>(3, stream);
// MUST be ONE block
hipLaunchKernelGGL(( fillHitsModuleStart), dim3(1), dim3(1024), 0, stream,
clusters_d.clusInModule(), clusters_d.clusModuleStart(), clusters_d.moduleStart(), nModules_Clusters_d.get());
// copy to host
nModules_Clusters_h = cms::cuda::make_host_unique<uint32_t[]>(3, stream);
cudaCheck(hipMemcpyAsync(
nModules_Clusters_h.get(), nModules_Clusters_d.get(), 3 * sizeof(uint32_t), hipMemcpyDefault, stream));
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
} // end clusterizer scope
}
} // namespace pixelgpudetails
| c91f35ff336803a02ca29517b75681ef9c5eefb4.cu | /* Sushil Dubey, Shashi Dugad, TIFR, July 2017
*
* File Name: RawToClusterGPU.cu
* Description: It converts Raw data into Digi Format on GPU
 * Finally the Output of RawToDigi data is given to pixelClusterizer
**/
// C++ includes
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
// CUDA includes
#include <cuda_runtime.h>
// CMSSW includes
#include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h"
#include "CondFormats/SiPixelObjects/interface/SiPixelROCsStatusAndMapping.h"
#include "DataFormats/FEDRawData/interface/FEDNumbering.h"
#include "DataFormats/TrackerCommon/interface/TrackerTopology.h"
#include "DataFormats/SiPixelDigi/interface/SiPixelDigiConstants.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "HeterogeneousCore/CUDAUtilities/interface/host_unique_ptr.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuCalibPixel.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClustering.h"
// local includes
#include "SiPixelRawToClusterGPUKernel.h"
namespace pixelgpudetails {
SiPixelRawToClusterGPUKernel::WordFedAppender::WordFedAppender(uint32_t maxFedWords) {
word_ = cms::cuda::make_host_noncached_unique<unsigned int[]>(maxFedWords, cudaHostAllocWriteCombined);
fedId_ = cms::cuda::make_host_noncached_unique<unsigned char[]>(maxFedWords, cudaHostAllocWriteCombined);
}
void SiPixelRawToClusterGPUKernel::WordFedAppender::initializeWordFed(int fedId,
unsigned int wordCounterGPU,
const cms_uint32_t *src,
unsigned int length) {
std::memcpy(word_.get() + wordCounterGPU, src, sizeof(cms_uint32_t) * length);
std::memset(fedId_.get() + wordCounterGPU / 2, fedId - FEDNumbering::MINSiPixeluTCAFEDID, length / 2);
}
////////////////////
__device__ bool isBarrel(uint32_t rawId) {
return (PixelSubdetector::PixelBarrel == ((rawId >> DetId::kSubdetOffset) & DetId::kSubdetMask));
}
__device__ pixelgpudetails::DetIdGPU getRawId(const SiPixelROCsStatusAndMapping *cablingMap,
uint8_t fed,
uint32_t link,
uint32_t roc) {
uint32_t index = fed * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
pixelgpudetails::DetIdGPU detId = {
cablingMap->rawId[index], cablingMap->rocInDet[index], cablingMap->moduleId[index]};
return detId;
}
//reference http://cmsdoxygen.web.cern.ch/cmsdoxygen/CMSSW_9_2_0/doc/html/dd/d31/FrameConversion_8cc_source.html
//http://cmslxr.fnal.gov/source/CondFormats/SiPixelObjects/src/PixelROC.cc?v=CMSSW_9_2_0#0071
// Convert local pixel to pixelgpudetails::global pixel
__device__ pixelgpudetails::Pixel frameConversion(
bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, pixelgpudetails::Pixel local) {
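    // convert a ROC-local (row, col) into module-global pixel coordinates; slope and offset
    // encode the ROC's orientation (flipped or not) and its position within the module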
int slopeRow = 0, slopeCol = 0;
int rowOffset = 0, colOffset = 0;
if (bpix) {
if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
} // if roc
} else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1
if (rocIdInDetUnit < 8) {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = rocIdInDetUnit * pixelgpudetails::numColsInRoc;
} else {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (16 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
}
}
} else { // fpix
      if (side == -1) {  // panel 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
      } else {  // panel 2
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} // side
}
uint32_t gRow = rowOffset + slopeRow * local.row;
uint32_t gCol = colOffset + slopeCol * local.col;
// inside frameConversion row: gRow, column: gCol
pixelgpudetails::Pixel global = {gRow, gCol};
return global;
}
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint8_t conversionError(uint8_t fedId, uint8_t status, bool debug = false) {
uint8_t errorType = 0;
switch (status) {
case (1): {
if (debug)
printf("Error in Fed: %i, invalid channel Id (errorType = 35\n)", fedId);
errorType = 35;
break;
}
case (2): {
if (debug)
printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId);
errorType = 36;
break;
}
case (3): {
if (debug)
printf("Error in Fed: %i, invalid dcol/pixel value (errorType = 37)\n", fedId);
errorType = 37;
break;
}
case (4): {
if (debug)
printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId);
errorType = 38;
break;
}
default:
if (debug)
printf("Cabling check returned unexpected result, status = %i\n", status);
};
return errorType;
}
__device__ bool rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) {
/// row and column in ROC representation
return ((rocRow < pixelgpudetails::numRowsInRoc) & (rocCol < pixelgpudetails::numColsInRoc));
}
__device__ bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); }
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint8_t checkROC(uint32_t errorWord,
uint8_t fedId,
uint32_t link,
const SiPixelROCsStatusAndMapping *cablingMap,
bool debug = false) {
uint8_t errorType = (errorWord >> sipixelconstants::ROC_shift) & sipixelconstants::ERROR_mask;
if (errorType < 25)
return 0;
bool errorFound = false;
switch (errorType) {
case (25): {
errorFound = true;
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + 1;
if (index > 1 && index <= cablingMap->size) {
if (!(link == cablingMap->link[index] && 1 == cablingMap->roc[index]))
errorFound = false;
}
if (debug and errorFound)
printf("Invalid ROC = 25 found (errorType = 25)\n");
break;
}
case (26): {
if (debug)
printf("Gap word found (errorType = 26)\n");
errorFound = true;
break;
}
case (27): {
if (debug)
printf("Dummy word found (errorType = 27)\n");
errorFound = true;
break;
}
case (28): {
if (debug)
printf("Error fifo nearly full (errorType = 28)\n");
errorFound = true;
break;
}
case (29): {
if (debug)
printf("Timeout on a channel (errorType = 29)\n");
if ((errorWord >> sipixelconstants::OMIT_ERR_shift) & sipixelconstants::OMIT_ERR_mask) {
if (debug)
printf("...first errorType=29 error, this gets masked out\n");
}
errorFound = true;
break;
}
case (30): {
if (debug)
printf("TBM error trailer (errorType = 30)\n");
int stateMatch_bits = 4;
int stateMatch_shift = 8;
uint32_t stateMatch_mask = ~(~uint32_t(0) << stateMatch_bits);
int stateMatch = (errorWord >> stateMatch_shift) & stateMatch_mask;
if (stateMatch != 1 && stateMatch != 8) {
if (debug)
printf("FED error 30 with unexpected State Bits (errorType = 30)\n");
}
if (stateMatch == 1)
errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30
errorFound = true;
break;
}
case (31): {
if (debug)
printf("Event number error (errorType = 31)\n");
errorFound = true;
break;
}
default:
errorFound = false;
};
return errorFound ? errorType : 0;
}
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint32_t getErrRawID(uint8_t fedId,
uint32_t errWord,
uint32_t errorType,
const SiPixelROCsStatusAndMapping *cablingMap,
bool debug = false) {
uint32_t rID = 0xffffffff;
switch (errorType) {
case 25:
case 30:
case 31:
case 36:
case 40: {
uint32_t roc = 1;
uint32_t link = sipixelconstants::getLink(errWord);
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
case 29: {
int chanNmbr = 0;
const int DB0_shift = 0;
const int DB1_shift = DB0_shift + 1;
const int DB2_shift = DB1_shift + 1;
const int DB3_shift = DB2_shift + 1;
const int DB4_shift = DB3_shift + 1;
const uint32_t DataBit_mask = ~(~uint32_t(0) << 1);
int CH1 = (errWord >> DB0_shift) & DataBit_mask;
int CH2 = (errWord >> DB1_shift) & DataBit_mask;
int CH3 = (errWord >> DB2_shift) & DataBit_mask;
int CH4 = (errWord >> DB3_shift) & DataBit_mask;
int CH5 = (errWord >> DB4_shift) & DataBit_mask;
int BLOCK_bits = 3;
int BLOCK_shift = 8;
uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits);
int BLOCK = (errWord >> BLOCK_shift) & BLOCK_mask;
int localCH = 1 * CH1 + 2 * CH2 + 3 * CH3 + 4 * CH4 + 5 * CH5;
if (BLOCK % 2 == 0)
chanNmbr = (BLOCK / 2) * 9 + localCH;
else
chanNmbr = ((BLOCK - 1) / 2) * 9 + 4 + localCH;
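// Worked example of the mapping above (derived from the two formulas, not from
// external documentation): BLOCK = 4 (even), localCH = 3 -> chanNmbr = (4 / 2) * 9 + 3 = 21;
// BLOCK = 5 (odd), localCH = 2 -> chanNmbr = ((5 - 1) / 2) * 9 + 4 + 2 = 24.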
if ((chanNmbr < 1) || (chanNmbr > 36))
break; // signifies unexpected result
uint32_t roc = 1;
uint32_t link = chanNmbr;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
case 37:
case 38: {
uint32_t roc = sipixelconstants::getROC(errWord);
uint32_t link = sipixelconstants::getLink(errWord);
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
default:
break;
};
return rID;
}
// Kernel to perform Raw to Digi conversion
__global__ void RawToDigi_kernel(const SiPixelROCsStatusAndMapping *cablingMap,
const unsigned char *modToUnp,
const uint32_t wordCounter,
const uint32_t *word,
const uint8_t *fedIds,
uint16_t *xx,
uint16_t *yy,
uint16_t *adc,
uint32_t *pdigi,
uint32_t *rawIdArr,
uint16_t *moduleId,
cms::cuda::SimpleVector<SiPixelErrorCompact> *err,
bool useQualityInfo,
bool includeErrors,
bool debug) {
//if (threadIdx.x==0) printf("Event: %u blockIdx.x: %u start: %u end: %u\n", eventno, blockIdx.x, begin, end);
int32_t first = threadIdx.x + blockIdx.x * blockDim.x;
for (int32_t iloop = first, nend = wordCounter; iloop < nend; iloop += blockDim.x * gridDim.x) {
auto gIndex = iloop;
xx[gIndex] = 0;
yy[gIndex] = 0;
adc[gIndex] = 0;
bool skipROC = false;
uint8_t fedId = fedIds[gIndex / 2]; // +1200;
// initialize (too many continue statements below)
pdigi[gIndex] = 0;
rawIdArr[gIndex] = 0;
moduleId[gIndex] = gpuClustering::invalidModuleId;
uint32_t ww = word[gIndex]; // Array containing 32 bit raw data
if (ww == 0) {
// 0 is an indicator of a noise/dead channel, skip these pixels during clusterization
continue;
}
uint32_t link = sipixelconstants::getLink(ww); // Extract link
uint32_t roc = sipixelconstants::getROC(ww); // Extract Roc in link
pixelgpudetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc);
uint8_t errorType = checkROC(ww, fedId, link, cablingMap, debug);
skipROC = (roc < pixelgpudetails::maxROCIndex) ? false : (errorType != 0);
if (includeErrors and skipROC) {
uint32_t rID = getErrRawID(fedId, ww, errorType, cablingMap, debug);
err->push_back(SiPixelErrorCompact{rID, ww, errorType, fedId});
continue;
}
uint32_t rawId = detId.rawId;
uint32_t rocIdInDetUnit = detId.rocInDet;
bool barrel = isBarrel(rawId);
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
if (useQualityInfo) {
skipROC = cablingMap->badRocs[index];
if (skipROC)
continue;
}
skipROC = modToUnp[index];
if (skipROC)
continue;
uint32_t layer = 0;
int side = 0, panel = 0, module = 0;
if (barrel) {
layer = (rawId >> pixelgpudetails::layerStartBit) & pixelgpudetails::layerMask;
module = (rawId >> pixelgpudetails::moduleStartBit) & pixelgpudetails::moduleMask;
side = (module < 5) ? -1 : 1;
} else {
// endcap ids
layer = 0;
panel = (rawId >> pixelgpudetails::panelStartBit) & pixelgpudetails::panelMask;
side = (panel == 1) ? -1 : 1;
}
// ***special case of layer 1 to be handled here
pixelgpudetails::Pixel localPix;
if (layer == 1) {
uint32_t col = sipixelconstants::getCol(ww);
uint32_t row = sipixelconstants::getRow(ww);
localPix.row = row;
localPix.col = col;
if (includeErrors) {
if (not rocRowColIsValid(row, col)) {
uint8_t error = conversionError(fedId, 3, debug); //use the device function and fill the arrays
err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("BPIX1 Error status: %i\n", error);
continue;
}
}
} else {
// ***conversion rules for dcol and pxid
uint32_t dcol = sipixelconstants::getDCol(ww);
uint32_t pxid = sipixelconstants::getPxId(ww);
uint32_t row = pixelgpudetails::numRowsInRoc - pxid / 2;
uint32_t col = dcol * 2 + pxid % 2;
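// Example of the decoding above, assuming numRowsInRoc = 80 and numColsInRoc = 52
// (the usual pixel ROC size, not restated in this file): the valid range
// dcol < 26, 2 <= pxid < 162 maps (dcol = 0, pxid = 2) to (row 79, col 0) and
// (dcol = 25, pxid = 161) to (row 0, col 51), i.e. it spans one full 80 x 52 ROC.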
localPix.row = row;
localPix.col = col;
if (includeErrors and not dcolIsValid(dcol, pxid)) {
uint8_t error = conversionError(fedId, 3, debug);
err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc);
continue;
}
}
pixelgpudetails::Pixel globalPix = frameConversion(barrel, side, layer, rocIdInDetUnit, localPix);
xx[gIndex] = globalPix.row; // origin shifting by 1 0-159
yy[gIndex] = globalPix.col; // origin shifting by 1 0-415
adc[gIndex] = sipixelconstants::getADC(ww);
pdigi[gIndex] = pixelgpudetails::pack(globalPix.row, globalPix.col, adc[gIndex]);
moduleId[gIndex] = detId.moduleId;
rawIdArr[gIndex] = rawId;
} // end of loop (gIndex < end)
} // end of Raw to Digi kernel
__global__ void fillHitsModuleStart(uint32_t const *__restrict__ clusInModule,
uint32_t *__restrict__ moduleStart,
uint32_t const *__restrict__ nModules,
uint32_t *__restrict__ nModules_Clusters) {
assert(gpuClustering::maxNumModules < 2048); // easy to extend at least till 32*1024
assert(1 == gridDim.x);
assert(0 == blockIdx.x);
int first = threadIdx.x;
// limit to maxHitsInModule()
for (int i = first, iend = gpuClustering::maxNumModules; i < iend; i += blockDim.x) {
moduleStart[i + 1] = std::min(gpuClustering::maxHitsInModule(), clusInModule[i]);
}
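// The prefix sum below is done in two halves because each blockPrefixScan call
// here covers at most 1024 entries: the first call scans moduleStart[1..1024],
// the second scans the remainder, and the loop that follows adds the running
// total of the first half (moduleStart[1024]) to the second half so that
// moduleStart ends up as a single cumulative sum over all modules.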
__shared__ uint32_t ws[32];
cms::cuda::blockPrefixScan(moduleStart + 1, moduleStart + 1, 1024, ws);
cms::cuda::blockPrefixScan(moduleStart + 1025, moduleStart + 1025, gpuClustering::maxNumModules - 1024, ws);
for (int i = first + 1025, iend = gpuClustering::maxNumModules + 1; i < iend; i += blockDim.x) {
moduleStart[i] += moduleStart[1024];
}
__syncthreads();
if (threadIdx.x == 0) {
// copy the number of modules
nModules_Clusters[0] = *nModules;
// last element holds the number of all clusters
nModules_Clusters[1] = moduleStart[gpuClustering::maxNumModules];
// element 96 is the start of BPIX2 (i.e. the number of clusters in BPIX1)
nModules_Clusters[2] = moduleStart[96];
}
#ifdef GPU_DEBUG
assert(0 == moduleStart[0]);
auto c0 = std::min(gpuClustering::maxHitsInModule(), clusInModule[0]);
assert(c0 == moduleStart[1]);
assert(moduleStart[1024] >= moduleStart[1023]);
assert(moduleStart[1025] >= moduleStart[1024]);
assert(moduleStart[gpuClustering::maxNumModules] >= moduleStart[1025]);
for (int i = first, iend = gpuClustering::maxNumModules + 1; i < iend; i += blockDim.x) {
if (0 != i)
assert(moduleStart[i] >= moduleStart[i - 1]);
// [BPX1, BPX2, BPX3, BPX4, FP1, FP2, FP3, FN1, FN2, FN3, LAST_VALID]
// [ 0, 96, 320, 672, 1184, 1296, 1408, 1520, 1632, 1744, 1856]
if (i == 96 || i == 1184 || i == 1744 || i == gpuClustering::maxNumModules)
printf("moduleStart %d %d\n", i, moduleStart[i]);
}
#endif
}
// Interface to outside
void SiPixelRawToClusterGPUKernel::makeClustersAsync(bool isRun2,
const SiPixelClusterThresholds clusterThresholds,
const SiPixelROCsStatusAndMapping *cablingMap,
const unsigned char *modToUnp,
const SiPixelGainForHLTonGPU *gains,
const WordFedAppender &wordFed,
SiPixelFormatterErrors &&errors,
const uint32_t wordCounter,
const uint32_t fedCounter,
const uint32_t maxFedWords,
bool useQualityInfo,
bool includeErrors,
bool debug,
cudaStream_t stream) {
// we're not opting for calling this function in case of empty events
assert(wordCounter != 0);
nDigis = wordCounter;
#ifdef GPU_DEBUG
std::cout << "decoding " << wordCounter << " digis. Max is " << maxFedWords << std::endl;
#endif
// since wordCounter != 0 we're not allocating 0 bytes,
digis_d = SiPixelDigisCUDA(wordCounter, stream);
if (includeErrors) {
digiErrors_d = SiPixelDigiErrorsCUDA(wordCounter, std::move(errors), stream);
}
clusters_d = SiPixelClustersCUDA(gpuClustering::maxNumModules, stream);
// Begin Raw2Digi block
{
const int threadsPerBlock = 512;
const int blocks = (wordCounter + threadsPerBlock - 1) / threadsPerBlock; // fill it all
assert(0 == wordCounter % 2);
// wordCounter is the total number of words in each event to be transferred to the device
auto word_d = cms::cuda::make_device_unique<uint32_t[]>(wordCounter, stream);
auto fedId_d = cms::cuda::make_device_unique<uint8_t[]>(wordCounter, stream);
cudaCheck(
cudaMemcpyAsync(word_d.get(), wordFed.word(), wordCounter * sizeof(uint32_t), cudaMemcpyDefault, stream));
cudaCheck(cudaMemcpyAsync(
fedId_d.get(), wordFed.fedId(), wordCounter * sizeof(uint8_t) / 2, cudaMemcpyDefault, stream));
// Launch rawToDigi kernel
RawToDigi_kernel<<<blocks, threadsPerBlock, 0, stream>>>(
cablingMap,
modToUnp,
wordCounter,
word_d.get(),
fedId_d.get(),
digis_d.view().xx(),
digis_d.view().yy(),
digis_d.view().adc(),
digis_d.view().pdigi(),
digis_d.view().rawIdArr(),
digis_d.view().moduleInd(),
digiErrors_d.error(), // returns nullptr if default-constructed
useQualityInfo,
includeErrors,
debug);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
if (includeErrors) {
digiErrors_d.copyErrorToHostAsync(stream);
}
}
// End of Raw2Digi and passing data for clustering
{
// clusterizer ...
using namespace gpuClustering;
int threadsPerBlock = 256;
int blocks =
(std::max(int(wordCounter), int(gpuClustering::maxNumModules)) + threadsPerBlock - 1) / threadsPerBlock;
if (isRun2)
gpuCalibPixel::calibDigis<true><<<blocks, threadsPerBlock, 0, stream>>>(digis_d.view().moduleInd(),
digis_d.view().xx(),
digis_d.view().yy(),
digis_d.view().adc(),
gains,
wordCounter,
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.clusModuleStart());
else
gpuCalibPixel::calibDigis<false><<<blocks, threadsPerBlock, 0, stream>>>(digis_d.view().moduleInd(),
digis_d.view().xx(),
digis_d.view().yy(),
digis_d.view().adc(),
gains,
wordCounter,
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.clusModuleStart());
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
#ifdef GPU_DEBUG
std::cout << "CUDA countModules kernel launch with " << blocks << " blocks of " << threadsPerBlock
<< " threads\n";
#endif
countModules<<<blocks, threadsPerBlock, 0, stream>>>(
digis_d.view().moduleInd(), clusters_d.moduleStart(), digis_d.view().clus(), wordCounter);
cudaCheck(cudaGetLastError());
threadsPerBlock = 256 + 128; /// should be larger than 6000/16 aka (maxPixInModule/maxiter in the kernel)
blocks = maxNumModules;
#ifdef GPU_DEBUG
std::cout << "CUDA findClus kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n";
#endif
findClus<<<blocks, threadsPerBlock, 0, stream>>>(digis_d.view().moduleInd(),
digis_d.view().xx(),
digis_d.view().yy(),
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.view().clus(),
wordCounter);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
// apply charge cut
clusterChargeCut<<<blocks, threadsPerBlock, 0, stream>>>(clusterThresholds,
digis_d.view().moduleInd(),
digis_d.view().adc(),
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.view().clus(),
wordCounter);
cudaCheck(cudaGetLastError());
// count the module start indices already here (instead of
// rechits) so that the number of clusters/hits can be made
// available in the rechit producer without additional points of
// synchronization/ExternalWork
auto nModules_Clusters_d = cms::cuda::make_device_unique<uint32_t[]>(3, stream);
// MUST be ONE block
fillHitsModuleStart<<<1, 1024, 0, stream>>>(
clusters_d.clusInModule(), clusters_d.clusModuleStart(), clusters_d.moduleStart(), nModules_Clusters_d.get());
// copy to host
nModules_Clusters_h = cms::cuda::make_host_unique<uint32_t[]>(3, stream);
cudaCheck(cudaMemcpyAsync(
nModules_Clusters_h.get(), nModules_Clusters_d.get(), 3 * sizeof(uint32_t), cudaMemcpyDefault, stream));
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
} // end clusterizer scope
}
} // namespace pixelgpudetails
|
60ace1ca8b9f536318fc47c32faaf5415cef225e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "parser.h"
#define BATCH_SIZE 20
/* BATCH_SIZE
*
* Why use BATCH_SIZE ? (Multiple images at once)
*
 * 0. Saturate Streaming Multiprocessors with enough computation BLOCKS
 * 1. Saturate Video RAM with enough computational jobs
*
* CRITERIA:
* - Deploy enough blocks (More than n * SM counts) for latency hiding
* - Saturate each block with enough threads
*/
/* NVIDIA GEFORCE GTX1080
* GPU SPEC:
* - warp_size: 32 threads
* - word_size: 4 Bytes
* - SM_count: 20 Streaming Multiprocessors
*
* SM SPEC:
* - max_warps: 64
* - max_thread_blocks : 32
* - max_threads: 2048
* - max_registers: 65536 words
* - CUDA_cores: 64 cores
* - share_memory: 64 kB
*
* BLOCK SPEC:
* - max_threads: 1024
* - max_registers: 65536 words
*
* THREAD SPEC:
* - max_registers: 255 words
*
* SHARED MEMORY SPEC:
* - 64 kB per SM
* - Composed of 32 memory bank hardwares
* - Does bank interleaving per every word (4 Bytes)
*
*/
/* Memory design
*
* 0. INPUT image data
* => ALL goes into global memory
*
* 1. Filter map data
* => Put as much as we can into constant memory (d_map), but leftover should go to global memory (d_map_spill)
*
* 2. Result data
* => Should go to global memory since write-once
*
* 3. What to cache into shared memory?
* => Bring Filter map data into shared_memory (only necessary part)
* => Bring INPUT data into shared_memory (only necessary part)
*
*/
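/* Back-of-the-envelope budget behind the d_map / d_map_spill split (a sketch
 * based on the memcpy sizes used in forward_GPU below and the usual 64 kB
 * constant-memory limit; the structs themselves come from parser.h):
 *
 * d_map: C1_param 6*1*5*5 + C1_bias 6 + C3_param 16*6*5*5 + C3_bias 16
 *        + F5_bias 120 + F6_param 84*120 + F6_bias 84
 *        + OUTPUT_param 10*84 + OUTPUT_bias 10 = 13,706 floats ~ 55 kB -> fits in constant memory
 * d_map_spill: F5_param alone is 120*400 = 48,000 floats ~ 192 kB -> stays in global memory
 */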
__constant__ int D_BATCH_SIZE;
__constant__ int D_NUM_TEST;
__constant__ __gpu_map__ d_map;
__device__ float sigmoid(float x)
{
return (1 / (1 + exp(-x)));
}
/*
* ARGUMENTS:
* - curr_step: Which step are we in? (In MAIN_LOOP)
* - stage: Stage number(ex; 1 means C1 layer, 3 means C3 layer)
* - num_output: Number of output maps
* - num_input: Number of input maps
* - height_input: Height of input maps
* - width_input: Width of input maps
* - size_filter: Size of filter map, 5 for LeNet-5
* - d_map + d_map_spill: Contains filter maps for all layers
* - inputs: Source of input images
* - outputs: Destination to store output(computed) images
* - size_input: Length of input 1D array, for fully connected layer
* - size_output: Length of output 1D array, for fully connected layer
*/
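/* Illustrative C1 launch (mirrors the configuration used in forward_GPU below,
 * shown here only to make the argument list above concrete):
 *
 *   dim3 grid(6, BATCH_SIZE);   // one block per (output map, image in batch)
 *   dim3 block(28, 28);         // one thread per output pixel
 *   hipLaunchKernelGGL(convolution_kernel, grid, block, 0, 0,
 *                      step, 1, 6, 1, 32, 32, 5, d_test_data, d_c1_results);
 */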
__global__ void // Convolution computation kernel
convolution_kernel(
int curr_step, int stage,
int num_output, int num_input, int height_input, int width_input,
int size_filter,
float *inputs, float *outputs
)
{
// Get index info
int BID_x = blockIdx.x; // foreach: output image ~6 or ~16
int BID_y = blockIdx.y; // foreach: BATCH among curr_step_inputs[BATCH_SIZE]
int TID_x = threadIdx.x; // foreach: output image row ~28 or ~10
int TID_y = threadIdx.y; // foreach: output image column ~28 or ~10
float acc = 0;
if (stage == 1)
{// C1_layer convolution: D_BATCH_SIZE * { [1 @ 32 * 32] .X [6 * 1 @ 5 * 5] => [6 @ 28 * 28] }
// Get the starting point from entire MNIST data set
float *input_start = inputs + (curr_step * D_BATCH_SIZE * (32 * 32)) + (BID_y * 32 * 32);
// Load data into shared memory
__shared__ float input[32][32];
// Do shared memory access in 32 stride to avoid shared memory bank conflict
int myCnt = 28 * TID_x + TID_y;
if (myCnt < 32)
{
for (int i = 0; i < 32; i++)
{
input[i][myCnt] = input_start[(32 * i) + myCnt];
}
}
__syncthreads();
__shared__ float filter[5][5]; // Only 25 entries -> No shared memory bank conflict
if (TID_x < size_filter && TID_y < size_filter)
{
filter[TID_x][TID_y] = d_map.C1_param[BID_x][0][TID_x][TID_y];
}
__syncthreads();
for (int f_row = 0; f_row < size_filter; f_row++)
{
for (int f_col = 0; f_col < size_filter; f_col++)
{
acc += input[TID_x + f_row][TID_y + f_col] * filter[f_row][f_col];
}
}
outputs[(BID_y * 6 * 28 * 28) + (BID_x * 28 * 28) + (TID_x * 28) + TID_y] = acc;
}
else // Desired stage = 3
{// C3_layer convolution: D_BATCH_SIZE * { [6 @ 14 * 14] .X [16 * 6 @ 5 * 5] => [16 @ 10 * 10] }
// Get the starting point from d_s2_results[BATCH_SIZE]
float *input_start = inputs + (BID_y * 6 * (14 * 14));
for (int c = 0; c < num_input; c++)
{// For every input channel, which isn't 1 for C3 layer
// Load data into shared memory
__shared__ float input[14][14];
// Do shared memory access in 14 strides to avoid shared memory bank conflict
int myCnt = 10 * TID_x + TID_y;
if (myCnt < 14)
{
for (int i = 0; i < 14; i++)
{
input[i][myCnt] = input_start[(c * 14 * 14) + (14 * i) + myCnt];
}
}
__syncthreads();
__shared__ float filter[5][5]; // Only 25 entries -> No shared memory bank conflict
if (TID_x < size_filter && TID_y < size_filter)
{
filter[TID_x][TID_y] = d_map.C3_param[BID_x][c][TID_x][TID_y];
}
__syncthreads();
for (int f_row = 0; f_row < size_filter; f_row++)
{
for (int f_col = 0; f_col < size_filter; f_col++)
{
acc += input[TID_x + f_row][TID_y + f_col] * filter[f_row][f_col];
}
}
__syncthreads(); // make sure every thread is done with this channel's tile before the next iteration overwrites it
}
outputs[(BID_y * 16 * 10 * 10) + (BID_x * 10 * 10) + (TID_x * 10) + TID_y] = acc;
}
return;
}
__global__ void // Pooling computation kernel
pooling_kernel(
int curr_step, int stage,
int num_output, int height_input, int width_input,
float *inputs, float *outputs
)
{
// Get index info
int BID_x = blockIdx.x; // foreach: output image ~6 or ~16
int BID_y = blockIdx.y; // foreach: BATCH among curr_step_inputs[BATCH_SIZE]
int TID_x = threadIdx.x; // foreach: output image row ~14 or ~5
int TID_y = threadIdx.y; // foreach: output image column ~14 or ~5
float acc = 0;
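// Both branches implement LeNet-5 style subsampling: each non-overlapping 2x2
// window is averaged (hence the division by 4 below) and the per-map bias plus
// sigmoid is applied when the result is written out.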
if (stage == 2)
{// S2_layer pooling: D_BATCH_SIZE * { Sigmoid([6 @ 28 * 28] + bias[6]) => [6 @ 14 * 14] }
// No need to load C1_bias since it will be cached into L1
float *input_start = inputs + (BID_y * 6 * 28 * 28) + (BID_x * 28 * 28);
for (int s_row = 0; s_row < 2; s_row++)
{
for (int s_col = 0; s_col < 2; s_col++)
{
acc += input_start[(28 * (2 * TID_x + s_row)) + (2 * TID_y + s_col)] / 4;
}
}
outputs[(BID_y * 6 * 14 * 14) + (BID_x * 14 * 14) + (TID_x * 14) + TID_y] = sigmoid(acc + d_map.C1_bias[BID_x]);
}
else // Desired stage = 4
{// S4_layer pooling: D_BATCH_SIZE * { Sigmoid([16 @ 10 * 10] + bias[16]) => [16 @ 5 * 5] }
// No need to load C3_bias since it will be cached into L1
float *input_start = inputs + (BID_y * 16 * 10 * 10) + (BID_x * 10 * 10);
for (int s_row = 0; s_row < 2; s_row++)
{
for (int s_col = 0; s_col < 2; s_col++)
{
acc += input_start[(10 * (2 * TID_x + s_row)) + (2 * TID_y + s_col)] / 4;
}
}
outputs[(BID_y * 16 * 5 * 5) + (BID_x * 5 * 5) + (TID_x * 5) + TID_y] = sigmoid(acc + d_map.C3_bias[BID_x]);
}
return;
}
__global__ void // Fully connecting computation kernel
fullyConnect_kernel(
int curr_step, int stage,
int size_input, int size_output,
__gpu_map_spill__ *d_map_spill,
float *inputs, float *outputs
)
{
// This layer is pretty much a simple matrix multiplication (e.g. [120][400] X [400][1] => [120][1])
int BID_x = blockIdx.x; // I will divide the output rows (120 or 84) into 4 segments, to acquire more blocks for latency hiding
int BID_y = blockIdx.y; // Unit position in BATCH_SIZE
int TID_x = threadIdx.x; // Thread ID. threads ~400 or ~120
if (stage == 5)
{// F5_layer full connection: D_BATCH_SIZE * { Sigmoid([120 * 400] X Serial[16 @ 5 * 5] + bias[120 * 1]) => [120 * 1] }
// Load input data into shared memory
// Loading F5_param is unnecessary, since elements in F5_param are only for one-shot use
__shared__ float prod_elementwise[400];
__shared__ float input[400];
if (TID_x < 20)
{// Take 20 strides to avoid shared memory bank conflict
for (int i = 0; i < (400 / 20); i++)
{
input[(i * 20) + TID_x] = inputs[(BID_y * 400) + (i * 20) + TID_x];
}
}
__syncthreads();
for (int i = 0; i < (120 / 4); i++)
{
prod_elementwise[TID_x] = (*d_map_spill).F5_param[((BID_x * (120 / 4)) + i)][TID_x] * input[TID_x];
__syncthreads();
if (TID_x == 0)
{
float prod_sum = 0;
for (int j = 0; j < 400; j++)
{
prod_sum += prod_elementwise[j];
}
outputs[(BID_y * 120) + (BID_x * (120 / 4)) + i] = sigmoid(prod_sum + d_map.F5_bias[(BID_x * (120 / 4) + i)]);
}
}
}
else // Desired stage = 6
{// F6_layer full connection: D_BATCH_SIZE * { Sigmoid([84 * 120] X [120 * 1] + bias[84 * 1]) => [84 * 1] }
// Load input data into shared memory
// Loading F6_param is unnecessary, since elements in F6_param are only for one-shot use
__shared__ float prod_elementwise[120];
__shared__ float input[120];
if (TID_x < 20)
{// Take 20 strides to avoid shared memory bank conflict
for (int i = 0; i < (120 / 20); i++)
{
input[(i * 20) + TID_x] = inputs[(BID_y * 120) + (i * 20) + TID_x];
}
}
__syncthreads();
for (int i = 0; i < (84 / 4); i++)
{
prod_elementwise[TID_x] = d_map.F6_param[(BID_x * (120 / 4)) + i][TID_x] * input[TID_x];
__syncthreads();
if (TID_x == 0)
{
float prod_sum = 0;
for (int j = 0; j < 120; j++)
{
prod_sum += prod_elementwise[j];
}
outputs[(BID_y * 84) + (BID_x * (84 / 4)) + i] = sigmoid(prod_sum + d_map.F6_bias[(BID_x * (84 / 4)) + i]);
}
}
}
return;
}
__global__ void // Output layer computation kernel
output_kernel(
int curr_step, int stage,
int size_input, int size_output,
__gpu_map_spill__ *d_map_spill,
float *inputs, float *outputs
)
{
// OUTPUT_layer: D_BATCH_SIZE * { [10 * 84] X [84 * 1] + [10 * 1] => [10 * 1] }
// Get index info
int BID_y = blockIdx.y; // foreach: BATCH among curr_step_inputs[BATCH_SIZE]
int TID_x = threadIdx.x; // foreach: elements in a row
// Load data into shared memory
__shared__ float OUTPUT_param[10][84];
if (TID_x < 21)
{
for (int i = 0; i < 10; i++)
{
for (int j = 0; j < 4; j++)
{
OUTPUT_param[i][(j * 21) + TID_x] = d_map.OUTPUT_param[i][(j * 21) + TID_x];
}
}
}
__syncthreads();
__shared__ float input[84];
if (TID_x < 21)
{
for (int i = 0; i < 4; i++)
{
input[(i * 21) + TID_x] = inputs[(BID_y * 84) + (i * 21) + TID_x];
}
}
__syncthreads();
__shared__ float prod_elementwise[84];
for (int i = 0; i < 10; i++)
{
prod_elementwise[TID_x] = OUTPUT_param[i][TID_x] * input[TID_x];
__syncthreads();
if (TID_x == 0)
{
float prod_sum = 0;
for (int j = 0; j < 84; j++)
{
prod_sum += prod_elementwise[j];
}
outputs[(curr_step * D_BATCH_SIZE * 10) + (BID_y * 10) + i] = prod_sum + d_map.OUTPUT_bias[i];
}
}
return;
}
__global__ void // Number determination kernel
numberDetermine_kernel(
int curr_step, int stage,
float *inputs, int *outputs
)
{
// NUMBER_layer: D_NUM_TEST * { ReduceMax[10 * 1] => SINGLE_DIGIT }
// Get index info
int BID_x = blockIdx.x; // 100
int TID_x = threadIdx.x; // 100
int index_image = (BID_x * 100) + TID_x;
float highest_prob = inputs[(index_image * 10) + 0];
int ans = 0;
for (int i = 1; i < 10; i++)
{
if (inputs[(index_image * 10) + i] > highest_prob)
{
highest_prob = inputs[(index_image * 10) + i];
ans = i;
}
}
outputs[index_image] = ans;
return;
}
void forward_GPU(float **ptr_test_data, int **ptr_test_label, __map__ *map, int *cnt_correct)
{// Deploy forward computation job on GPU
float *test_data = *ptr_test_data;
int *test_label = *ptr_test_label;
// Acquire memory space in GPU
// Prefix "d_" means ADDRESS in device memory
// Handlers for device memory manipulation
int *inferences = (int *) malloc(sizeof(int) * NUM_TEST);
int *d_inferences;
float *d_test_data;
__gpu_map_spill__ *d_map_spill;
float *d_c1_results;
float *d_s2_results;
float *d_c3_results;
float *d_s4_results;
float *d_f5_results;
float *d_f6_results;
float *d_output_results;
// WARNING: MALLOC 1
__gpu_map__ *tmp_map = (__gpu_map__ *) malloc(sizeof(__gpu_map__));
__gpu_map_spill__ *tmp_map_spill = (__gpu_map_spill__ *) malloc(sizeof(__gpu_map_spill__));
assert(tmp_map != NULL && "MALLOC FAILED!\n");
assert(tmp_map_spill != NULL && "MALLOC FAILED!\n");
// Fill in gpu_map data
// tmp_map = map - F5_param
memcpy((*tmp_map).C1_param, (*map).C1_param, sizeof(float) * 6 * 1 * 5 * 5);
memcpy((*tmp_map).C1_bias, (*map).C1_bias, sizeof(float) * 6);
memcpy((*tmp_map).C3_param, (*map).C3_param, sizeof(float) * 16 * 6 * 5 * 5);
memcpy((*tmp_map).C3_bias, (*map).C3_bias, sizeof(float) * 16);
memcpy((*tmp_map).F5_bias, (*map).F5_bias, sizeof(float) * 120);
memcpy((*tmp_map).F6_param, (*map).F6_param, sizeof(float) * 84 * 120);
memcpy((*tmp_map).F6_bias, (*map).F6_bias, sizeof(float) * 84);
memcpy((*tmp_map).OUTPUT_param, (*map).OUTPUT_param, sizeof(float) * 10 * 84);
memcpy((*tmp_map).OUTPUT_bias, (*map).OUTPUT_bias, sizeof(float) * 10);
// tmp_map_spill = F5 param
memcpy((*tmp_map_spill).F5_param, (*map).F5_param, sizeof(float) * 120 * 400);
// Fix NUM_TEST into d_NUM_TEST so d_NUM_TEST can be multiple of BATCH_SIZE, so we can walk in stride
int d_NUM_TEST = ((int) ceil((double) ((float) NUM_TEST / (float) BATCH_SIZE))) * BATCH_SIZE;
int batch_size = BATCH_SIZE;
// WARNING: MALLOC 0
hipMalloc((void **) &d_inferences, sizeof(int) * NUM_TEST);
hipMalloc((void **) &d_test_data, sizeof(float) * NUM_TEST * 32 * 32);
hipMalloc((void **) &d_map_spill, sizeof(__gpu_map_spill__));
hipMalloc((void **) &d_c1_results, sizeof(float) * BATCH_SIZE * 6 * 28 * 28);
hipMalloc((void **) &d_s2_results, sizeof(float) * BATCH_SIZE * 6 * 14 * 14);
hipMalloc((void **) &d_c3_results, sizeof(float) * BATCH_SIZE * 16 * 10 * 10);
hipMalloc((void **) &d_s4_results, sizeof(float) * BATCH_SIZE * 16 * 5 * 5);
hipMalloc((void **) &d_f5_results, sizeof(float) * BATCH_SIZE * 120);
hipMalloc((void **) &d_f6_results, sizeof(float) * BATCH_SIZE * 84);
hipMalloc((void **) &d_output_results, sizeof(float) * NUM_TEST * 10);
// CUDA memcpy from host to device
//hipMemcpyToSymbol(D_NUM_TEST, &d_NUM_TEST, sizeof(int), 0, hipMemcpyHostToDevice);
//hipMemcpyToSymbol(D_BATCH_SIZE, &batch_size, sizeof(int), 0, hipMemcpyHostToDevice);
//hipMemcpyToSymbol(d_map, tmp_map, sizeof(__gpu_map__), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(D_NUM_TEST, (void *) &d_NUM_TEST, sizeof(int));
hipMemcpyToSymbol(D_BATCH_SIZE, (void *) &batch_size, sizeof(int));
hipMemcpyToSymbol(d_map, (void *) tmp_map, sizeof(__gpu_map__));
hipMemcpy(d_map_spill, tmp_map_spill, sizeof(__gpu_map_spill__), hipMemcpyHostToDevice);
// Copy the MNIST test images to the device; d_test_data is read by the C1 convolution kernel
hipMemcpy(d_test_data, test_data, sizeof(float) * NUM_TEST * 32 * 32, hipMemcpyHostToDevice);
// WARNING: FREE 1
free(tmp_map);
free(tmp_map_spill);
// ENTERING MAIN LOOP
int step = 0;
dim3 block;
dim3 thread;
for (int step = 0; (step * BATCH_SIZE) < d_NUM_TEST; step++)
{// Advance step by step, with BATCH_SIZE stride
// 0. Convolution layer C1
block.x = 6;
block.y = BATCH_SIZE;
block.z = 1;
thread.x = 28;
thread.y = 28;
thread.z = 1;
hipLaunchKernelGGL(( convolution_kernel), dim3(block), dim3(thread), 0, 0, step, 1, 6, 1, 32, 32, 5, d_test_data, d_c1_results);
// 1. Pooling layer S2
block.x = 6;
block.y = BATCH_SIZE;
block.z = 1;
thread.x = 14;
thread.y = 14;
thread.z = 1;
hipLaunchKernelGGL(( pooling_kernel), dim3(block), dim3(thread), 0, 0, step, 2, 6, 28, 28, d_c1_results, d_s2_results);
// 2. Convolution layer C3
block.x = 16;
block.y = BATCH_SIZE;
block.z = 1;
thread.x = 10;
thread.y = 10;
thread.z = 1;
hipLaunchKernelGGL(( convolution_kernel), dim3(block), dim3(thread), 0, 0, step, 3, 16, 6, 14, 14, 5, d_s2_results, d_c3_results);
// 3. Pooling layer S4
block.x = 16;
block.y = BATCH_SIZE;
block.z = 1;
thread.x = 5;
thread.y = 5;
thread.z = 1;
hipLaunchKernelGGL(( pooling_kernel), dim3(block), dim3(thread), 0, 0, step, 4, 16, 10, 10, d_c3_results, d_s4_results);
// 4. Fully connected layer F5
block.x = 4;
block.y = BATCH_SIZE;
block.z = 1;
thread.x = 400;
thread.y = 1;
thread.z = 1;
hipLaunchKernelGGL(( fullyConnect_kernel), dim3(block), dim3(thread), 0, 0, step, 5, 400, 120, d_map_spill, d_s4_results, d_f5_results);
// 5. Fully connected layer F6
block.x = 4;
block.y = BATCH_SIZE;
block.z = 1;
thread.x = 120;
thread.y = 1;
thread.z = 1;
hipLaunchKernelGGL(( fullyConnect_kernel), dim3(block), dim3(thread), 0, 0, step, 6, 120, 84, d_map_spill, d_f5_results, d_f6_results);
// 6. Output layer OUTPUT
block.x = 1;
block.y = BATCH_SIZE;
block.z = 1;
thread.x = 84;
thread.y = 1;
thread.z = 1;
hipLaunchKernelGGL(( output_kernel), dim3(block), dim3(thread), 0, 0, step, 7, 84, 10, d_map_spill, d_f6_results, d_output_results);
}
// 7. Determine numbers
block.x = 100;
block.y = 1;
block.z = 1;
thread.x = 100;
thread.y = 1;
thread.z = 1;
hipLaunchKernelGGL(( numberDetermine_kernel), dim3(block), dim3(thread), 0, 0, 8, 8, d_output_results, d_inferences);
// 8. Copy inference answers to Host
hipMemcpy(inferences, d_inferences, sizeof(int) * NUM_TEST, hipMemcpyDeviceToHost);
// 9. Scoring
for (int i = 0; i < NUM_TEST; i++)
{
if (inferences[i] == test_label[i])
{
(*cnt_correct)++;
}
}
// WARNING: FREE 0
free(inferences);
hipFree(d_inferences);
hipFree(d_map_spill);
hipFree(d_test_data);
hipFree(d_c1_results);
hipFree(d_s2_results);
hipFree(d_c3_results);
hipFree(d_s4_results);
hipFree(d_f5_results);
hipFree(d_f6_results);
hipFree(d_output_results);
return;
}
| 60ace1ca8b9f536318fc47c32faaf5415cef225e.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "parser.h"
#define BATCH_SIZE 20
/* BATCH_SIZE
*
* Why use BATCH_SIZE ? (Multiple images at once)
*
 * 0. Saturate Streaming Multiprocessors with enough computation BLOCKS
 * 1. Saturate Video RAM with enough computational jobs
*
* CRITERIA:
* - Deploy enough blocks (More than n * SM counts) for latency hiding
* - Saturate each block with enough threads
*/
/* NVIDIA GEFORCE GTX1080
* GPU SPEC:
* - warp_size: 32 threads
* - word_size: 4 Bytes
* - SM_count: 20 Streaming Multiprocessors
*
* SM SPEC:
* - max_warps: 64
* - max_thread_blocks : 32
* - max_threads: 2048
* - max_registers: 65536 words
* - CUDA_cores: 64 cores
* - share_memory: 64 kB
*
* BLOCK SPEC:
* - max_threads: 1024
* - max_registers: 65536 words
*
* THREAD SPEC:
* - max_registers: 255 words
*
* SHARED MEMORY SPEC:
* - 64 kB per SM
* - Composed of 32 memory bank hardwares
* - Does bank interleaving per every word (4 Bytes)
*
*/
/* Memory design
*
* 0. INPUT image data
* => ALL goes into global memory
*
* 1. Filter map data
* => Put as much as we can into constant memory (d_map), but leftover should go to global memory (d_map_spill)
*
* 2. Result data
* => Should go to global memory since write-once
*
* 3. What to cache into shared memory?
* => Bring Filter map data into shared_memory (only necessary part)
* => Bring INPUT data into shared_memory (only necessary part)
*
*/
__constant__ int D_BATCH_SIZE;
__constant__ int D_NUM_TEST;
__constant__ __gpu_map__ d_map;
__device__ float sigmoid(float x)
{
return (1 / (1 + exp(-x)));
}
/*
* ARGUMENTS:
* - curr_step: Which step are we in? (In MAIN_LOOP)
* - stage: Stage number(ex; 1 means C1 layer, 3 means C3 layer)
* - num_output: Number of output maps
* - num_input: Number of input maps
* - height_input: Height of input maps
* - width_input: Width of input maps
* - size_filter: Size of filter map, 5 for LeNet-5
* - d_map + d_map_spill: Contains filter maps for all layers
* - inputs: Source of input images
* - outputs: Destination to store output(computed) images
* - size_input: Length of input 1D array, for fully connected layer
* - size_output: Length of output 1D array, for fully connected layer
*/
__global__ void // Convolution computation kernel
convolution_kernel(
int curr_step, int stage,
int num_output, int num_input, int height_input, int width_input,
int size_filter,
float *inputs, float *outputs
)
{
// Get index info
int BID_x = blockIdx.x; // foreach: output image ~6 or ~16
int BID_y = blockIdx.y; // foreach: BATCH among curr_step_inputs[BATCH_SIZE]
int TID_x = threadIdx.x; // foreach: output image row ~28 or ~10
int TID_y = threadIdx.y; // foreach: output image column ~28 or ~10
float acc = 0;
if (stage == 1)
{// C1_layer convolution: D_BATCH_SIZE * { [1 @ 32 * 32] .X [6 * 1 @ 5 * 5] => [6 @ 28 * 28] }
// Get the starting point from entire MNIST data set
float *input_start = inputs + (curr_step * D_BATCH_SIZE * (32 * 32)) + (BID_y * 32 * 32);
// Load data into shared memory
__shared__ float input[32][32];
// Do shared memory access in 32 stride to avoid shared memory bank conflict
int myCnt = 28 * TID_x + TID_y;
if (myCnt < 32)
{
for (int i = 0; i < 32; i++)
{
input[i][myCnt] = input_start[(32 * i) + myCnt];
}
}
__syncthreads();
__shared__ float filter[5][5]; // Only 25 entries -> No shared memory bank conflict
if (TID_x < size_filter && TID_y < size_filter)
{
filter[TID_x][TID_y] = d_map.C1_param[BID_x][0][TID_x][TID_y];
}
__syncthreads();
for (int f_row = 0; f_row < size_filter; f_row++)
{
for (int f_col = 0; f_col < size_filter; f_col++)
{
acc += input[TID_x + f_row][TID_y + f_col] * filter[f_row][f_col];
}
}
outputs[(BID_y * 6 * 28 * 28) + (BID_x * 28 * 28) + (TID_x * 28) + TID_y] = acc;
}
else // Desired stage = 3
{// C3_layer convolution: D_BATCH_SIZE * { [6 @ 14 * 14] .X [16 * 6 @ 5 * 5] => [16 @ 10 * 10] }
// Get the starting point from d_s2_results[BATCH_SIZE]
float *input_start = inputs + (BID_y * 6 * (14 * 14));
for (int c = 0; c < num_input; c++)
{// For every input channel, which isn't 1 for C3 layer
// Load data into shared memory
__shared__ float input[14][14];
// Do shared memory access in 14 strides to avoid shared memory bank conflict
int myCnt = 10 * TID_x + TID_y;
if (myCnt < 14)
{
for (int i = 0; i < 14; i++)
{
input[i][myCnt] = input_start[(c * 14 * 14) + (14 * i) + myCnt];
}
}
__syncthreads();
__shared__ float filter[5][5]; // Only 25 entries -> No shared memory bank conflict
if (TID_x < size_filter && TID_y < size_filter)
{
filter[TID_x][TID_y] = d_map.C3_param[BID_x][c][TID_x][TID_y];
}
__syncthreads();
for (int f_row = 0; f_row < size_filter; f_row++)
{
for (int f_col = 0; f_col < size_filter; f_col++)
{
acc += input[TID_x + f_row][TID_y + f_col] * filter[f_row][f_col];
}
}
__syncthreads(); // make sure every thread is done with this channel's tile before the next iteration overwrites it
}
outputs[(BID_y * 16 * 10 * 10) + (BID_x * 10 * 10) + (TID_x * 10) + TID_y] = acc;
}
return;
}
__global__ void // Pooling computation kernel
pooling_kernel(
int curr_step, int stage,
int num_output, int height_input, int width_input,
float *inputs, float *outputs
)
{
// Get index info
int BID_x = blockIdx.x; // foreach: output image ~6 or ~16
int BID_y = blockIdx.y; // foreach: BATCH among curr_step_inputs[BATCH_SIZE]
int TID_x = threadIdx.x; // foreach: output image row ~14 or ~5
int TID_y = threadIdx.y; // foreach: output image column ~14 or ~5
float acc = 0;
if (stage == 2)
{// S2_layer pooling: D_BATCH_SIZE * { Sigmoid([6 @ 28 * 28] + bias[6]) => [6 @ 14 * 14] }
// No need to load C1_bias since it will be cached into L1
float *input_start = inputs + (BID_y * 6 * 28 * 28) + (BID_x * 28 * 28);
for (int s_row = 0; s_row < 2; s_row++)
{
for (int s_col = 0; s_col < 2; s_col++)
{
acc += input_start[(28 * (2 * TID_x + s_row)) + (2 * TID_y + s_col)] / 4;
}
}
outputs[(BID_y * 6 * 14 * 14) + (BID_x * 14 * 14) + (TID_x * 14) + TID_y] = sigmoid(acc + d_map.C1_bias[BID_x]);
}
else // Desired stage = 4
{// S4_layer pooling: D_BATCH_SIZE * { Sigmoid([16 @ 10 * 10] + bias[16]) => [16 @ 5 * 5] }
// No need to load C3_bias since it will be cached into L1
float *input_start = inputs + (BID_y * 16 * 10 * 10) + (BID_x * 10 * 10);
for (int s_row = 0; s_row < 2; s_row++)
{
for (int s_col = 0; s_col < 2; s_col++)
{
acc += input_start[(10 * (2 * TID_x + s_row)) + (2 * TID_y + s_col)] / 4;
}
}
outputs[(BID_y * 16 * 5 * 5) + (BID_x * 5 * 5) + (TID_x * 5) + TID_y] = sigmoid(acc + d_map.C3_bias[BID_x]);
}
return;
}
__global__ void // Fully connecting computation kernel
fullyConnect_kernel(
int curr_step, int stage,
int size_input, int size_output,
__gpu_map_spill__ *d_map_spill,
float *inputs, float *outputs
)
{
// This layer is pretty much a simple matrix multiplication (e.g. [120][400] X [400][1] => [120][1])
int BID_x = blockIdx.x; // I will divide the output rows (120 or 84) into 4 segments, to acquire more blocks for latency hiding
int BID_y = blockIdx.y; // Unit position in BATCH_SIZE
int TID_x = threadIdx.x; // Thread ID. threads ~400 or ~120
if (stage == 5)
{// F5_layer full connection: D_BATCH_SIZE * { Sigmoid([120 * 400] X Serial[16 @ 5 * 5] + bias[120 * 1]) => [120 * 1] }
// Load input data into shared memory
// Loading F5_param is unnecessary, since elements in F5_param are only for one-shot use
__shared__ float prod_elementwise[400];
__shared__ float input[400];
if (TID_x < 20)
{// Take 20 strides to avoid shared memory bank conflict
for (int i = 0; i < (400 / 20); i++)
{
input[(i * 20) + TID_x] = inputs[(BID_y * 400) + (i * 20) + TID_x];
}
}
__syncthreads();
for (int i = 0; i < (120 / 4); i++)
{
prod_elementwise[TID_x] = (*d_map_spill).F5_param[((BID_x * (120 / 4)) + i)][TID_x] * input[TID_x];
__syncthreads();
if (TID_x == 0)
{
float prod_sum = 0;
for (int j = 0; j < 400; j++)
{
prod_sum += prod_elementwise[j];
}
outputs[(BID_y * 120) + (BID_x * (120 / 4)) + i] = sigmoid(prod_sum + d_map.F5_bias[(BID_x * (120 / 4) + i)]);
}
}
}
else // Desired stage = 6
{// F6_layer full connection: D_BATCH_SIZE * { Sigmoid([84 * 120] X [120 * 1] + bias[84 * 1]) => [84 * 1] }
// Load input data into shared memory
// Loading F6_param is unnecessary, since elements in F6_param are only for one-shot use
__shared__ float prod_elementwise[120];
__shared__ float input[120];
if (TID_x < 20)
{// Take 20 strides to avoid shared memory bank conflict
for (int i = 0; i < (120 / 20); i++)
{
input[(i * 20) + TID_x] = inputs[(BID_y * 120) + (i * 20) + TID_x];
}
}
__syncthreads();
for (int i = 0; i < (84 / 4); i++)
{
prod_elementwise[TID_x] = d_map.F6_param[(BID_x * (120 / 4)) + i][TID_x] * input[TID_x];
__syncthreads();
if (TID_x == 0)
{
float prod_sum = 0;
for (int j = 0; j < 120; j++)
{
prod_sum += prod_elementwise[j];
}
outputs[(BID_y * 84) + (BID_x * (84 / 4)) + i] = sigmoid(prod_sum + d_map.F6_bias[(BID_x * (84 / 4)) + i]);
}
}
}
return;
}
__global__ void // Output layer computation kernel
output_kernel(
int curr_step, int stage,
int size_input, int size_output,
__gpu_map_spill__ *d_map_spill,
float *inputs, float *outputs
)
{
// OUTPUT_layer: D_BATCH_SIZE * { [10 * 84] X [84 * 1] + [10 * 1] => [10 * 1] }
// Get index info
int BID_y = blockIdx.y; // foreach: BATCH among curr_step_inputs[BATCH_SIZE]
int TID_x = threadIdx.x; // foreach: elements in a row
// Load data into shared memory
__shared__ float OUTPUT_param[10][84];
if (TID_x < 21)
{
for (int i = 0; i < 10; i++)
{
for (int j = 0; j < 4; j++)
{
OUTPUT_param[i][(j * 21) + TID_x] = d_map.OUTPUT_param[i][(j * 21) + TID_x];
}
}
}
__syncthreads();
__shared__ float input[84];
if (TID_x < 21)
{
for (int i = 0; i < 4; i++)
{
input[(i * 21) + TID_x] = inputs[(BID_y * 84) + (i * 21) + TID_x];
}
}
__syncthreads();
__shared__ float prod_elementwise[84];
for (int i = 0; i < 10; i++)
{
prod_elementwise[TID_x] = OUTPUT_param[i][TID_x] * input[TID_x];
__syncthreads();
if (TID_x == 0)
{
float prod_sum = 0;
for (int j = 0; j < 84; j++)
{
prod_sum += prod_elementwise[j];
}
outputs[(curr_step * D_BATCH_SIZE * 10) + (BID_y * 10) + i] = prod_sum + d_map.OUTPUT_bias[i];
}
}
return;
}
__global__ void // Number determination kernel
numberDetermine_kernel(
int curr_step, int stage,
float *inputs, int *outputs
)
{
// NUMBER_layer: D_NUM_TEST * { ReduceMax[10 * 1] => SINGLE_DIGIT }
// Get index info
int BID_x = blockIdx.x; // 100
int TID_x = threadIdx.x; // 100
int index_image = (BID_x * 100) + TID_x;
float highest_prob = inputs[(index_image * 10) + 0];
int ans = 0;
for (int i = 1; i < 10; i++)
{
if (inputs[(index_image * 10) + i] > highest_prob)
{
highest_prob = inputs[(index_image * 10) + i];
ans = i;
}
}
outputs[index_image] = ans;
return;
}
void forward_GPU(float **ptr_test_data, int **ptr_test_label, __map__ *map, int *cnt_correct)
{// Deploy forward computation job on GPU
float *test_data = *ptr_test_data;
int *test_label = *ptr_test_label;
// Acquire memory space in GPU
// Prefix "d_" means ADDRESS in device memory
// Handlers for device memory manipulation
int *inferences = (int *) malloc(sizeof(int) * NUM_TEST);
int *d_inferences;
float *d_test_data;
__gpu_map_spill__ *d_map_spill;
float *d_c1_results;
float *d_s2_results;
float *d_c3_results;
float *d_s4_results;
float *d_f5_results;
float *d_f6_results;
float *d_output_results;
// WARNING: MALLOC 1
__gpu_map__ *tmp_map = (__gpu_map__ *) malloc(sizeof(__gpu_map__));
__gpu_map_spill__ *tmp_map_spill = (__gpu_map_spill__ *) malloc(sizeof(__gpu_map_spill__));
assert(tmp_map != NULL && "MALLOC FAILED!\n");
assert(tmp_map_spill != NULL && "MALLOC FAILED!\n");
// Fill in gpu_map data
// tmp_map = map - F5_param
memcpy((*tmp_map).C1_param, (*map).C1_param, sizeof(float) * 6 * 1 * 5 * 5);
memcpy((*tmp_map).C1_bias, (*map).C1_bias, sizeof(float) * 6);
memcpy((*tmp_map).C3_param, (*map).C3_param, sizeof(float) * 16 * 6 * 5 * 5);
memcpy((*tmp_map).C3_bias, (*map).C3_bias, sizeof(float) * 16);
memcpy((*tmp_map).F5_bias, (*map).F5_bias, sizeof(float) * 120);
memcpy((*tmp_map).F6_param, (*map).F6_param, sizeof(float) * 84 * 120);
memcpy((*tmp_map).F6_bias, (*map).F6_bias, sizeof(float) * 84);
memcpy((*tmp_map).OUTPUT_param, (*map).OUTPUT_param, sizeof(float) * 10 * 84);
memcpy((*tmp_map).OUTPUT_bias, (*map).OUTPUT_bias, sizeof(float) * 10);
// tmp_map_spill = F5 param
memcpy((*tmp_map_spill).F5_param, (*map).F5_param, sizeof(float) * 120 * 400);
// Fix NUM_TEST into d_NUM_TEST so d_NUM_TEST can be multiple of BATCH_SIZE, so we can walk in stride
int d_NUM_TEST = ((int) ceil((double) ((float) NUM_TEST / (float) BATCH_SIZE))) * BATCH_SIZE;
int batch_size = BATCH_SIZE;
// WARNING: MALLOC 0
cudaMalloc((void **) &d_inferences, sizeof(int) * NUM_TEST);
cudaMalloc((void **) &d_test_data, sizeof(float) * NUM_TEST * 32 * 32);
cudaMalloc((void **) &d_map_spill, sizeof(__gpu_map_spill__));
cudaMalloc((void **) &d_c1_results, sizeof(float) * BATCH_SIZE * 6 * 28 * 28);
cudaMalloc((void **) &d_s2_results, sizeof(float) * BATCH_SIZE * 6 * 14 * 14);
cudaMalloc((void **) &d_c3_results, sizeof(float) * BATCH_SIZE * 16 * 10 * 10);
cudaMalloc((void **) &d_s4_results, sizeof(float) * BATCH_SIZE * 16 * 5 * 5);
cudaMalloc((void **) &d_f5_results, sizeof(float) * BATCH_SIZE * 120);
cudaMalloc((void **) &d_f6_results, sizeof(float) * BATCH_SIZE * 84);
cudaMalloc((void **) &d_output_results, sizeof(float) * NUM_TEST * 10);
// CUDA memcpy from host to device
//cudaMemcpyToSymbol(D_NUM_TEST, &d_NUM_TEST, sizeof(int), 0, cudaMemcpyHostToDevice);
//cudaMemcpyToSymbol(D_BATCH_SIZE, &batch_size, sizeof(int), 0, cudaMemcpyHostToDevice);
//cudaMemcpyToSymbol(d_map, tmp_map, sizeof(__gpu_map__), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(D_NUM_TEST, (void *) &d_NUM_TEST, sizeof(int));
cudaMemcpyToSymbol(D_BATCH_SIZE, (void *) &batch_size, sizeof(int));
cudaMemcpyToSymbol(d_map, (void *) tmp_map, sizeof(__gpu_map__));
cudaMemcpy(d_map_spill, tmp_map_spill, sizeof(__gpu_map_spill__), cudaMemcpyHostToDevice);
// Copy the MNIST test images to the device; d_test_data is read by the C1 convolution kernel
cudaMemcpy(d_test_data, test_data, sizeof(float) * NUM_TEST * 32 * 32, cudaMemcpyHostToDevice);
// WARNING: FREE 1
free(tmp_map);
free(tmp_map_spill);
// ENTERING MAIN LOOP
int step = 0;
dim3 block;
dim3 thread;
for (int step = 0; (step * BATCH_SIZE) < d_NUM_TEST; step++)
{// Advance step by step, with BATCH_SIZE stride
// 0. Convolution layer C1
block.x = 6;
block.y = BATCH_SIZE;
block.z = 1;
thread.x = 28;
thread.y = 28;
thread.z = 1;
convolution_kernel<<<block, thread>>>(step, 1, 6, 1, 32, 32, 5, d_test_data, d_c1_results);
// 1. Pooling layer S2
block.x = 6;
block.y = BATCH_SIZE;
block.z = 1;
thread.x = 14;
thread.y = 14;
thread.z = 1;
pooling_kernel<<<block, thread>>>(step, 2, 6, 28, 28, d_c1_results, d_s2_results);
// 2. Convolution layer C3
block.x = 16;
block.y = BATCH_SIZE;
block.z = 1;
thread.x = 10;
thread.y = 10;
thread.z = 1;
convolution_kernel<<<block, thread>>>(step, 3, 16, 6, 14, 14, 5, d_s2_results, d_c3_results);
// 3. Pooling layer S4
block.x = 16;
block.y = BATCH_SIZE;
block.z = 1;
thread.x = 5;
thread.y = 5;
thread.z = 1;
pooling_kernel<<<block, thread>>>(step, 4, 16, 10, 10, d_c3_results, d_s4_results);
// 4. Fully connected layer F5
block.x = 4;
block.y = BATCH_SIZE;
block.z = 1;
thread.x = 400;
thread.y = 1;
thread.z = 1;
fullyConnect_kernel<<<block, thread>>>(step, 5, 400, 120, d_map_spill, d_s4_results, d_f5_results);
// 5. Fully connected layer F6
block.x = 4;
block.y = BATCH_SIZE;
block.z = 1;
thread.x = 120;
thread.y = 1;
thread.z = 1;
fullyConnect_kernel<<<block, thread>>>(step, 6, 120, 84, d_map_spill, d_f5_results, d_f6_results);
// 6. Output layer OUTPUT
block.x = 1;
block.y = BATCH_SIZE;
block.z = 1;
thread.x = 84;
thread.y = 1;
thread.z = 1;
output_kernel<<<block, thread>>>(step, 7, 84, 10, d_map_spill, d_f6_results, d_output_results);
}
// 7. Determine numbers
block.x = 100;
block.y = 1;
block.z = 1;
thread.x = 100;
thread.y = 1;
thread.z = 1;
numberDetermine_kernel<<<block, thread>>>(8, 8, d_output_results, d_inferences);
// 8. Copy inference answers to Host
cudaMemcpy(inferences, d_inferences, sizeof(int) * NUM_TEST, cudaMemcpyDeviceToHost);
// 9. Scoring
for (int i = 0; i < NUM_TEST; i++)
{
if (inferences[i] == test_label[i])
{
(*cnt_correct)++;
}
}
// WARNING: FREE 0
free(inferences);
cudaFree(d_inferences);
cudaFree(d_map_spill);
cudaFree(d_test_data);
cudaFree(d_c1_results);
cudaFree(d_s2_results);
cudaFree(d_c3_results);
cudaFree(d_s4_results);
cudaFree(d_f5_results);
cudaFree(d_f6_results);
cudaFree(d_output_results);
return;
}
|
e13a36bda2152499a498fd7e8fbb3b9afc6c8be7.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
DECLARE_bool(cudnn_deterministic);
DECLARE_uint64(conv_workspace_size_limit);
DECLARE_bool(cudnn_exhaustive_search);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using DataLayout = platform::DataLayout;
template <typename T>
using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType;
using framework::AlgorithmsCache;
static inline void GetNCDHW(const framework::DDim& dims,
const DataLayout& layout, int* N, int* C, int* D,
int* H, int* W) {
*N = dims[0];
*C = layout == DataLayout::kNCHW ? dims[1] : dims[dims.size() - 1];
int i = layout == DataLayout::kNCHW ? 0 : 1;
if (dims.size() == 5) {
*D = dims[2 - i];
*H = dims[3 - i];
*W = dims[4 - i];
} else {
*D = 1;
*H = dims[2 - i];
*W = dims[3 - i];
}
}
template <typename DeviceContext, typename T, size_t D>
static void Slice_2(const framework::ExecutionContext& context,
const Tensor* input, Tensor* out,
const std::vector<int>& starts,
const std::vector<int>& axes) {
auto& place =
*context.template device_context<DeviceContext>().eigen_device();
auto in_dims = input->dims();
auto new_out_dims = out->dims();
auto offsets = Eigen::array<int, D>();
auto extents = Eigen::array<int, D>();
for (size_t i = 0; i < D; ++i) {
offsets[i] = 0;
extents[i] = new_out_dims[i];
}
int start;
for (size_t i = 0; i < axes.size(); ++i) {
start = starts[i];
if (start < 0) {
start = (start + in_dims[axes[i]]);
}
start = std::max(start, 0);
offsets[axes[i]] = start;
}
auto in_t =
framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
*input);
auto out_t =
framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
*out, new_out_dims);
out_t.device(place) = in_t.slice(offsets, extents);
}
template <typename T>
class CUDNNConvOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
"It must use CUDAPlace.");
const Tensor* input = ctx.Input<Tensor>("Input");
auto* filter = ctx.Input<Tensor>("Filter");
auto* output = ctx.Output<Tensor>("Output");
output->mutable_data<T>(ctx.GetPlace());
const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
if (exhaustive_search && FLAGS_cudnn_deterministic) {
PADDLE_THROW(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time.");
}
const std::string padding_algorithm =
ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// ------------ transformed tensor -----------
Tensor transformed_input_channel(input->type());
Tensor transformed_output(output->type());
T* output_data = nullptr;
if (channel_last) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, output,
&transformed_output);
} else {
transformed_input_channel = *input;
transformed_output = *output;
}
output_data = transformed_output.data<T>();
// update padding and dilation
auto in_dims = transformed_input_channel.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims;
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2,
0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
}
} else {
transformed_input = transformed_input_channel;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* filter_data = filter->data<T>();
// ------------------- cudnn descriptors ---------------------
ConvArgs args{&transformed_input, filter, &transformed_output, strides,
padding_common, dilations};
auto handle = dev_ctx.cudnn_handle();
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
auto dtype = platform::CudnnDataType<T>::type;
DataLayout layout = DataLayout::kNCHW;
if (transformed_input_channel.dims().size() == 5) {
layout = DataLayout::kNCDHW;
}
auto layout_format = GetCudnnTensorFormat(layout);
args.handle = handle;
args.cdesc.set(dtype, padding_common, strides, dilations);
#if CUDNN_VERSION_MIN(7, 0, 1)
// cudnn 7 can support groups, no need to do it manually
// FIXME(typhoonzero): find a better way to disable groups
// rather than setting it to 1.
CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionGroupCount(
args.cdesc.desc(), groups));
groups = 1;
#endif
args.idesc.set(transformed_input, groups);
args.wdesc.set(*filter, layout_format, groups);
args.odesc.set(transformed_output, groups);
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_output.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = filter->numel() / groups;
// ------------------- cudnn conv workspace ---------------------
size_t workspace_size = 0; // final workspace to allocate.
// ------------------- cudnn conv algorithm ---------------------
cudnnConvolutionFwdAlgo_t algo{};
using search = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
algo = search::Find<T>(args, exhaustive_search, false, 0, ctx);
workspace_size = search::GetWorkspaceSize(args, algo);
// ------------------- cudnn conv forward ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* workspace_ptr) {
CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward(
handle, &alpha, args.idesc.desc(),
input_data + i * group_offset_in, args.wdesc.desc(),
filter_data + i * group_offset_filter, args.cdesc.desc(), algo,
workspace_ptr, workspace_size, &beta, args.odesc.desc(),
output_data + i * group_offset_out));
},
workspace_size);
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_output, output);
}
}
};
template <typename T>
class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
"It must use CUDAPlace.");
auto input = ctx.Input<Tensor>("Input");
auto filter = ctx.Input<Tensor>("Filter");
auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
const T* filter_data = filter->data<T>();
if (input_grad) {
input_grad->mutable_data<T>(ctx.GetPlace());
}
if (filter_grad) {
filter_grad->mutable_data<T>(ctx.GetPlace());
}
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
if (exhaustive_search && deterministic) {
PADDLE_THROW(
"Can't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time.");
}
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform Tensor
Tensor transformed_input_channel(input->type());
Tensor transformed_output_grad_channel(output_grad->type());
Tensor transformed_input_grad_channel(input->type());
if (channel_last) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
if (input_grad) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input_grad, &transformed_input_grad_channel);
}
} else {
transformed_input_channel = *input;
transformed_output_grad_channel = *output_grad;
if (input_grad) {
transformed_input_grad_channel.ShareDataWith(*input_grad);
}
}
// update paddings
auto in_dims = transformed_input_channel.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims;
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
// cuDNN only supports padding the same amount on every dimension.
// So we create a new padded input tensor.
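// Example of the split done below (hypothetical values): paddings {1, 3} on
// one spatial dimension give padding_common = min(1, 3) = 1 for cuDNN, the
// remaining {0, 2} is applied explicitly by PadFunction, and that padded
// dimension grows by |1 - 3| = 2.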
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input(input->type());
Tensor transformed_input_grad(input->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
transformed_input_grad.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (input_grad) {
transformed_input_grad =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
}
} else {
transformed_input.ShareDataWith(transformed_input_channel);
if (input_grad) {
transformed_input_grad.ShareDataWith(transformed_input_grad_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* output_grad_data = transformed_output_grad_channel.data<T>();
T* filter_grad_data = nullptr;
T* input_grad_data = nullptr;
T* transformed_input_grad_data = nullptr;
ConvArgs args1{&transformed_input_grad,
filter,
&transformed_output_grad_channel,
strides,
padding_common,
dilations};
ConvArgs args2{&transformed_input,
filter_grad,
&transformed_output_grad_channel,
strides,
padding_common,
dilations};
auto handle = dev_ctx.cudnn_handle();
auto dtype = platform::CudnnDataType<T>::type;
DataLayout layout = DataLayout::kNCHW;
if (input->dims().size() == 5) {
layout = DataLayout::kNCDHW;
}
auto layout_tensor = GetCudnnTensorFormat(layout);
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNCHW, &o_n,
&o_c, &o_d, &o_h, &o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = filter->numel() / groups;
// ------------------- cudnn backward algorithm ---------------------
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
size_t workspace_size = 0;
int iwo_groups, c_groups;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_groups = 1;
c_groups = groups;
groups = 1;
#endif
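// With cuDNN >= 7, groups are expressed through the convolution descriptor
// (c_groups) while the tensor/filter descriptors keep their full shapes
// (iwo_groups == 1) and groups is reset to 1, so the per-group loops below
// execute a single iteration.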
if (input_grad) {
// ------------------- cudnn descriptors ---------------------
input_grad_data = input_grad->data<T>();
transformed_input_grad_data = transformed_input_grad.data<T>();
args1.handle = handle;
args1.idesc.set(transformed_input_grad, iwo_groups);
args1.wdesc.set(*filter, layout_tensor, iwo_groups);
args1.odesc.set(transformed_output_grad_channel, iwo_groups);
args1.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search1 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search1::Find<T>(args1, exhaustive_search, deterministic, 0, ctx);
workspace_size =
std::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
}
if (filter_grad) {
// ------------------- cudnn descriptors ---------------------
filter_grad_data = filter_grad->data<T>();
args2.handle = handle;
args2.idesc.set(transformed_input, iwo_groups);
args2.wdesc.set(*filter_grad, layout_tensor, iwo_groups);
args2.odesc.set(transformed_output_grad_channel, iwo_groups);
args2.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search2::Find<T>(args2, exhaustive_search, deterministic, 1, ctx);
workspace_size = std::max(workspace_size,
search2::GetWorkspaceSize(args2, filter_algo));
}
// ------------------- cudnn conv backward data ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
if (input_grad) {
// Because beta is zero, it is unnecessary to reset input_grad.
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args1.wdesc.desc(),
filter_data + i * group_offset_filter, args1.odesc.desc(),
output_grad_data + i * group_offset_out, args1.cdesc.desc(),
data_algo, cudnn_workspace_ptr, workspace_size, &beta,
args1.idesc.desc(),
transformed_input_grad_data + i * group_offset_in));
},
workspace_size);
}
if (!is_sys_pad) {
std::vector<int> starts(transformed_input_channel.dims().size(), 0);
std::vector<int> axes(transformed_input_channel.dims().size(), 0);
for (size_t i = 0; i < transformed_input_channel.dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
transformed_input_grad_channel.mutable_data(ctx.GetPlace());
if (transformed_input_channel.dims().size() == 4) {
Slice_2<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
} else {
Slice_2<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_input_grad_channel, input_grad);
}
}
// ------------------- cudnn conv backward filter ---------------------
if (filter_grad) {
// Because beta is zero, it is unnecessary to reset filter_grad.
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args2.idesc.desc(),
input_data + i * group_offset_in, args2.odesc.desc(),
output_grad_data + i * group_offset_out, args2.cdesc.desc(),
filter_algo, cudnn_workspace_ptr, workspace_size, &beta,
args2.wdesc.desc(),
filter_grad_data + i * group_offset_filter));
},
workspace_size);
}
}
}
};
/*
* Inputs: I, W, dO, ddI, ddW
* Outputs: ddO, dW, dI
* ddo = conv(ddI, W) + conv(I, ddW)
* dW = conv_bp_filter(ddI, dO)
* dI = conv_bp_data(ddW, dO)
*/
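/*
 * Mapping to the cuDNN calls below: conv(ddI, W) and conv(I, ddW) are the two
 * forward passes (args1, args2) whose results are summed into ddO; dW comes
 * from the backward-filter pass (args3) over (ddI, dO), and dI from the
 * backward-data pass (args4) over (ddW, dO).
 */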
template <typename T>
class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
"It must use CUDAPlace.");
auto X = ctx.Input<Tensor>("Input");
auto W = ctx.Input<Tensor>("Filter");
auto dO = ctx.Input<Tensor>("DOutput");
auto ddX = ctx.Input<Tensor>("DDInput");
auto ddW = ctx.Input<Tensor>("DDFilter");
auto ddO = ctx.Output<Tensor>("DDOutput");
auto dW = ctx.Output<Tensor>("DFilter");
auto dX = ctx.Output<Tensor>("DInput");
if (ddO) {
ddO->mutable_data<T>(ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(dev_ctx, ddO, static_cast<T>(0));
}
if (dW) {
dW->mutable_data<T>(ctx.GetPlace());
}
if (dX) {
dX->mutable_data<T>(ctx.GetPlace());
}
// const T* x = X->data<T>();
const T* dy = dO->data<T>();
const T* w = W->data<T>();
const T* ddx = nullptr;
const T* ddw = nullptr;
T *dw, *dx, *ddy;
dw = dx = ddy = nullptr;
T* transformed_dx = nullptr;
const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
if (exhaustive_search && deterministic) {
PADDLE_THROW(
"Can't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time.");
}
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform Tensors to channel first-----------
Tensor transformed_X_channel(X->type());
Tensor transformed_dO_channel(dO->type());
Tensor transformed_ddX_channel(X->type());
Tensor transformed_ddO_channel(dO->type());
Tensor transformed_dX_channel(X->type());
if (channel_last) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
if (ddX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
}
if (ddO) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddO, &transformed_ddO_channel);
}
if (dX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dX, &transformed_dX_channel);
transformed_dX_channel.mutable_data<T>(ctx.GetPlace());
}
} else {
transformed_X_channel = *X;
transformed_dO_channel = *dO;
if (ddX) {
transformed_ddX_channel = *ddX;
}
if (ddO) {
transformed_ddO_channel.ShareDataWith(*ddO);
}
if (dX) {
transformed_dX_channel.ShareDataWith(*dX);
}
}
auto in_dims = transformed_X_channel.dims();
auto filter_dims = W->dims();
framework::DDim in_data_dims =
framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_X(X->type());
Tensor transformed_ddX(X->type());
Tensor transformed_dX(X->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(X->dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_X_channel.dims()[0];
new_input_shape_vec[1] = transformed_X_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_X_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_X.Resize(new_input_shape);
transformed_ddX.Resize(new_input_shape);
transformed_dX.Resize(new_input_shape);
transformed_X =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (ddX) {
transformed_ddX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
if (dX) {
transformed_dX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = X->dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
default:
PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
}
} else {
transformed_X.ShareDataWith(transformed_X_channel);
if (ddX) {
transformed_ddX.ShareDataWith(transformed_ddX_channel);
}
if (dX) {
transformed_dX.ShareDataWith(transformed_dX_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* x = transformed_X.data<T>();
int iwo_group = groups;
int c_group = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_group = 1;
c_group = groups;
#endif
auto dtype = platform::CudnnDataType<T>::type;
auto handle = dev_ctx.cudnn_handle();
ConvArgs args1{&transformed_ddX, W,
&transformed_ddO_channel, strides,
padding_common, dilations};
ConvArgs args2{&transformed_X, ddW, &transformed_ddO_channel, strides,
padding_common, dilations};
ConvArgs args3{&transformed_ddX, dW, &transformed_dO_channel, strides,
padding_common, dilations};
ConvArgs args4{&transformed_dX, ddW, &transformed_dO_channel, strides,
padding_common, dilations};
cudnnConvolutionFwdAlgo_t fwd_algo1 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionFwdAlgo_t fwd_algo2 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
auto layout = GetCudnnTensorFormat(DataLayout::kNCHW);
// ddo = conv(ddI, W) + conv(I, ddW)
size_t workspace_size = 0;
T* transformed_ddy_channel = nullptr;
if (ddO) {
ddy = ddO->data<T>();
transformed_ddy_channel = transformed_ddO_channel.data<T>();
if (ddX) {
args1.handle = handle;
args1.idesc.set(transformed_ddX, iwo_group);
args1.wdesc.set(*W, layout, iwo_group);
args1.odesc.set(transformed_ddO_channel, iwo_group);
args1.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, 0, ctx);
workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1);
}
if (ddW) {
ddw = ddW->data<T>();
args2.handle = handle;
args2.idesc.set(transformed_X, iwo_group);
args2.wdesc.set(*ddW, layout, iwo_group);
args2.odesc.set(transformed_ddO_channel, iwo_group);
args2.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search2 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, 0, ctx);
workspace_size = std::max(workspace_size,
search2::GetWorkspaceSize(args2, fwd_algo2));
}
}
if (dW && ddX) {
dw = dW->data<T>();
args3.handle = handle;
args3.idesc.set(transformed_ddX, iwo_group);
args3.wdesc.set(*dW, layout, iwo_group);
args3.odesc.set(transformed_dO_channel, iwo_group);
args3.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search3::Find<T>(args3, exhaustive_search, deterministic, 1, ctx);
workspace_size = std::max(workspace_size,
search3::GetWorkspaceSize(args3, filter_algo));
}
if (ddW && dX) {
transformed_dx = transformed_dX.data<T>();
args4.handle = handle;
args4.idesc.set(transformed_dX, iwo_group);
args4.wdesc.set(*ddW, layout, iwo_group);
args4.odesc.set(transformed_dO_channel, iwo_group);
args4.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search4 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search4::Find<T>(args4, exhaustive_search, deterministic, 2, ctx);
workspace_size =
std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo));
}
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h,
&i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = W->numel() / groups;
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
auto wkspace_handle = dev_ctx.cudnn_workspace_handle();
if (ddO) {
if (ddX) {
ddx = transformed_ddX.data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward(
handle, &alpha, args1.idesc.desc(),
ddx + i * group_offset_in, args1.wdesc.desc(),
w + i * group_offset_filter, args1.cdesc.desc(), fwd_algo1,
workspace_ptr, workspace_size, &beta, args1.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
}
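// The conv(I, ddW) term is accumulated on top of conv(ddI, W): the call below
// passes alpha (1.0) in cuDNN's beta slot, so its result is added to the ddO
// data already written above instead of overwriting it.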
if (ddW) {
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward(
handle, &alpha, args2.idesc.desc(), x + i * group_offset_in,
args2.wdesc.desc(), ddw + i * group_offset_filter,
args2.cdesc.desc(), fwd_algo2, workspace_ptr,
workspace_size, &alpha, args2.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_ddO_channel, ddO);
}
}
T* transformed_dy_channel = transformed_dO_channel.data<T>();
if (dW && ddX) {
ddx = transformed_ddX.data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args3.idesc.desc(), ddx + i * group_offset_in,
args3.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args3.cdesc.desc(), filter_algo, workspace_ptr,
workspace_size, &beta, args3.wdesc.desc(),
dw + i * group_offset_filter));
},
workspace_size);
}
}
if (dX && ddW) {
ddw = ddW->data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args4.wdesc.desc(),
ddw + i * group_offset_filter, args4.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args4.cdesc.desc(), data_algo, workspace_ptr, workspace_size,
&beta, args4.idesc.desc(),
transformed_dx + i * group_offset_in));
},
workspace_size);
}
if (!is_sys_pad) {
// slice off the explicit padding so dX matches the original input shape
std::vector<int> starts(X->dims().size(), 0);
std::vector<int> axes(X->dims().size(), 0);
for (size_t i = 0; i < X->dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
if (X->dims().size() == 4) {
Slice_2<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
} else {
Slice_2<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_dX_channel, dX);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace plat = paddle::platform;
REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>,
paddle::operators::CUDNNConvGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(
conv2d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>);
REGISTER_OP_KERNEL(
conv3d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
| e13a36bda2152499a498fd7e8fbb3b9afc6c8be7.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the spopecific language governing permissions and
limitations under the License. */
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
DECLARE_bool(cudnn_deterministic);
DECLARE_uint64(conv_workspace_size_limit);
DECLARE_bool(cudnn_exhaustive_search);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using DataLayout = platform::DataLayout;
template <typename T>
using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType;
using framework::AlgorithmsCache;
static inline void GetNCDHW(const framework::DDim& dims,
const DataLayout& layout, int* N, int* C, int* D,
int* H, int* W) {
*N = dims[0];
*C = layout == DataLayout::kNCHW ? dims[1] : dims[dims.size() - 1];
int i = layout == DataLayout::kNCHW ? 0 : 1;
if (dims.size() == 5) {
*D = dims[2 - i];
*H = dims[3 - i];
*W = dims[4 - i];
} else {
*D = 1;
*H = dims[2 - i];
*W = dims[3 - i];
}
}
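// Slice_2 copies the sub-block of `input` that starts at `starts` along the
// given `axes` (full extent elsewhere) into `out`; the kernels below use it to
// strip the explicit padding off gradients before returning them.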
template <typename DeviceContext, typename T, size_t D>
static void Slice_2(const framework::ExecutionContext& context,
const Tensor* input, Tensor* out,
const std::vector<int>& starts,
const std::vector<int>& axes) {
auto& place =
*context.template device_context<DeviceContext>().eigen_device();
auto in_dims = input->dims();
auto new_out_dims = out->dims();
auto offsets = Eigen::array<int, D>();
auto extents = Eigen::array<int, D>();
for (size_t i = 0; i < D; ++i) {
offsets[i] = 0;
extents[i] = new_out_dims[i];
}
int start;
for (size_t i = 0; i < axes.size(); ++i) {
start = starts[i];
if (start < 0) {
start = (start + in_dims[axes[i]]);
}
start = std::max(start, 0);
offsets[axes[i]] = start;
}
auto in_t =
framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
*input);
auto out_t =
framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
*out, new_out_dims);
out_t.device(place) = in_t.slice(offsets, extents);
}
template <typename T>
class CUDNNConvOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
"It must use CUDAPlace.");
const Tensor* input = ctx.Input<Tensor>("Input");
auto* filter = ctx.Input<Tensor>("Filter");
auto* output = ctx.Output<Tensor>("Output");
output->mutable_data<T>(ctx.GetPlace());
const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
if (exhaustive_search && FLAGS_cudnn_deterministic) {
PADDLE_THROW(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time.");
}
const std::string padding_algorithm =
ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// ------------ transformed tensor -----------
Tensor transformed_input_channel(input->type());
Tensor transformed_output(output->type());
T* output_data = nullptr;
if (channel_last) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, output,
&transformed_output);
} else {
transformed_input_channel = *input;
transformed_output = *output;
}
output_data = transformed_output.data<T>();
// update padding and dilation
auto in_dims = transformed_input_channel.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims;
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2,
0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
}
} else {
transformed_input = transformed_input_channel;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* filter_data = filter->data<T>();
// ------------------- cudnn descriptors ---------------------
ConvArgs args{&transformed_input, filter, &transformed_output, strides,
padding_common, dilations};
auto handle = dev_ctx.cudnn_handle();
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
auto dtype = platform::CudnnDataType<T>::type;
DataLayout layout = DataLayout::kNCHW;
if (transformed_input_channel.dims().size() == 5) {
layout = DataLayout::kNCDHW;
}
auto layout_format = GetCudnnTensorFormat(layout);
args.handle = handle;
args.cdesc.set(dtype, padding_common, strides, dilations);
#if CUDNN_VERSION_MIN(7, 0, 1)
// cudnn 7 can support groups, no need to do it manually
// FIXME(typhoonzero): find a better way to disable groups
// rather than setting it to 1.
CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionGroupCount(
args.cdesc.desc(), groups));
groups = 1;
#endif
args.idesc.set(transformed_input, groups);
args.wdesc.set(*filter, layout_format, groups);
args.odesc.set(transformed_output, groups);
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_output.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = filter->numel() / groups;
// ------------------- cudnn conv workspace ---------------------
size_t workspace_size = 0; // final workspace to allocate.
// ------------------- cudnn conv algorithm ---------------------
cudnnConvolutionFwdAlgo_t algo{};
using search = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
algo = search::Find<T>(args, exhaustive_search, false, 0, ctx);
workspace_size = search::GetWorkspaceSize(args, algo);
// ------------------- cudnn conv forward ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* workspace_ptr) {
CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward(
handle, &alpha, args.idesc.desc(),
input_data + i * group_offset_in, args.wdesc.desc(),
filter_data + i * group_offset_filter, args.cdesc.desc(), algo,
workspace_ptr, workspace_size, &beta, args.odesc.desc(),
output_data + i * group_offset_out));
},
workspace_size);
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_output, output);
}
}
};
template <typename T>
class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
"It must use CUDAPlace.");
auto input = ctx.Input<Tensor>("Input");
auto filter = ctx.Input<Tensor>("Filter");
auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
const T* filter_data = filter->data<T>();
if (input_grad) {
input_grad->mutable_data<T>(ctx.GetPlace());
}
if (filter_grad) {
filter_grad->mutable_data<T>(ctx.GetPlace());
}
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
if (exhaustive_search && deterministic) {
PADDLE_THROW(
"Can't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time.");
}
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform Tensor
Tensor transformed_input_channel(input->type());
Tensor transformed_output_grad_channel(output_grad->type());
Tensor transformed_input_grad_channel(input->type());
if (channel_last) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
if (input_grad) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input_grad, &transformed_input_grad_channel);
}
} else {
transformed_input_channel = *input;
transformed_output_grad_channel = *output_grad;
if (input_grad) {
transformed_input_grad_channel.ShareDataWith(*input_grad);
}
}
// update paddings
auto in_dims = transformed_input_channel.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims;
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
// cuDNN only supports padding the same amount on every dimension.
// So we create a new padded input tensor.
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input(input->type());
Tensor transformed_input_grad(input->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
transformed_input_grad.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (input_grad) {
transformed_input_grad =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
}
} else {
transformed_input.ShareDataWith(transformed_input_channel);
if (input_grad) {
transformed_input_grad.ShareDataWith(transformed_input_grad_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* output_grad_data = transformed_output_grad_channel.data<T>();
T* filter_grad_data = nullptr;
T* input_grad_data = nullptr;
T* transformed_input_grad_data = nullptr;
ConvArgs args1{&transformed_input_grad,
filter,
&transformed_output_grad_channel,
strides,
padding_common,
dilations};
ConvArgs args2{&transformed_input,
filter_grad,
&transformed_output_grad_channel,
strides,
padding_common,
dilations};
auto handle = dev_ctx.cudnn_handle();
auto dtype = platform::CudnnDataType<T>::type;
DataLayout layout = DataLayout::kNCHW;
if (input->dims().size() == 5) {
layout = DataLayout::kNCDHW;
}
auto layout_tensor = GetCudnnTensorFormat(layout);
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNCHW, &o_n,
&o_c, &o_d, &o_h, &o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = filter->numel() / groups;
// ------------------- cudnn backward algorithm ---------------------
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
size_t workspace_size = 0;
int iwo_groups, c_groups;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_groups = 1;
c_groups = groups;
groups = 1;
#endif
if (input_grad) {
// ------------------- cudnn descriptors ---------------------
input_grad_data = input_grad->data<T>();
transformed_input_grad_data = transformed_input_grad.data<T>();
args1.handle = handle;
args1.idesc.set(transformed_input_grad, iwo_groups);
args1.wdesc.set(*filter, layout_tensor, iwo_groups);
args1.odesc.set(transformed_output_grad_channel, iwo_groups);
args1.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search1 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search1::Find<T>(args1, exhaustive_search, deterministic, 0, ctx);
workspace_size =
std::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
}
if (filter_grad) {
// ------------------- cudnn descriptors ---------------------
filter_grad_data = filter_grad->data<T>();
args2.handle = handle;
args2.idesc.set(transformed_input, iwo_groups);
args2.wdesc.set(*filter_grad, layout_tensor, iwo_groups);
args2.odesc.set(transformed_output_grad_channel, iwo_groups);
args2.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search2::Find<T>(args2, exhaustive_search, deterministic, 1, ctx);
workspace_size = std::max(workspace_size,
search2::GetWorkspaceSize(args2, filter_algo));
}
// ------------------- cudnn conv backward data ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
if (input_grad) {
// Because beta is zero, it is unnecessary to reset input_grad.
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args1.wdesc.desc(),
filter_data + i * group_offset_filter, args1.odesc.desc(),
output_grad_data + i * group_offset_out, args1.cdesc.desc(),
data_algo, cudnn_workspace_ptr, workspace_size, &beta,
args1.idesc.desc(),
transformed_input_grad_data + i * group_offset_in));
},
workspace_size);
}
if (!is_sys_pad) {
std::vector<int> starts(transformed_input_channel.dims().size(), 0);
std::vector<int> axes(transformed_input_channel.dims().size(), 0);
for (size_t i = 0; i < transformed_input_channel.dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
transformed_input_grad_channel.mutable_data(ctx.GetPlace());
if (transformed_input_channel.dims().size() == 4) {
Slice_2<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
} else {
Slice_2<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_input_grad_channel, input_grad);
}
}
// ------------------- cudnn conv backward filter ---------------------
if (filter_grad) {
// Because beta is zero, it is unnecessary to reset filter_grad.
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args2.idesc.desc(),
input_data + i * group_offset_in, args2.odesc.desc(),
output_grad_data + i * group_offset_out, args2.cdesc.desc(),
filter_algo, cudnn_workspace_ptr, workspace_size, &beta,
args2.wdesc.desc(),
filter_grad_data + i * group_offset_filter));
},
workspace_size);
}
}
}
};
/*
* Inputs: I, W, dO, ddI, ddW
* Outputs: ddO, dW, dI
* ddo = conv(ddI, W) + conv(I, ddW)
* dW = conv_bp_filter(ddI, dO)
* dI = conv_bp_data(ddW, dO)
*/
template <typename T>
class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
"It must use CUDAPlace.");
auto X = ctx.Input<Tensor>("Input");
auto W = ctx.Input<Tensor>("Filter");
auto dO = ctx.Input<Tensor>("DOutput");
auto ddX = ctx.Input<Tensor>("DDInput");
auto ddW = ctx.Input<Tensor>("DDFilter");
auto ddO = ctx.Output<Tensor>("DDOutput");
auto dW = ctx.Output<Tensor>("DFilter");
auto dX = ctx.Output<Tensor>("DInput");
if (ddO) {
ddO->mutable_data<T>(ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(dev_ctx, ddO, static_cast<T>(0));
}
if (dW) {
dW->mutable_data<T>(ctx.GetPlace());
}
if (dX) {
dX->mutable_data<T>(ctx.GetPlace());
}
// const T* x = X->data<T>();
const T* dy = dO->data<T>();
const T* w = W->data<T>();
const T* ddx = nullptr;
const T* ddw = nullptr;
T *dw, *dx, *ddy;
dw = dx = ddy = nullptr;
T* transformed_dx = nullptr;
const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
if (exhaustive_search && deterministic) {
PADDLE_THROW(
"Can't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time.");
}
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform Tensors to channel first-----------
Tensor transformed_X_channel(X->type());
Tensor transformed_dO_channel(dO->type());
Tensor transformed_ddX_channel(X->type());
Tensor transformed_ddO_channel(dO->type());
Tensor transformed_dX_channel(X->type());
if (channel_last) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
if (ddX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
}
if (ddO) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddO, &transformed_ddO_channel);
}
if (dX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dX, &transformed_dX_channel);
transformed_dX_channel.mutable_data<T>(ctx.GetPlace());
}
} else {
transformed_X_channel = *X;
transformed_dO_channel = *dO;
if (ddX) {
transformed_ddX_channel = *ddX;
}
if (ddO) {
transformed_ddO_channel.ShareDataWith(*ddO);
}
if (dX) {
transformed_dX_channel.ShareDataWith(*dX);
}
}
auto in_dims = transformed_X_channel.dims();
auto filter_dims = W->dims();
framework::DDim in_data_dims =
framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_X(X->type());
Tensor transformed_ddX(X->type());
Tensor transformed_dX(X->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(X->dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_X_channel.dims()[0];
new_input_shape_vec[1] = transformed_X_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_X_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_X.Resize(new_input_shape);
transformed_ddX.Resize(new_input_shape);
transformed_dX.Resize(new_input_shape);
transformed_X =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (ddX) {
transformed_ddX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
if (dX) {
transformed_dX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = X->dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
default:
PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
}
} else {
transformed_X.ShareDataWith(transformed_X_channel);
if (ddX) {
transformed_ddX.ShareDataWith(transformed_ddX_channel);
}
if (dX) {
transformed_dX.ShareDataWith(transformed_dX_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* x = transformed_X.data<T>();
int iwo_group = groups;
int c_group = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_group = 1;
c_group = groups;
#endif
auto dtype = platform::CudnnDataType<T>::type;
auto handle = dev_ctx.cudnn_handle();
ConvArgs args1{&transformed_ddX, W,
&transformed_ddO_channel, strides,
padding_common, dilations};
ConvArgs args2{&transformed_X, ddW, &transformed_ddO_channel, strides,
padding_common, dilations};
ConvArgs args3{&transformed_ddX, dW, &transformed_dO_channel, strides,
padding_common, dilations};
ConvArgs args4{&transformed_dX, ddW, &transformed_dO_channel, strides,
padding_common, dilations};
cudnnConvolutionFwdAlgo_t fwd_algo1 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionFwdAlgo_t fwd_algo2 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
auto layout = GetCudnnTensorFormat(DataLayout::kNCHW);
// ddo = conv(ddI, W) + conv(I, ddW)
size_t workspace_size = 0;
T* transformed_ddy_channel = nullptr;
if (ddO) {
ddy = ddO->data<T>();
transformed_ddy_channel = transformed_ddO_channel.data<T>();
if (ddX) {
args1.handle = handle;
args1.idesc.set(transformed_ddX, iwo_group);
args1.wdesc.set(*W, layout, iwo_group);
args1.odesc.set(transformed_ddO_channel, iwo_group);
args1.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, 0, ctx);
workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1);
}
if (ddW) {
ddw = ddW->data<T>();
args2.handle = handle;
args2.idesc.set(transformed_X, iwo_group);
args2.wdesc.set(*ddW, layout, iwo_group);
args2.odesc.set(transformed_ddO_channel, iwo_group);
args2.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search2 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, 0, ctx);
workspace_size = std::max(workspace_size,
search2::GetWorkspaceSize(args2, fwd_algo2));
}
}
if (dW && ddX) {
dw = dW->data<T>();
args3.handle = handle;
args3.idesc.set(transformed_ddX, iwo_group);
args3.wdesc.set(*dW, layout, iwo_group);
args3.odesc.set(transformed_dO_channel, iwo_group);
args3.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search3::Find<T>(args3, exhaustive_search, deterministic, 1, ctx);
workspace_size = std::max(workspace_size,
search3::GetWorkspaceSize(args3, filter_algo));
}
if (ddW && dX) {
transformed_dx = transformed_dX.data<T>();
args4.handle = handle;
args4.idesc.set(transformed_dX, iwo_group);
args4.wdesc.set(*ddW, layout, iwo_group);
args4.odesc.set(transformed_dO_channel, iwo_group);
args4.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search4 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search4::Find<T>(args4, exhaustive_search, deterministic, 2, ctx);
workspace_size =
std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo));
}
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h,
&i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = W->numel() / groups;
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
auto wkspace_handle = dev_ctx.cudnn_workspace_handle();
if (ddO) {
if (ddX) {
ddx = transformed_ddX.data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward(
handle, &alpha, args1.idesc.desc(),
ddx + i * group_offset_in, args1.wdesc.desc(),
w + i * group_offset_filter, args1.cdesc.desc(), fwd_algo1,
workspace_ptr, workspace_size, &beta, args1.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
}
if (ddW) {
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward(
handle, &alpha, args2.idesc.desc(), x + i * group_offset_in,
args2.wdesc.desc(), ddw + i * group_offset_filter,
args2.cdesc.desc(), fwd_algo2, workspace_ptr,
workspace_size, &alpha, args2.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_ddO_channel, ddO);
}
}
T* transformed_dy_channel = transformed_dO_channel.data<T>();
if (dW && ddX) {
ddx = transformed_ddX.data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args3.idesc.desc(), ddx + i * group_offset_in,
args3.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args3.cdesc.desc(), filter_algo, workspace_ptr,
workspace_size, &beta, args3.wdesc.desc(),
dw + i * group_offset_filter));
},
workspace_size);
}
}
if (dX && ddW) {
ddw = ddW->data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args4.wdesc.desc(),
ddw + i * group_offset_filter, args4.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args4.cdesc.desc(), data_algo, workspace_ptr, workspace_size,
&beta, args4.idesc.desc(),
transformed_dx + i * group_offset_in));
},
workspace_size);
}
if (!is_sys_pad) {
// slice off the explicit padding so dX matches the original input shape
std::vector<int> starts(X->dims().size(), 0);
std::vector<int> axes(X->dims().size(), 0);
for (size_t i = 0; i < X->dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
if (X->dims().size() == 4) {
Slice_2<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
} else {
Slice_2<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_dX_channel, dX);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace plat = paddle::platform;
REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>,
paddle::operators::CUDNNConvGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(
conv2d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>);
REGISTER_OP_KERNEL(
conv3d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
|
6cf58b2433c8461c461406f7b46c48b5a167dbed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.cuh"
#include "bp_hip.cuh"
#include "cuptens.h"
void cubp_split_pack(cuftens *src, cuptens *dst)
{
int D=src->D, Ns=src->MNL, Nd=dst->X*2;
dim3 grid(D, CEIL(Ns, 32));
dim3 block(1, 32);
hipLaunchKernelGGL(( ker_bpsplit <8>) , dim3(grid), dim3(block), 0, 0,
src->data, (uint32_t *)dst->data, Ns, Nd);
}
void cubp_merge(cuftens *src, cuftens *dst, cuftens *fix, float norm)
{
const int D=src->D, N=src->MNL/8;
dim3 grid(D, CEIL(N, 32));
dim3 block(8, 32);
hipLaunchKernelGGL(( ker_bpmerge <8>) , dim3(grid), dim3(block), 0, 0,
src->data, dst->data, fix->data, norm, N, fix->N);
}
| 6cf58b2433c8461c461406f7b46c48b5a167dbed.cu | #include "util.cuh"
#include "bp.cuh"
#include "cuptens.h"
void cubp_split_pack(cuftens *src, cuptens *dst)
{
int D=src->D, Ns=src->MNL, Nd=dst->X*2;
dim3 grid(D, CEIL(Ns, 32));
dim3 block(1, 32);
ker_bpsplit <8> <<<grid, block>>>
(src->data, (uint32_t *)dst->data, Ns, Nd);
}
void cubp_merge(cuftens *src, cuftens *dst, cuftens *fix, float norm)
{
const int D=src->D, N=src->MNL/8;
dim3 grid(D, CEIL(N, 32));
dim3 block(8, 32);
ker_bpmerge <8> <<<grid, block>>>
(src->data, dst->data, fix->data, norm, N, fix->N);
}
|
c207c281db487c6f5cebbdd1be8d4dbf42037702.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "basic/GPUBasic.h"
#include "model/GPUDevice.h"
#include "math/GPUMath.h"
#include "math/GPUUnaryElementWise.h"
#include "math/Log.h"
namespace Deep8 {
namespace Math {
template <typename T>
struct LogKernelOp {
DEEP8_CUDA_FUNC DEEP8_CUDA_INLINE T operator()(const T &x) {
return cudaLog(x);
}
};
void LogGPU(const Tensor &x, Tensor &y) {
auto n = (int)x.shape.size();
int blockSize = DEEP8_GPU_BLOCK_SIZE;
int grideSize = (n + DEEP8_GPU_BLOCK_SIZE - 1) / DEEP8_GPU_BLOCK_SIZE;
switch (x.elementType.id) {
case DType::Float32:
hipLaunchKernelGGL(( UnaryElementWiseKernel<float, LogKernelOp<float>>) , dim3(grideSize), dim3(blockSize), 0, 0,
x.data<float>(),
y.data<float>(),
LogKernelOp<float>(),
n
);
break;
case DType::Float64:
hipLaunchKernelGGL(( UnaryElementWiseKernel<double, LogKernelOp<double>>) , dim3(grideSize), dim3(blockSize), 0, 0,
x.data<double>(),
y.data<double>(),
LogKernelOp<double>(),
n
);
break;
#ifdef HAVE_HALF
case DType::Float16:
hipLaunchKernelGGL(( UnaryElementWiseKernel<half, LogKernelOp<half>>) , dim3(grideSize), dim3(blockSize), 0, 0,
x.data<half>(),
y.data<half>(),
LogKernelOp<half>(),
n
);
break;
#endif
default:
DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support");
break;
}
}
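// Backward of log: d(log x)/dx = 1/x, so the input gradient is dy / x.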
template <typename T>
struct LogGradKernelOp {
DEEP8_CUDA_FUNC DEEP8_CUDA_INLINE T operator()(const T &x, const T &y, const T &dy) {
return dy / x;
}
};
void LogGradGPU(const Tensor &x, Tensor &dx, const Tensor &y, const Tensor &dy) {
auto n = (int)dx.shape.size();
int blockSize = DEEP8_GPU_BLOCK_SIZE;
int grideSize = (n + DEEP8_GPU_BLOCK_SIZE - 1) / DEEP8_GPU_BLOCK_SIZE;
switch (x.elementType.id) {
case DType::Float32:
hipLaunchKernelGGL(( UnaryElementWiseGradKernel<float, LogGradKernelOp<float>>) , dim3(grideSize), dim3(blockSize), 0, 0,
x.data<float>(),
dx.data<float>(),
y.data<float>(),
dy.data<float>(),
LogGradKernelOp<float>(),
n
);
break;
case DType::Float64:
hipLaunchKernelGGL(( UnaryElementWiseGradKernel<double, LogGradKernelOp<double>>) , dim3(grideSize), dim3(blockSize), 0, 0,
x.data<double>(),
dx.data<double>(),
y.data<double>(),
dy.data<double>(),
LogGradKernelOp<double>(),
n
);
break;
#ifdef HAVE_HALF
case DType::Float16:
hipLaunchKernelGGL(( UnaryElementWiseGradKernel<half, LogGradKernelOp<half>>) , dim3(grideSize), dim3(blockSize), 0, 0,
x.data<half>(),
dx.data<half>(),
y.data<half>(),
dy.data<half>(),
LogGradKernelOp<half>(),
n
);
break;
#endif
default:
DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support");
break;
}
}
}
} | c207c281db487c6f5cebbdd1be8d4dbf42037702.cu | #include "basic/GPUBasic.h"
#include "model/GPUDevice.h"
#include "math/GPUMath.h"
#include "math/GPUUnaryElementWise.h"
#include "math/Log.h"
namespace Deep8 {
namespace Math {
template <typename T>
struct LogKernelOp {
DEEP8_CUDA_FUNC DEEP8_CUDA_INLINE T operator()(const T &x) {
return cudaLog(x);
}
};
void LogGPU(const Tensor &x, Tensor &y) {
auto n = (int)x.shape.size();
int blockSize = DEEP8_GPU_BLOCK_SIZE;
int grideSize = (n + DEEP8_GPU_BLOCK_SIZE - 1) / DEEP8_GPU_BLOCK_SIZE;
switch (x.elementType.id) {
case DType::Float32:
UnaryElementWiseKernel<float, LogKernelOp<float>> <<<grideSize, blockSize>>>(
x.data<float>(),
y.data<float>(),
LogKernelOp<float>(),
n
);
break;
case DType::Float64:
UnaryElementWiseKernel<double, LogKernelOp<double>> <<<grideSize, blockSize>>>(
x.data<double>(),
y.data<double>(),
LogKernelOp<double>(),
n
);
break;
#ifdef HAVE_HALF
case DType::Float16:
UnaryElementWiseKernel<half, LogKernelOp<half>> <<<grideSize, blockSize>>>(
x.data<half>(),
y.data<half>(),
LogKernelOp<half>(),
n
);
break;
#endif
default:
DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support");
break;
}
}
template <typename T>
struct LogGradKernelOp {
DEEP8_CUDA_FUNC DEEP8_CUDA_INLINE T operator()(const T &x, const T &y, const T &dy) {
return dy / x;
}
};
void LogGradGPU(const Tensor &x, Tensor &dx, const Tensor &y, const Tensor &dy) {
auto n = (int)dx.shape.size();
int blockSize = DEEP8_GPU_BLOCK_SIZE;
int grideSize = (n + DEEP8_GPU_BLOCK_SIZE - 1) / DEEP8_GPU_BLOCK_SIZE;
switch (x.elementType.id) {
case DType::Float32:
UnaryElementWiseGradKernel<float, LogGradKernelOp<float>> <<<grideSize, blockSize>>> (
x.data<float>(),
dx.data<float>(),
y.data<float>(),
dy.data<float>(),
LogGradKernelOp<float>(),
n
);
break;
case DType::Float64:
UnaryElementWiseGradKernel<double, LogGradKernelOp<double>> <<<grideSize, blockSize>>> (
x.data<double>(),
dx.data<double>(),
y.data<double>(),
dy.data<double>(),
LogGradKernelOp<double>(),
n
);
break;
#ifdef HAVE_HALF
case DType::Float16:
UnaryElementWiseGradKernel<half, LogGradKernelOp<half>> <<<grideSize, blockSize>>> (
x.data<half>(),
dx.data<half>(),
y.data<half>(),
dy.data<half>(),
LogGradKernelOp<half>(),
n
);
break;
#endif
default:
DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support");
break;
}
}
}
} |
e37fe0e140db229f7e97ae47e834d92a7f9de4fc.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/PointwiseOps.h>
#include <THH/THHNumerics.cuh>
namespace at { namespace native {
void addcmul_cuda_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, iter.dtype(), "addcmul_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return a + alpha * b * c;
});
});
}
void addcdiv_cuda_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, iter.dtype(), "addcdiv_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return a + alpha * (b / c);
});
});
}
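// Smooth L1 backward: the gradient is +/-norm outside the |x| <= beta band and
// norm * x / beta inside it, times grad_output; Huber backward below has the same
// shape with the outer branches scaled by delta.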
void smooth_l1_backward_cuda_kernel(TensorIterator& iter, const Scalar& norm, double beta) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "smooth_l1_backward_cuda", [&iter, &norm, beta] {
auto norm_val = norm.to<scalar_t>();
scalar_t beta_val(beta);
gpu_kernel(iter, [norm_val, beta_val]GPU_LAMBDA(scalar_t input, scalar_t target, scalar_t grad_output) -> scalar_t {
const auto x = input - target;
if (x < -beta_val)
return -norm_val * grad_output;
else if (x > beta_val)
return norm_val * grad_output;
else
return norm_val * x * grad_output / beta_val;
});
});
}
void huber_backward_cuda_kernel(TensorIterator& iter, const Scalar& norm, double delta) {
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "huber_backward_cuda", [&iter, &norm, delta] {
auto norm_val = norm.to<scalar_t>();
scalar_t delta_val(delta);
gpu_kernel(iter, [norm_val, delta_val]GPU_LAMBDA(scalar_t input, scalar_t target, scalar_t grad_output) -> scalar_t {
const auto x = input - target;
if (x < -delta_val) {
return -norm_val * grad_output * delta_val;
} else if (x > delta_val) {
return norm_val * grad_output * delta_val;
} else {
return norm_val * x * grad_output;
}
});
});
}
void mse_backward_cuda_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "mse_backward_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return alpha * (a - b) * c;
});
});
}
REGISTER_DISPATCH(addcdiv_stub, &addcdiv_cuda_kernel);
REGISTER_DISPATCH(addcmul_stub, &addcmul_cuda_kernel);
REGISTER_DISPATCH(smooth_l1_backward_stub, &smooth_l1_backward_cuda_kernel);
REGISTER_DISPATCH(huber_backward_stub, &huber_backward_cuda_kernel);
REGISTER_DISPATCH(mse_backward_stub, &mse_backward_cuda_kernel);
}} // namespace at::native
| e37fe0e140db229f7e97ae47e834d92a7f9de4fc.cu | #include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/PointwiseOps.h>
#include <THC/THCNumerics.cuh>
namespace at { namespace native {
void addcmul_cuda_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, iter.dtype(), "addcmul_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return a + alpha * b * c;
});
});
}
void addcdiv_cuda_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, iter.dtype(), "addcdiv_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return a + alpha * (b / c);
});
});
}
void smooth_l1_backward_cuda_kernel(TensorIterator& iter, const Scalar& norm, double beta) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "smooth_l1_backward_cuda", [&iter, &norm, beta] {
auto norm_val = norm.to<scalar_t>();
scalar_t beta_val(beta);
gpu_kernel(iter, [norm_val, beta_val]GPU_LAMBDA(scalar_t input, scalar_t target, scalar_t grad_output) -> scalar_t {
const auto x = input - target;
if (x < -beta_val)
return -norm_val * grad_output;
else if (x > beta_val)
return norm_val * grad_output;
else
return norm_val * x * grad_output / beta_val;
});
});
}
void huber_backward_cuda_kernel(TensorIterator& iter, const Scalar& norm, double delta) {
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "huber_backward_cuda", [&iter, &norm, delta] {
auto norm_val = norm.to<scalar_t>();
scalar_t delta_val(delta);
gpu_kernel(iter, [norm_val, delta_val]GPU_LAMBDA(scalar_t input, scalar_t target, scalar_t grad_output) -> scalar_t {
const auto x = input - target;
if (x < -delta_val) {
return -norm_val * grad_output * delta_val;
} else if (x > delta_val) {
return norm_val * grad_output * delta_val;
} else {
return norm_val * x * grad_output;
}
});
});
}
void mse_backward_cuda_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "mse_backward_cuda", [&]() {
auto alpha = value.to<scalar_t>();
gpu_kernel(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b, scalar_t c) -> scalar_t {
return alpha * (a - b) * c;
});
});
}
REGISTER_DISPATCH(addcdiv_stub, &addcdiv_cuda_kernel);
REGISTER_DISPATCH(addcmul_stub, &addcmul_cuda_kernel);
REGISTER_DISPATCH(smooth_l1_backward_stub, &smooth_l1_backward_cuda_kernel);
REGISTER_DISPATCH(huber_backward_stub, &huber_backward_cuda_kernel);
REGISTER_DISPATCH(mse_backward_stub, &mse_backward_cuda_kernel);
}} // namespace at::native
|
37367cbf585a3b32a33bf9e0b254785ae016f34b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <helper_cuda.h>
#include <helper_functions.h>
#include <stdio.h>
#include <string.h>
#define TARGET_SIZE 512
#define BPP 1
#define BLOCKSIZE 16
#define TILESIZE 18
__shared__ short int Gx[3][3];
__shared__ short int Gy[3][3];
__global__ void local_filter(unsigned char* input, unsigned char* output,
int width, int height) {
Gx[0][0] = -1;
Gx[0][1] = 0;
Gx[0][2] = 1;
Gx[1][0] = -2;
Gx[1][1] = 0;
Gx[1][2] = 2;
Gx[2][0] = -1;
Gx[2][1] = 0;
Gx[2][2] = 1;
Gy[0][0] = 1;
Gy[0][1] = 2;
Gy[0][2] = 1;
Gy[1][0] = 0;
Gy[1][1] = 0;
Gy[1][2] = 0;
Gy[2][0] = -1;
Gy[2][1] = -2;
Gy[2][2] = -1;
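  // 3x3 Sobel kernels Gx/Gy; each 16x16 thread block stages an 18x18 input tile
  // (one-pixel halo) in shared memory before convolving.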
__shared__ unsigned char localBlock[TILESIZE]
[TILESIZE]; // BLOCKSIZE=16; TILESIZE=18
int x = blockIdx.x * blockDim.x + threadIdx.x; //- 1;
int y = blockIdx.y * blockDim.y + threadIdx.y; //- 1;
int tx = threadIdx.x + 1;
int ty = threadIdx.y + 1;
if (tx >= 1 && ty >= 1 && tx <= TILESIZE - 2 && ty <= TILESIZE - 2) {
localBlock[(tx)][(ty)] = input[x + y * width];
}
if (tx - 1 == 0 && ty - 1 == 0) {
localBlock[tx - 1][ty - 1] = input[x - 1 + (y - 1) * width];
}
if (tx - 1 == 0 && ty >= 1 && ty <= TILESIZE - 2) {
localBlock[tx - 1][ty] = input[x - 1 + y * width];
}
if (tx - 1 == 0 && ty + 1 == TILESIZE - 1) {
localBlock[tx - 1][ty + 1] = input[x - 1 + (y + 1) * width];
}
if (ty - 1 == 0 && tx + 1 == TILESIZE - 1) {
localBlock[tx + 1][ty - 1] = input[x + 1 + (y - 1) * width];
}
if (ty >= 1 && ty <= TILESIZE - 2 && tx + 1 == TILESIZE - 1) {
localBlock[tx + 1][ty] = input[x + 1 + y * width];
}
if (ty + 1 == TILESIZE - 1 && tx + 1 == TILESIZE - 1) {
localBlock[tx + 1][ty + 1] = input[x + 1 + (y + 1) * width];
}
if (ty - 1 == 0 && tx >= 1 && tx <= TILESIZE - 2) {
localBlock[tx][ty - 1] = input[x + (y - 1) * width];
}
if (ty + 1 == TILESIZE - 1 && tx >= 1 && tx <= TILESIZE - 2) {
localBlock[tx][ty + 1] = input[x + (y + 1) * width];
}
__syncthreads();
int pixel = 0, pixelX = 0, pixelY = 0;
if (tx >= 1 && tx < TILESIZE - 1 && ty >= 1 && ty < TILESIZE - 1) {
for (int i = -1; i <= 1; i++) {
for (int j = -1; j <= 1; j++) {
pixelX += localBlock[tx + i][ty + j] * Gx[(i + 1)][(j + 1)];
pixelY += localBlock[tx + i][ty + j] * Gy[(i + 1)][(j + 1)];
}
}
pixel = abs(pixelX) + abs(pixelY);
pixel = (pixel < 0) ? 0 : pixel;
pixel = (pixel > 255) ? 255 : pixel;
}
output[__mul24(y, width) + x] = pixel;
}
int main(int argc, char** argv) {
unsigned char* h_data = NULL;
unsigned char* d_input = NULL;
unsigned char* d_output = NULL;
unsigned int height, width;
unsigned int newheight = TARGET_SIZE, newwidth = TARGET_SIZE;
// char file_name[]="./img/lena_grey.pgm";
char* file_name = argv[1];
char* image_path = sdkFindFilePath(file_name, argv[0]);
if (image_path == 0) exit(0);
printf("Open %s\n", image_path);
sdkLoadPGM(image_path, &h_data, &width, &height);
int size = height * width * sizeof(unsigned char) * BPP;
int newsize = newheight * newwidth * sizeof(unsigned char) * BPP;
printf("Original Image Size: [%d, %d], size: %d\n", height, width, size);
printf("New Image Size: [%d, %d], size: %d\n", newheight, newwidth,
newsize);
checkCudaErrors(hipMalloc((void**)&d_input, size));
checkCudaErrors(hipMemcpy(d_input, h_data, size, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void**)&d_output, newsize));
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1);
dim3 dimGrid(newwidth / dimBlock.x, newheight / dimBlock.y, 1);
StopWatchInterface* timer = NULL;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
hipLaunchKernelGGL(( local_filter), dim3(dimGrid), dim3(dimBlock), 0, 0, d_input, d_output, newwidth, newheight);
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&timer);
printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
sdkDeleteTimer(&timer);
unsigned char* h_odata;
h_odata = (unsigned char*)malloc(newsize);
checkCudaErrors(
hipMemcpy(h_odata, d_output, newsize, hipMemcpyDeviceToHost));
char outputpath[1024];
strcpy(outputpath, image_path + 6);
strcpy(outputpath + strlen(image_path + 6) - 4, "_local_output.pgm");
sdkSavePGM(outputpath, h_odata, newwidth, newheight);
printf("Wrote '%s'\n\n", outputpath);
checkCudaErrors(hipFree(d_input));
checkCudaErrors(hipFree(d_output));
free(image_path);
free(h_data);
free(h_odata);
}
| 37367cbf585a3b32a33bf9e0b254785ae016f34b.cu | #include <helper_cuda.h>
#include <helper_functions.h>
#include <stdio.h>
#include <string.h>
#define TARGET_SIZE 512
#define BPP 1
#define BLOCKSIZE 16
#define TILESIZE 18
__shared__ short int Gx[3][3];
__shared__ short int Gy[3][3];
__global__ void local_filter(unsigned char* input, unsigned char* output,
int width, int height) {
Gx[0][0] = -1;
Gx[0][1] = 0;
Gx[0][2] = 1;
Gx[1][0] = -2;
Gx[1][1] = 0;
Gx[1][2] = 2;
Gx[2][0] = -1;
Gx[2][1] = 0;
Gx[2][2] = 1;
Gy[0][0] = 1;
Gy[0][1] = 2;
Gy[0][2] = 1;
Gy[1][0] = 0;
Gy[1][1] = 0;
Gy[1][2] = 0;
Gy[2][0] = -1;
Gy[2][1] = -2;
Gy[2][2] = -1;
__shared__ unsigned char localBlock[TILESIZE]
[TILESIZE]; // BLOCKSIZE=16; TILESIZE=18
int x = blockIdx.x * blockDim.x + threadIdx.x; //- 1;
int y = blockIdx.y * blockDim.y + threadIdx.y; //- 1;
int tx = threadIdx.x + 1;
int ty = threadIdx.y + 1;
if (tx >= 1 && ty >= 1 && tx <= TILESIZE - 2 && ty <= TILESIZE - 2) {
localBlock[(tx)][(ty)] = input[x + y * width];
}
if (tx - 1 == 0 && ty - 1 == 0) {
localBlock[tx - 1][ty - 1] = input[x - 1 + (y - 1) * width];
}
if (tx - 1 == 0 && ty >= 1 && ty <= TILESIZE - 2) {
localBlock[tx - 1][ty] = input[x - 1 + y * width];
}
if (tx - 1 == 0 && ty + 1 == TILESIZE - 1) {
localBlock[tx - 1][ty + 1] = input[x - 1 + (y + 1) * width];
}
if (ty - 1 == 0 && tx + 1 == TILESIZE - 1) {
localBlock[tx + 1][ty - 1] = input[x + 1 + (y - 1) * width];
}
if (ty >= 1 && ty <= TILESIZE - 2 && tx + 1 == TILESIZE - 1) {
localBlock[tx + 1][ty] = input[x + 1 + y * width];
}
if (ty + 1 == TILESIZE - 1 && tx + 1 == TILESIZE - 1) {
localBlock[tx + 1][ty + 1] = input[x + 1 + (y + 1) * width];
}
if (ty - 1 == 0 && tx >= 1 && tx <= TILESIZE - 2) {
localBlock[tx][ty - 1] = input[x + (y - 1) * width];
}
if (ty + 1 == TILESIZE - 1 && tx >= 1 && tx <= TILESIZE - 2) {
localBlock[tx][ty + 1] = input[x + (y + 1) * width];
}
__syncthreads();
int pixel = 0, pixelX = 0, pixelY = 0;
if (tx >= 1 && tx < TILESIZE - 1 && ty >= 1 && ty < TILESIZE - 1) {
for (int i = -1; i <= 1; i++) {
for (int j = -1; j <= 1; j++) {
pixelX += localBlock[tx + i][ty + j] * Gx[(i + 1)][(j + 1)];
pixelY += localBlock[tx + i][ty + j] * Gy[(i + 1)][(j + 1)];
}
}
pixel = abs(pixelX) + abs(pixelY);
pixel = (pixel < 0) ? 0 : pixel;
pixel = (pixel > 255) ? 255 : pixel;
}
output[__mul24(y, width) + x] = pixel;
}
int main(int argc, char** argv) {
unsigned char* h_data = NULL;
unsigned char* d_input = NULL;
unsigned char* d_output = NULL;
unsigned int height, width;
unsigned int newheight = TARGET_SIZE, newwidth = TARGET_SIZE;
// char file_name[]="./img/lena_grey.pgm";
char* file_name = argv[1];
char* image_path = sdkFindFilePath(file_name, argv[0]);
if (image_path == 0) exit(0);
printf("Open %s\n", image_path);
sdkLoadPGM(image_path, &h_data, &width, &height);
int size = height * width * sizeof(unsigned char) * BPP;
int newsize = newheight * newwidth * sizeof(unsigned char) * BPP;
printf("Original Image Size: [%d, %d], size: %d\n", height, width, size);
printf("New Image Size: [%d, %d], size: %d\n", newheight, newwidth,
newsize);
checkCudaErrors(cudaMalloc((void**)&d_input, size));
checkCudaErrors(cudaMemcpy(d_input, h_data, size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void**)&d_output, newsize));
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1);
dim3 dimGrid(newwidth / dimBlock.x, newheight / dimBlock.y, 1);
StopWatchInterface* timer = NULL;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
local_filter<<<dimGrid, dimBlock>>>(d_input, d_output, newwidth, newheight);
checkCudaErrors(cudaThreadSynchronize());
sdkStopTimer(&timer);
printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
sdkDeleteTimer(&timer);
unsigned char* h_odata;
h_odata = (unsigned char*)malloc(newsize);
checkCudaErrors(
cudaMemcpy(h_odata, d_output, newsize, cudaMemcpyDeviceToHost));
char outputpath[1024];
strcpy(outputpath, image_path + 6);
strcpy(outputpath + strlen(image_path + 6) - 4, "_local_output.pgm");
sdkSavePGM(outputpath, h_odata, newwidth, newheight);
printf("Wrote '%s'\n\n", outputpath);
checkCudaErrors(cudaFree(d_input));
checkCudaErrors(cudaFree(d_output));
free(image_path);
free(h_data);
free(h_odata);
}
|
ecf61a954419e4c0476b39e4cc415123c1ad0d49.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/normalize_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, Dtype epsilon,
const Dtype* data, Dtype* norm_data) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
norm_data[index] = sum + epsilon;
}
}
template <typename Dtype>
__global__ void kernel_channel_scale(const int num, const int channels, const int spatial_dim,
Dtype alpha, const Dtype* data, const Dtype* norm_data,
Dtype beta, Dtype* output_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
output_data[index] = alpha * data[index] * norm_data[n * spatial_dim + s] + beta * output_data[index];
}
}
template <typename Dtype>
__global__ void kernel_channel_self_scale(const int num, const int channels, const int spatial_dim,
const Dtype* norm_data, Dtype* input_output_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
input_output_data[index] *= norm_data[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int num, const int channels, const int spatial_dim,
Dtype alpha, const Dtype* data, const Dtype* norm_data,
Dtype beta, Dtype* output_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
output_data[index] = alpha * data[index] / norm_data[n * spatial_dim + s] + beta * output_data[index];
}
}
template <typename Dtype>
__global__ void kernel_channel_self_div(const int num, const int channels, const int spatial_dim,
const Dtype* norm_data, Dtype* input_output_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
input_output_data[index] /= norm_data[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
__global__ void kernel_sign(const int count, const Dtype* input, Dtype* sign_out) {
CUDA_KERNEL_LOOP(index, count) {
sign_out[index] = (Dtype(0) < input[index]) - (input[index] < Dtype(0));
}
}
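// Forward: divide each channel vector by its norm, computed per (n, spatial)
// position -- sqrt(sum x^2 + eps) for L2, sum |x| + eps for L1.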
template <typename Dtype>
void NormalizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* square_data = squared_.mutable_gpu_data();
Dtype* norm_data = (top.size() == 2) ? top[1]->mutable_gpu_data() : norm_.mutable_gpu_data();
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int spatial_dim = bottom[0]->height() * bottom[0]->width();
if (normalize_type_ == "L2") {
caffe_gpu_powx(num*channels*spatial_dim, bottom_data, Dtype(2), square_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(num*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, 1e-12, square_data, norm_data);
caffe_gpu_powx(num * spatial_dim, norm_data, Dtype(0.5), norm_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, Dtype(1), bottom_data, norm_data, Dtype(0), top_data);
}
else if (normalize_type_ == "L1") {
caffe_gpu_abs(num*channels*spatial_dim, bottom_data, square_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(num*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, 1e-6, square_data, norm_data);
//caffe_gpu_powx(num * spatial_dim, norm_data, Dtype(-1), norm_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, Dtype(1), bottom_data, norm_data, Dtype(0), top_data);
}
else {
NOT_IMPLEMENTED;
}
}
template <typename Dtype>
void NormalizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* square_data = squared_.mutable_gpu_data();
const Dtype* norm_data = (top.size() == 2) ? top[1]->gpu_data() : norm_.gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* temp_diff = norm_.mutable_gpu_diff();
int num = top[0]->num();
int channels = top[0]->channels();
int spatial_dim = bottom[0]->height() * bottom[0]->width();
if (propagate_down[0]) {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, top_data, top_diff, temp_diff);
if (normalize_type_ == "L2") {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, Dtype(1), top_data, temp_diff, Dtype(0), bottom_diff);
}
else if (normalize_type_ == "L1") {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_sign<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num*channels*spatial_dim, bottom_data, square_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, Dtype(1), square_data, temp_diff, Dtype(0), bottom_diff);
}
else {
NOT_IMPLEMENTED;
}
caffe_gpu_sub(num * channels * spatial_dim, top_diff, bottom_diff, bottom_diff);
if (fix_gradient_) {
//// NOLINT_NEXT_LINE(whitespace/operators)
//kernel_channel_self_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
// CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, norm_data, bottom_diff);
}
else {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_self_div<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, norm_data, bottom_diff);
}
}
if (bp_norm_) {
const Dtype* norm_diff = top[1]->gpu_diff();
if (normalize_type_ == "L2") {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, Dtype(1), top_data, norm_diff, Dtype(1), bottom_diff);
}
else if (normalize_type_ == "L1") {
if (!propagate_down[0]) {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_sign<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num*channels*spatial_dim, bottom_data, square_data);
}
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, Dtype(1), square_data, norm_diff, Dtype(1), bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(NormalizeLayer);
} // namespace caffe
| ecf61a954419e4c0476b39e4cc415123c1ad0d49.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/normalize_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, Dtype epsilon,
const Dtype* data, Dtype* norm_data) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
norm_data[index] = sum + epsilon;
}
}
template <typename Dtype>
__global__ void kernel_channel_scale(const int num, const int channels, const int spatial_dim,
Dtype alpha, const Dtype* data, const Dtype* norm_data,
Dtype beta, Dtype* output_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
output_data[index] = alpha * data[index] * norm_data[n * spatial_dim + s] + beta * output_data[index];
}
}
template <typename Dtype>
__global__ void kernel_channel_self_scale(const int num, const int channels, const int spatial_dim,
const Dtype* norm_data, Dtype* input_output_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
input_output_data[index] *= norm_data[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int num, const int channels, const int spatial_dim,
Dtype alpha, const Dtype* data, const Dtype* norm_data,
Dtype beta, Dtype* output_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
output_data[index] = alpha * data[index] / norm_data[n * spatial_dim + s] + beta * output_data[index];
}
}
template <typename Dtype>
__global__ void kernel_channel_self_div(const int num, const int channels, const int spatial_dim,
const Dtype* norm_data, Dtype* input_output_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
input_output_data[index] /= norm_data[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
__global__ void kernel_sign(const int count, const Dtype* input, Dtype* sign_out) {
CUDA_KERNEL_LOOP(index, count) {
sign_out[index] = (Dtype(0) < input[index]) - (input[index] < Dtype(0));
}
}
template <typename Dtype>
void NormalizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* square_data = squared_.mutable_gpu_data();
Dtype* norm_data = (top.size() == 2) ? top[1]->mutable_gpu_data() : norm_.mutable_gpu_data();
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int spatial_dim = bottom[0]->height() * bottom[0]->width();
if (normalize_type_ == "L2") {
caffe_gpu_powx(num*channels*spatial_dim, bottom_data, Dtype(2), square_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(num*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, 1e-12, square_data, norm_data);
caffe_gpu_powx(num * spatial_dim, norm_data, Dtype(0.5), norm_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, Dtype(1), bottom_data, norm_data, Dtype(0), top_data);
}
else if (normalize_type_ == "L1") {
caffe_gpu_abs(num*channels*spatial_dim, bottom_data, square_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype> << <CAFFE_GET_BLOCKS(num*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, 1e-6, square_data, norm_data);
//caffe_gpu_powx(num * spatial_dim, norm_data, Dtype(-1), norm_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, Dtype(1), bottom_data, norm_data, Dtype(0), top_data);
}
else {
NOT_IMPLEMENTED;
}
}
template <typename Dtype>
void NormalizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* square_data = squared_.mutable_gpu_data();
const Dtype* norm_data = (top.size() == 2) ? top[1]->gpu_data() : norm_.gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* temp_diff = norm_.mutable_gpu_diff();
int num = top[0]->num();
int channels = top[0]->channels();
int spatial_dim = bottom[0]->height() * bottom[0]->width();
if (propagate_down[0]) {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype> << <CAFFE_GET_BLOCKS(num * spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, top_data, top_diff, temp_diff);
if (normalize_type_ == "L2") {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, Dtype(1), top_data, temp_diff, Dtype(0), bottom_diff);
}
else if (normalize_type_ == "L1") {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_sign<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num*channels*spatial_dim, bottom_data, square_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, Dtype(1), square_data, temp_diff, Dtype(0), bottom_diff);
}
else {
NOT_IMPLEMENTED;
}
caffe_gpu_sub(num * channels * spatial_dim, top_diff, bottom_diff, bottom_diff);
if (fix_gradient_) {
//// NOLINT_NEXT_LINE(whitespace/operators)
//kernel_channel_self_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
// CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, norm_data, bottom_diff);
}
else {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_self_div<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, norm_data, bottom_diff);
}
}
if (bp_norm_) {
const Dtype* norm_diff = top[1]->gpu_diff();
if (normalize_type_ == "L2") {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, Dtype(1), top_data, norm_diff, Dtype(1), bottom_diff);
}
else if (normalize_type_ == "L1") {
if (!propagate_down[0]) {
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_sign<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num*channels*spatial_dim, bottom_data, square_data);
}
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scale<Dtype> << <CAFFE_GET_BLOCKS(num*channels*spatial_dim),
CAFFE_CUDA_NUM_THREADS >> >(num, channels, spatial_dim, Dtype(1), square_data, norm_diff, Dtype(1), bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(NormalizeLayer);
} // namespace caffe
|
96ce6b143d53fe4a6f76a6a19ae901dd3c2561bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/floor_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T>
__global__ void FloorKernel(const int N, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = ::floor(X[i]);
}
}
template <>
bool FloorOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( FloorKernel),
dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X.numel(), X.data<float>(), Y->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(Floor, FloorOp<float, CUDAContext>);
} // namespace caffe2
| 96ce6b143d53fe4a6f76a6a19ae901dd3c2561bb.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/floor_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T>
__global__ void FloorKernel(const int N, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = std::floor(X[i]);
}
}
template <>
bool FloorOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
FloorKernel<<<
CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.numel(), X.data<float>(), Y->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(Floor, FloorOp<float, CUDAContext>);
} // namespace caffe2
|
c7bac2d3e055713f63ca92da355d246edf15412d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
void printMatrix(const int *A, int rows, int cols) {
for(int i = 0; i < rows*cols*4; i++){
printf("%d ", A[i]);
printf(" ");
if ((i+1)%9 == 0){
printf("|");
}
}
printf("\n");
};
void readInput_soa(const char *filename, int **Soa, int *rows, int *cols) {
FILE *file;
file = fopen(filename, "r");
fscanf(file, "%d %d", rows, cols);
int * A_F0 = (int *) malloc(*rows * (*cols)* (4) * sizeof(int));
for(int i = 0; i < *rows*(*cols)*(4); i++) {
fscanf(file, "%d ", &A_F0[i]);
}
fclose(file);
*Soa = A_F0;
};
void readInput_aos(const char *filename, int **Aos, int *rows, int *cols) {
FILE *file;
file = fopen(filename, "r");
fscanf(file, "%d %d", rows, cols);
int * A_F1 = (int *) malloc(*rows * (*cols)* (4) * sizeof(int));
for(int j = 0; j < 4; j++) {
int counter = 0;
for(int i = 0; i < *cols*(*rows); i++){
fscanf(file, "%d ", &A_F1[counter +j]);
counter = counter + 4;
}
}
fclose(file);
*Aos = A_F1;
}
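// HPP-style lattice gas: every cell stores four occupation flags (right, up, left,
// down); head-on pairs are rotated by the collision rule, then particles stream to
// the neighbouring cell with periodic wrap-around.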
__global__ void step_periodic_Soa(int * array,int rows, int cols){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
if (tId < rows*cols){
int x = tId%(cols);
int y = (int) tId/rows;
//Colission
if (array[tId] == 1 && array[tId + 2*rows*cols] == 1){
if(array[tId + rows*cols] == 0 && array[tId+rows*cols*3] == 0){
array[tId] = 0;
array[tId + 2*rows*cols] = 0;
array[tId + rows*cols] = 1;
array[tId+rows*cols*3] = 1;
}
}
if (array[tId + rows*cols] == 1 && array[tId+rows*cols*3] == 1){
if(array[tId] == 0 && array[tId + 2*rows*cols] == 0){
array[tId + rows*cols] = 0;
array[tId+rows*cols*3] = 0;
array[tId] = 1;
array[tId + 2*rows*cols] = 1;
}
}
//streaming
int c_aux = x + 1;
if (c_aux == cols){
c_aux = 0;
}
if (array[tId] == 1){
array[(y*rows + c_aux)] = array[tId]*2;
}
//left
c_aux = x - 1;
if (c_aux < 0){
c_aux = cols -1;
}
if (array[tId+ 2*rows*cols] == 1){
array[(y*rows + c_aux) + 2*rows*cols] = array[tId+ 2*rows*cols]*2;
}
//top
c_aux = y + 1;
if (c_aux == rows){
c_aux = 0;
}
if (array[tId + rows*cols] == 1){
array[(c_aux*rows + x) + + rows*cols] = array[tId+ rows*cols]*2;
}
//bottom
        c_aux = y - 1;
if (c_aux < 0){
c_aux = rows-1;
}
if (array[tId+ 3*rows*cols] == 1){
array[(c_aux*rows + x)+ 3*rows*cols] = array[tId+ 3*rows*cols]*2;
}
//Correction
if(array[tId] == 1){
array[tId] = 0;
}
if(array[tId] == 2){
array[tId] = 1;
}
if(array[tId+ rows*cols] == 1){
array[tId+ rows*cols] = 0;
}
if(array[tId+ rows*cols] == 2){
array[tId+ rows*cols] = 1;
}
if(array[tId+ 2*rows*cols] == 1){
array[tId+ 2*rows*cols] = 0;
}
if(array[tId+ 2*rows*cols] == 2){
array[tId+ 2*rows*cols] = 1;
}
if(array[tId+ 3*rows*cols] == 1){
array[tId+ 3*rows*cols] = 0;
}
if(array[tId+ 3*rows*cols] == 2){
array[tId+ 3*rows*cols] = 1;
}
}
};
//Periodic boundaries condition Array of Structures
__global__ void step_periodic_Aos(int * array,int rows, int cols){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
if (tId < rows*cols){
int x = tId%(cols);
int y = (int) tId/rows;
//Colission
if (array[tId*4+0] == 1 && array[tId*4+2] == 1){
if(array[tId*4+1] == 0 && array[tId*4+3] == 0){
array[tId*4+0] = 0;
array[tId*4+2] = 0;
array[tId*4+1] = 1;
array[tId*4+3] = 1;
}
}
if (array[tId*4+1] == 1 && array[tId*4+3] == 1){
if(array[tId*4+0] == 0 && array[tId*4+2] == 0){
array[tId*4+1] = 0;
array[tId*4+3] = 0;
array[tId*4+0] = 1;
array[tId*4+2] = 1;
}
}
//streaming
//right
int c_aux = x + 1;
if (c_aux == cols){
c_aux = 0;
}
if (array[tId*4] == 1){
array[(y*rows + c_aux)*4] = array[tId*4]*2;
}
//left
c_aux = x - 1;
if (c_aux < 0){
c_aux = cols -1;
}
if (array[tId*4+2] == 1){
array[(y*rows + c_aux)*4 + 2] = array[tId*4+2]*2;
}
//top
c_aux = y + 1;
if (c_aux == rows){
c_aux = 0;
}
if (array[tId*4+1] == 1){
array[(c_aux*rows + x)*4 + 1] = array[tId*4+1]*2;
}
//bottom
c_aux = y + 1;
if (c_aux < 0){
c_aux = rows-1;
}
if (array[tId*4+3] == 1){
array[(c_aux*rows + x)*4 + 3] = array[tId*4+3]*2;
}
//Correction
for(int i = 0; i < 4; i++){
if(array[tId*4+i] == 1){
array[tId*4+i] = 0;
}
if(array[tId*4+i] == 2){
array[tId*4+i] = 1;
}
};
}
};
int main(int argc, char const *argv[])
{
int rows, cols;
int *Aos, *Soa;
int *d_Aos, *d_Soa;
readInput_aos("initial.txt", &Aos, &rows, &cols);
readInput_soa("initial.txt", &Soa, &rows, &cols);
//printMatrix(Soa,rows,cols);
int n = (int)(rows*cols);
int block_size = 256;
int grid_size = (int) ceil((float)n / block_size);
hipMalloc(&d_Aos, 4 * rows * cols * sizeof(int));
hipMalloc(&d_Soa, 4 * rows * cols * sizeof(int));
hipMemcpy(d_Aos, Aos, 4 * rows * cols * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_Soa, Soa, 4 * rows * cols * sizeof(int), hipMemcpyHostToDevice);
for(int k = 0; k < 1000; k++){
//step_periodic_Aos<<<grid_size, block_size>>>(d_Aos, rows, cols);
hipLaunchKernelGGL(( step_periodic_Soa), dim3(grid_size), dim3(block_size), 0, 0, d_Soa, rows, cols);
}
hipMemcpy(Aos, d_Aos, 4 * rows * cols * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(Soa, d_Soa, 4 * rows * cols * sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_Aos);
hipFree(d_Soa);
//printf("----- \n");
//printMatrix(Soa,rows,cols);
//printMatrix(Aos,rows,cols);
//printf("----- \n");
//printMatrix(Soa,rows,cols);
return 0;
} | c7bac2d3e055713f63ca92da355d246edf15412d.cu | #include <stdio.h>
#include <math.h>
void printMatrix(const int *A, int rows, int cols) {
for(int i = 0; i < rows*cols*4; i++){
printf("%d ", A[i]);
printf(" ");
if ((i+1)%9 == 0){
printf("|");
}
}
printf("\n");
};
void readInput_soa(const char *filename, int **Soa, int *rows, int *cols) {
FILE *file;
file = fopen(filename, "r");
fscanf(file, "%d %d", rows, cols);
int * A_F0 = (int *) malloc(*rows * (*cols)* (4) * sizeof(int));
for(int i = 0; i < *rows*(*cols)*(4); i++) {
fscanf(file, "%d ", &A_F0[i]);
}
fclose(file);
*Soa = A_F0;
};
void readInput_aos(const char *filename, int **Aos, int *rows, int *cols) {
FILE *file;
file = fopen(filename, "r");
fscanf(file, "%d %d", rows, cols);
int * A_F1 = (int *) malloc(*rows * (*cols)* (4) * sizeof(int));
for(int j = 0; j < 4; j++) {
int counter = 0;
for(int i = 0; i < *cols*(*rows); i++){
fscanf(file, "%d ", &A_F1[counter +j]);
counter = counter + 4;
}
}
fclose(file);
*Aos = A_F1;
}
__global__ void step_periodic_Soa(int * array,int rows, int cols){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
if (tId < rows*cols){
int x = tId%(cols);
int y = (int) tId/rows;
//Colission
if (array[tId] == 1 && array[tId + 2*rows*cols] == 1){
if(array[tId + rows*cols] == 0 && array[tId+rows*cols*3] == 0){
array[tId] = 0;
array[tId + 2*rows*cols] = 0;
array[tId + rows*cols] = 1;
array[tId+rows*cols*3] = 1;
}
}
if (array[tId + rows*cols] == 1 && array[tId+rows*cols*3] == 1){
if(array[tId] == 0 && array[tId + 2*rows*cols] == 0){
array[tId + rows*cols] = 0;
array[tId+rows*cols*3] = 0;
array[tId] = 1;
array[tId + 2*rows*cols] = 1;
}
}
//streaming
int c_aux = x + 1;
if (c_aux == cols){
c_aux = 0;
}
if (array[tId] == 1){
array[(y*rows + c_aux)] = array[tId]*2;
}
//left
c_aux = x - 1;
if (c_aux < 0){
c_aux = cols -1;
}
if (array[tId+ 2*rows*cols] == 1){
array[(y*rows + c_aux) + 2*rows*cols] = array[tId+ 2*rows*cols]*2;
}
//top
c_aux = y + 1;
if (c_aux == rows){
c_aux = 0;
}
if (array[tId + rows*cols] == 1){
array[(c_aux*rows + x) + + rows*cols] = array[tId+ rows*cols]*2;
}
//bottom
        c_aux = y - 1;
if (c_aux < 0){
c_aux = rows-1;
}
if (array[tId+ 3*rows*cols] == 1){
array[(c_aux*rows + x)+ 3*rows*cols] = array[tId+ 3*rows*cols]*2;
}
//Correction
if(array[tId] == 1){
array[tId] = 0;
}
if(array[tId] == 2){
array[tId] = 1;
}
if(array[tId+ rows*cols] == 1){
array[tId+ rows*cols] = 0;
}
if(array[tId+ rows*cols] == 2){
array[tId+ rows*cols] = 1;
}
if(array[tId+ 2*rows*cols] == 1){
array[tId+ 2*rows*cols] = 0;
}
if(array[tId+ 2*rows*cols] == 2){
array[tId+ 2*rows*cols] = 1;
}
if(array[tId+ 3*rows*cols] == 1){
array[tId+ 3*rows*cols] = 0;
}
if(array[tId+ 3*rows*cols] == 2){
array[tId+ 3*rows*cols] = 1;
}
}
};
//Periodic boundaries condition Array of Structures
__global__ void step_periodic_Aos(int * array,int rows, int cols){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
if (tId < rows*cols){
int x = tId%(cols);
int y = (int) tId/rows;
//Colission
if (array[tId*4+0] == 1 && array[tId*4+2] == 1){
if(array[tId*4+1] == 0 && array[tId*4+3] == 0){
array[tId*4+0] = 0;
array[tId*4+2] = 0;
array[tId*4+1] = 1;
array[tId*4+3] = 1;
}
}
if (array[tId*4+1] == 1 && array[tId*4+3] == 1){
if(array[tId*4+0] == 0 && array[tId*4+2] == 0){
array[tId*4+1] = 0;
array[tId*4+3] = 0;
array[tId*4+0] = 1;
array[tId*4+2] = 1;
}
}
//streaming
//right
int c_aux = x + 1;
if (c_aux == cols){
c_aux = 0;
}
if (array[tId*4] == 1){
array[(y*rows + c_aux)*4] = array[tId*4]*2;
}
//left
c_aux = x - 1;
if (c_aux < 0){
c_aux = cols -1;
}
if (array[tId*4+2] == 1){
array[(y*rows + c_aux)*4 + 2] = array[tId*4+2]*2;
}
//top
c_aux = y + 1;
if (c_aux == rows){
c_aux = 0;
}
if (array[tId*4+1] == 1){
array[(c_aux*rows + x)*4 + 1] = array[tId*4+1]*2;
}
//bottom
    c_aux = y - 1;
if (c_aux < 0){
c_aux = rows-1;
}
if (array[tId*4+3] == 1){
array[(c_aux*rows + x)*4 + 3] = array[tId*4+3]*2;
}
//Correction
for(int i = 0; i < 4; i++){
if(array[tId*4+i] == 1){
array[tId*4+i] = 0;
}
if(array[tId*4+i] == 2){
array[tId*4+i] = 1;
}
};
}
};
int main(int argc, char const *argv[])
{
int rows, cols;
int *Aos, *Soa;
int *d_Aos, *d_Soa;
readInput_aos("initial.txt", &Aos, &rows, &cols);
readInput_soa("initial.txt", &Soa, &rows, &cols);
//printMatrix(Soa,rows,cols);
int n = (int)(rows*cols);
int block_size = 256;
int grid_size = (int) ceil((float)n / block_size);
cudaMalloc(&d_Aos, 4 * rows * cols * sizeof(int));
cudaMalloc(&d_Soa, 4 * rows * cols * sizeof(int));
cudaMemcpy(d_Aos, Aos, 4 * rows * cols * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_Soa, Soa, 4 * rows * cols * sizeof(int), cudaMemcpyHostToDevice);
for(int k = 0; k < 1000; k++){
//step_periodic_Aos<<<grid_size, block_size>>>(d_Aos, rows, cols);
step_periodic_Soa<<<grid_size, block_size>>>(d_Soa, rows, cols);
}
cudaMemcpy(Aos, d_Aos, 4 * rows * cols * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(Soa, d_Soa, 4 * rows * cols * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_Aos);
cudaFree(d_Soa);
//printf("----- \n");
//printMatrix(Soa,rows,cols);
//printMatrix(Aos,rows,cols);
//printf("----- \n");
//printMatrix(Soa,rows,cols);
return 0;
} |
cce39151818e7ed6ad5e9c37b4fdf83414b80716.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kMinimum(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = fminf(mat1[i], mat2[i]);
}
} | cce39151818e7ed6ad5e9c37b4fdf83414b80716.cu | #include "includes.h"
__global__ void kMinimum(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = fminf(mat1[i], mat2[i]);
}
} |
407cc7f43b01e438061657da2e8c10793cbe366a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernCopyToFrame(int N, uint8_t * frame, float * src) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= N) {
return;
}
if (src[idx] < 0) {
frame[idx] = 0;
} else {
frame[idx] = (uint8_t) src[idx];
}
return;
} | 407cc7f43b01e438061657da2e8c10793cbe366a.cu | #include "includes.h"
__global__ void kernCopyToFrame(int N, uint8_t * frame, float * src) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= N) {
return;
}
if (src[idx] < 0) {
frame[idx] = 0;
} else {
frame[idx] = (uint8_t) src[idx];
}
return;
} |
9338d4350e9ed545d42719672d63366cee5de470.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <string>
#include <iostream>
#include <sstream>
#include <fstream>
#include <math.h>
using namespace std;
const static int U_FILE = 1;
const static int V_FILE = 2;
const static int P_FILE = 3;
const int row = 41;
const int col = 41;
const int THREAD_NUM = 1024;
const double dx_cpu = 2.0 / (col - 1);
const double dy_cpu = 2.0 / (row - 1);
__device__ int nx = col;
__device__ int ny = row;
__device__ int nt = 500;
__device__ int nit = 50;
__device__ double c = 1.0;
// double dx = 2.0 / (nx - 1);
__device__ double dx = dx_cpu;
// double dy = 2.0 / (ny - 1);
__device__ double dy = dy_cpu;
__device__ double rho = 1.0;
__device__ double nu = 0.1;
__device__ double dt = 0.001;
__device__ int TOTAL_GPU = row * col;
__device__ int PER_GPU = 0;
template < typename Type > std::string to_str (const Type & t)
{
std::ostringstream os;
os << t;
return os.str ();
}
//2-d
string array_2d_to_json(double *vector,int row,int col){
string result = "[";
for(int i=0;i<row;i++){
result += "[";
for(int j=0;j<col;j++){
result += to_str(vector[i*col+j]);
if( j!=col-1 ){
result += ",";
}
}
result += "]";
if( i!=row-1 ){
result += ",";
}
}
return result+"]";
}
void print_array(double *vector,int row,int col){
for(int i=0;i<row;i++){
for(int j=0;j<col;j++){
printf("%f,",vector[i*col+j]);
}
printf("\n");
}
}
void write_string_to_file(string str,int flag){
ofstream outfile;
switch(flag){
case U_FILE:
outfile.open("/home/9/19M38171/t3workspace/hpc-lecture/final_report/u_json.txt");
break;
case V_FILE:
outfile.open("/home/9/19M38171/t3workspace/hpc-lecture/final_report/v_json.txt");
break;
case P_FILE:
outfile.open("/home/9/19M38171/t3workspace/hpc-lecture/final_report/p_json.txt");
break;
default:
break;
}
outfile << str << endl;
outfile.close();
}
void zeros(double *vector,int row,int col){
for(int i=0;i<row*col;i++){
vector[i] = 0.0;
}
}
//2-d's index to 1-d's index
__device__ int index(int j,int i){
//nx is col
return j*nx+i;
}
// __device__ void zeros_gpu(double *vector,int row,int col){
// // printf("zeros_gpu\n");
// for(int i=0;i<row*col;i++){
// vector[i] = 0.0;
// }
// // printf("end\n");
// }
// Assume a single thread for now; later switch to copying element by element, so this helper is no longer needed.
// __device__ void copy(double *copy,double *origin){
// for(int i=0;i<nx * ny;i++){
// copy[i] = origin[i];
// }
// }
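// Assemble the source term b of the pressure Poisson equation from the divergence
// and velocity gradients of the current field (interior cells only).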
__device__ void build_up_b(double *b,double *u, double *v,int startIndex,int endIndex){
int row = ny,col = nx;
for(int i=startIndex;i<=endIndex;i++){
if( i / col == 0 || i / col == row - 1 || i % col == 0 || i % col == col-1 ){
continue;
}
b[i] = (rho * (1 / dt *
((u[i+1] - u[i-1]) /
(2 * dx) + (v[i+col] - v[i-col]) / (2 * dy)) -
pow(((u[i+1] - u[i-1]) / (2 * dx)),2) -
2 * ((u[i+col] - u[i-col]) / (2 * dy) *
(v[i+1] - v[i-1]) / (2 * dx))-
pow(((v[i+col] - v[i-col]) / (2 * dy)),2)));
}
}
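// Relax the pressure with nit Jacobi-style sweeps; the left, right and first-row
// boundaries copy their neighbour (dp/dn = 0) and the last row is pinned to p = 0.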
__device__ void pressure_poisson(double *p, double *b, double *pn,int startIndex,int endIndex){
// copy(pn,p);
for(int i=startIndex;i<=endIndex;i++){
pn[i] = p[i];
}
__syncthreads();
int row = ny,col = nx;
//q-loop have Data dependence
for(int q=0;q<nit;q++){
// copy(pn,p);
for(int i=startIndex;i<=endIndex;i++){
pn[i] = p[i];
}
__syncthreads();
for(int i=startIndex;i<=endIndex;i++){
if( !(i / col == 0 || i / col == row - 1 || i % col == 0 || i % col == col-1) ){
p[i] = (((pn[i+1] + pn[i-1]) * pow(dy,2) +
(pn[i+col] + pn[i-col]) * pow(dx,2)) /
(2 * (pow(dx,2) + pow(dy,2))) -
pow(dx,2) * pow(dy,2) / (2 * (pow(dx,2) + pow(dy,2))) *
b[i]);
}
// p[j][col-1] = p[j][col-2]
int rowIndex = i / col,colIndex = i % col;
p[index(rowIndex,col-1)] = p[index(rowIndex,col-2)];
p[index(0,colIndex)] = p[index(1,colIndex)];
p[index(rowIndex,0)] = p[index(rowIndex,1)];
p[index(row-1,colIndex)] = 0.0;
}
__syncthreads();
}
}
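// Time loop: rebuild b, relax p, then advance u and v with the discretised momentum
// equations; walls are no-slip and the driven row moves with u = 1.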
__device__ void cavity_flow(double *u,double *v,double *p,double *b,double *un,double *vn,double *pn,int startIndex,int endIndex){
// zeros_gpu(b,ny,nx);
for(int n=0;n<nt;n++){
// copy(un,u);
// copy(vn,v);
for(int i=startIndex;i<=endIndex;i++){
un[i] = u[i];
vn[i] = v[i];
}
__syncthreads();
// change b
build_up_b(b, u, v,startIndex,endIndex);
__syncthreads();
// change p
pressure_poisson(p, b, pn,startIndex,endIndex);
// __syncthreads();
//lock
int row = ny,col = nx;
for(int i=startIndex;i<=endIndex;i++){
if( i / col == 0 ){
// u[0][i] = 0;
u[i] = 0;
// v[0][i] = 0;
v[i] = 0;
}
if( i % col == 0 ){
// u[j][0] = 0;
u[i] = 0;
// v[j][0] = 0;
v[i] = 0;
}
if( i % col == col-1 ){
// u[j][col-1] = 0;
u[i] = 0;
// v[j][col-1] = 0;
v[i] = 0;
}
if( i / col == row-1 ){
// u[row-1][i] = 1;
u[i] = 1;
// v[row-1][i] = 0;
v[i] = 0;
}
if( i / col == 0 || i / col == row - 1 || i % col == 0 || i % col == col-1 ){
continue;
}
u[i] = (un[i]-
un[i] * dt / dx *
(un[i] - un[i-1]) -
vn[i] * dt / dy *
(un[i] - un[i-col]) -
dt / (2 * rho * dx) * (p[i+1] - p[i-1]) +
nu * (dt / pow(dx,2) *
(un[i+1] - 2 * un[i] + un[i-1]) +
dt / pow(dy,2) *
(un[i+col] - 2 * un[i] + un[i-col])));
v[i] = (vn[i] -
un[i] * dt / dx *
(vn[i] - vn[i-1]) -
vn[i] * dt / dy *
(vn[i] - vn[i-col]) -
dt / (2 * rho * dy) * (p[i+col] - p[i-col]) +
nu * (dt / pow(dx,2) *
(vn[i+1] - 2 * vn[i] + vn[i-1]) +
dt / pow(dy,2) *
(vn[i+col] - 2 * vn[i] + vn[i-col])));
}
__syncthreads();
}
}
__global__ void kernel(double *u,double *v,double *p,double *b,double *un,double *vn,double *pn){
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
if( threadId < TOTAL_GPU ){
if( TOTAL_GPU % THREAD_NUM == 0 ){
PER_GPU = TOTAL_GPU/THREAD_NUM;
}else{
PER_GPU = (TOTAL_GPU/THREAD_NUM) + 1;
}
//if PER_GPU = 0 =>
int startIndex = threadId*PER_GPU,endIndex = startIndex+PER_GPU-1;
// if( PER_GPU == 0 ){
// startIndex = endIndex = threadId;
// }
if( startIndex >= TOTAL_GPU ){
startIndex = TOTAL_GPU - 1;
}
if( endIndex >= TOTAL_GPU ){
endIndex = TOTAL_GPU - 1;
}
// printf("(%d,%d)(%d,%d)\n",startIndex,endIndex,threadId,PER_GPU);
cavity_flow(u,v,p,b,un,vn,pn,startIndex,endIndex);
}
}
int main(){
//2-d
double *u,*v,*p,*b,*un,*vn,*pn;
hipMallocManaged(&u, row*col*sizeof(double));
hipMallocManaged(&v, row*col*sizeof(double));
hipMallocManaged(&p, row*col*sizeof(double));
hipMallocManaged(&b, row*col*sizeof(double));
hipMallocManaged(&un, row*col*sizeof(double));
hipMallocManaged(&vn, row*col*sizeof(double));
hipMallocManaged(&pn, row*col*sizeof(double));
zeros(u,row,col);
zeros(v,row,col);
zeros(p,row,col);
zeros(b,row,col);
hipLaunchKernelGGL(( kernel), dim3(1),dim3(THREAD_NUM), 0, 0, u,v,p,b,un,vn,pn);
hipError_t error = hipGetLastError();
printf("CUDA error: %s\n", hipGetErrorString(error));
hipDeviceSynchronize();
hipError_t error2 = hipGetLastError();
printf("CUDA error: %s\n", hipGetErrorString(error2));
string u_json = array_2d_to_json(u,row,col),
v_json = array_2d_to_json(v,row,col),
p_json = array_2d_to_json(p,row,col);
write_string_to_file(u_json,U_FILE);
write_string_to_file(v_json,V_FILE);
write_string_to_file(p_json,P_FILE);
hipFree(u);
hipFree(v);
hipFree(p);
hipFree(b);
hipFree(un);
hipFree(vn);
hipFree(pn);
} | 9338d4350e9ed545d42719672d63366cee5de470.cu | #include <cstdio>
#include <cstdlib>
#include <vector>
#include <string>
#include <iostream>
#include <sstream>
#include <fstream>
#include <math.h>
using namespace std;
const static int U_FILE = 1;
const static int V_FILE = 2;
const static int P_FILE = 3;
const int row = 41;
const int col = 41;
const int THREAD_NUM = 1024;
const double dx_cpu = 2.0 / (col - 1);
const double dy_cpu = 2.0 / (row - 1);
__device__ int nx = col;
__device__ int ny = row;
__device__ int nt = 500;
__device__ int nit = 50;
__device__ double c = 1.0;
// double dx = 2.0 / (nx - 1);
__device__ double dx = dx_cpu;
// double dy = 2.0 / (ny - 1);
__device__ double dy = dy_cpu;
__device__ double rho = 1.0;
__device__ double nu = 0.1;
__device__ double dt = 0.001;
__device__ int TOTAL_GPU = row * col;
__device__ int PER_GPU = 0;
template < typename Type > std::string to_str (const Type & t)
{
std::ostringstream os;
os << t;
return os.str ();
}
//2-d
string array_2d_to_json(double *vector,int row,int col){
string result = "[";
for(int i=0;i<row;i++){
result += "[";
for(int j=0;j<col;j++){
result += to_str(vector[i*col+j]);
if( j!=col-1 ){
result += ",";
}
}
result += "]";
if( i!=row-1 ){
result += ",";
}
}
return result+"]";
}
void print_array(double *vector,int row,int col){
for(int i=0;i<row;i++){
for(int j=0;j<col;j++){
printf("%f,",vector[i*col+j]);
}
printf("\n");
}
}
void write_string_to_file(string str,int flag){
ofstream outfile;
switch(flag){
case U_FILE:
outfile.open("/home/9/19M38171/t3workspace/hpc-lecture/final_report/u_json.txt");
break;
case V_FILE:
outfile.open("/home/9/19M38171/t3workspace/hpc-lecture/final_report/v_json.txt");
break;
case P_FILE:
outfile.open("/home/9/19M38171/t3workspace/hpc-lecture/final_report/p_json.txt");
break;
default:
break;
}
outfile << str << endl;
outfile.close();
}
void zeros(double *vector,int row,int col){
for(int i=0;i<row*col;i++){
vector[i] = 0.0;
}
}
//2-d's index to 1-d's index
__device__ int index(int j,int i){
//nx is col
return j*nx+i;
}
// __device__ void zeros_gpu(double *vector,int row,int col){
// // printf("zeros_gpu\n");
// for(int i=0;i<row*col;i++){
// vector[i] = 0.0;
// }
// // printf("end\n");
// }
// Assume single-threaded execution at first; later changed to copying element by element, so there is no longer any need to call this helper
// __device__ void copy(double *copy,double *origin){
// for(int i=0;i<nx * ny;i++){
// copy[i] = origin[i];
// }
// }
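// build_up_b: assembles the source term b of the pressure Poisson equation from the current velocity field, using central differences at interior grid points (boundary points are skipped)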
__device__ void build_up_b(double *b,double *u, double *v,int startIndex,int endIndex){
int row = ny,col = nx;
for(int i=startIndex;i<=endIndex;i++){
if( i / col == 0 || i / col == row - 1 || i % col == 0 || i % col == col-1 ){
continue;
}
b[i] = (rho * (1 / dt *
((u[i+1] - u[i-1]) /
(2 * dx) + (v[i+col] - v[i-col]) / (2 * dy)) -
pow(((u[i+1] - u[i-1]) / (2 * dx)),2) -
2 * ((u[i+col] - u[i-col]) / (2 * dy) *
(v[i+1] - v[i-1]) / (2 * dx))-
pow(((v[i+col] - v[i-col]) / (2 * dy)),2)));
}
}
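// pressure_poisson: nit Jacobi-style sweeps over the interior pressure, copying p into pn before each sweep; the pressure boundary conditions (zero normal gradient on three walls, p = 0 on the lid row) are re-imposed inside each sweep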
__device__ void pressure_poisson(double *p, double *b, double *pn,int startIndex,int endIndex){
// copy(pn,p);
for(int i=startIndex;i<=endIndex;i++){
pn[i] = p[i];
}
__syncthreads();
int row = ny,col = nx;
//q-loop have Data dependence
for(int q=0;q<nit;q++){
// copy(pn,p);
for(int i=startIndex;i<=endIndex;i++){
pn[i] = p[i];
}
__syncthreads();
for(int i=startIndex;i<=endIndex;i++){
if( !(i / col == 0 || i / col == row - 1 || i % col == 0 || i % col == col-1) ){
p[i] = (((pn[i+1] + pn[i-1]) * pow(dy,2) +
(pn[i+col] + pn[i-col]) * pow(dx,2)) /
(2 * (pow(dx,2) + pow(dy,2))) -
pow(dx,2) * pow(dy,2) / (2 * (pow(dx,2) + pow(dy,2))) *
b[i]);
}
// p[j][col-1] = p[j][col-2]
int rowIndex = i / col,colIndex = i % col;
p[index(rowIndex,col-1)] = p[index(rowIndex,col-2)];
p[index(0,colIndex)] = p[index(1,colIndex)];
p[index(rowIndex,0)] = p[index(rowIndex,1)];
p[index(row-1,colIndex)] = 0.0;
}
__syncthreads();
}
}
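// cavity_flow: advances the lid-driven cavity solution for nt time steps; each step rebuilds b, solves for the pressure, applies the velocity boundary conditions (u = 1 on the lid row, u = v = 0 on the other walls) and updates the interior u and v from the discretized momentum equations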
__device__ void cavity_flow(double *u,double *v,double *p,double *b,double *un,double *vn,double *pn,int startIndex,int endIndex){
// zeros_gpu(b,ny,nx);
for(int n=0;n<nt;n++){
// copy(un,u);
// copy(vn,v);
for(int i=startIndex;i<=endIndex;i++){
un[i] = u[i];
vn[i] = v[i];
}
__syncthreads();
// change b
build_up_b(b, u, v,startIndex,endIndex);
__syncthreads();
// change p
pressure_poisson(p, b, pn,startIndex,endIndex);
// __syncthreads();
//lock
int row = ny,col = nx;
for(int i=startIndex;i<=endIndex;i++){
if( i / col == 0 ){
// u[0][i] = 0;
u[i] = 0;
// v[0][i] = 0;
v[i] = 0;
}
if( i % col == 0 ){
// u[j][0] = 0;
u[i] = 0;
// v[j][0] = 0;
v[i] = 0;
}
if( i % col == col-1 ){
// u[j][col-1] = 0;
u[i] = 0;
// v[j][col-1] = 0;
v[i] = 0;
}
if( i / col == row-1 ){
// u[row-1][i] = 1;
u[i] = 1;
// v[row-1][i] = 0;
v[i] = 0;
}
if( i / col == 0 || i / col == row - 1 || i % col == 0 || i % col == col-1 ){
continue;
}
u[i] = (un[i]-
un[i] * dt / dx *
(un[i] - un[i-1]) -
vn[i] * dt / dy *
(un[i] - un[i-col]) -
dt / (2 * rho * dx) * (p[i+1] - p[i-1]) +
nu * (dt / pow(dx,2) *
(un[i+1] - 2 * un[i] + un[i-1]) +
dt / pow(dy,2) *
(un[i+col] - 2 * un[i] + un[i-col])));
v[i] = (vn[i] -
un[i] * dt / dx *
(vn[i] - vn[i-1]) -
vn[i] * dt / dy *
(vn[i] - vn[i-col]) -
dt / (2 * rho * dy) * (p[i+col] - p[i-col]) +
nu * (dt / pow(dx,2) *
(vn[i+1] - 2 * vn[i] + vn[i-1]) +
dt / pow(dy,2) *
(vn[i+col] - 2 * vn[i] + vn[i-col])));
}
__syncthreads();
}
}
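// kernel: 1-D domain decomposition; each thread handles a contiguous chunk of PER_GPU grid indices (clamped to the array bounds), and the threads of the single launched block cooperate through __syncthreads() inside cavity_flow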
__global__ void kernel(double *u,double *v,double *p,double *b,double *un,double *vn,double *pn){
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
if( threadId < TOTAL_GPU ){
if( TOTAL_GPU % THREAD_NUM == 0 ){
PER_GPU = TOTAL_GPU/THREAD_NUM;
}else{
PER_GPU = (TOTAL_GPU/THREAD_NUM) + 1;
}
		// if PER_GPU came out as 0, each thread would need to fall back to a single index (see the commented-out branch below)
int startIndex = threadId*PER_GPU,endIndex = startIndex+PER_GPU-1;
// if( PER_GPU == 0 ){
// startIndex = endIndex = threadId;
// }
if( startIndex >= TOTAL_GPU ){
startIndex = TOTAL_GPU - 1;
}
if( endIndex >= TOTAL_GPU ){
endIndex = TOTAL_GPU - 1;
}
// printf("(%d,%d)(%d,%d)\n",startIndex,endIndex,threadId,PER_GPU);
cavity_flow(u,v,p,b,un,vn,pn,startIndex,endIndex);
}
}
int main(){
//2-d
double *u,*v,*p,*b,*un,*vn,*pn;
cudaMallocManaged(&u, row*col*sizeof(double));
cudaMallocManaged(&v, row*col*sizeof(double));
cudaMallocManaged(&p, row*col*sizeof(double));
cudaMallocManaged(&b, row*col*sizeof(double));
cudaMallocManaged(&un, row*col*sizeof(double));
cudaMallocManaged(&vn, row*col*sizeof(double));
cudaMallocManaged(&pn, row*col*sizeof(double));
zeros(u,row,col);
zeros(v,row,col);
zeros(p,row,col);
zeros(b,row,col);
kernel<<<1,THREAD_NUM>>>(u,v,p,b,un,vn,pn);
cudaError_t error = cudaGetLastError();
printf("CUDA error: %s\n", cudaGetErrorString(error));
cudaDeviceSynchronize();
cudaError_t error2 = cudaGetLastError();
printf("CUDA error: %s\n", cudaGetErrorString(error2));
string u_json = array_2d_to_json(u,row,col),
v_json = array_2d_to_json(v,row,col),
p_json = array_2d_to_json(p,row,col);
write_string_to_file(u_json,U_FILE);
write_string_to_file(v_json,V_FILE);
write_string_to_file(p_json,P_FILE);
cudaFree(u);
cudaFree(v);
cudaFree(p);
cudaFree(b);
cudaFree(un);
cudaFree(vn);
cudaFree(pn);
} |
9f4ee9236b7b745c0843aea29fb6b709b8c1a5e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void helloWorld(){
printf("Hello World!\n");
}
int main(){
hipLaunchKernelGGL(( helloWorld), dim3(1),dim3(1), 0, 0, );
hipDeviceSynchronize();
return 0;
}
| 9f4ee9236b7b745c0843aea29fb6b709b8c1a5e9.cu | #include <stdio.h>
__global__ void helloWorld(){
printf("Hello World!\n");
}
int main(){
helloWorld<<<1,1>>>();
cudaDeviceSynchronize();
return 0;
}
|
c93ee4d24eb9aa9db422c3bafda73c530b871404.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void EvolveValuesRK3_2_gpu( const float *dT, const float *midPointConservative1,
float *Conservative) {
Conservative[0] += (0.5f)* *dT * midPointConservative1[0];
Conservative[1] += (0.5f)* *dT * midPointConservative1[1];
Conservative[2] += (0.5f)* *dT * midPointConservative1[2];
Conservative[3] = Conservative[3];
Conservative[0] = Conservative[0] <= EPS ? EPS : Conservative[0];
}
// CUDA kernel function
__global__ void op_cuda_EvolveValuesRK3_2(
const float *arg0,
const float *__restrict arg1,
float *arg2,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
EvolveValuesRK3_2_gpu(arg0,
arg1+n*4,
arg2+n*4);
}
}
//host stub function
void op_par_loop_EvolveValuesRK3_2(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2){
float*arg0h = (float *)arg0.data;
int nargs = 3;
op_arg args[3];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(1);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: EvolveValuesRK3_2");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(float));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg0.data = OP_consts_h + consts_bytes;
arg0.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg0.data)[d] = arg0h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
mvConstArraysToDevice(consts_bytes);
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
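    // a fixed grid of 200 blocks suffices because the kernel above uses a grid-stride loop over the set elements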
hipLaunchKernelGGL(( op_cuda_EvolveValuesRK3_2), dim3(nblocks),dim3(nthread), 0, 0,
(float *) arg0.data_d,
(float *) arg1.data_d,
(float *) arg2.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[1].time += wall_t2 - wall_t1;
OP_kernels[1].transfer += (float)set->size * arg1.size;
OP_kernels[1].transfer += (float)set->size * arg2.size * 2.0f;
}
| c93ee4d24eb9aa9db422c3bafda73c530b871404.cu | //
// auto-generated by op2.py
//
//user function
__device__ void EvolveValuesRK3_2_gpu( const float *dT, const float *midPointConservative1,
float *Conservative) {
Conservative[0] += (0.5f)* *dT * midPointConservative1[0];
Conservative[1] += (0.5f)* *dT * midPointConservative1[1];
Conservative[2] += (0.5f)* *dT * midPointConservative1[2];
Conservative[3] = Conservative[3];
Conservative[0] = Conservative[0] <= EPS ? EPS : Conservative[0];
}
// CUDA kernel function
__global__ void op_cuda_EvolveValuesRK3_2(
const float *arg0,
const float *__restrict arg1,
float *arg2,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
EvolveValuesRK3_2_gpu(arg0,
arg1+n*4,
arg2+n*4);
}
}
//host stub function
void op_par_loop_EvolveValuesRK3_2(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2){
float*arg0h = (float *)arg0.data;
int nargs = 3;
op_arg args[3];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(1);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: EvolveValuesRK3_2");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(float));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg0.data = OP_consts_h + consts_bytes;
arg0.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg0.data)[d] = arg0h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
mvConstArraysToDevice(consts_bytes);
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
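    // a fixed grid of 200 blocks suffices because the kernel above uses a grid-stride loop over the set elements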
op_cuda_EvolveValuesRK3_2<<<nblocks,nthread>>>(
(float *) arg0.data_d,
(float *) arg1.data_d,
(float *) arg2.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[1].time += wall_t2 - wall_t1;
OP_kernels[1].transfer += (float)set->size * arg1.size;
OP_kernels[1].transfer += (float)set->size * arg2.size * 2.0f;
}
|
779e2207c7e28c47ec727021db73be5cf4bc27da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
    dsymv_upper.cu is generated from zhemv_upper.cu; for this real precision MAGMA_D_CONJ is a no-op, so the symmetric and Hermitian upper kernels coincide.
dsymv_kernel_U (upper) in dsymv_upper.cu is very similar to
dsymv_kernel_L (lower) in dsymv.cu; diff the two files to compare.
@generated from magmablas/zhemv_upper.cu normal z -> d, Tue Feb 9 16:05:28 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#define PRECISION_d
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
********************************************************************/
__global__ void
dsymv_kernel_U(
int n,
double const * __restrict__ A, int lda,
double const * __restrict__ x, int incx,
double * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
double psum, psum_t;
double total = MAGMA_D_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ double sx_blk[NB_X]; // for x[ blk ]
__shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
double rA[4];
double psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_D_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += MAGMA_D_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 )
A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X )
A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=blk+1; jj < gridDim.x; ++jj) {
partial = (jj == gridDim.x - 1 ? (n % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_D_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_D_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_D_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end dsymv_kernel_U
/**************************************************************
Upper case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
********************************************************************/
__global__ void
dsymv_kernel_U_sum(
int n,
double alpha,
int lda,
double beta,
double * __restrict__ y, int incy,
double const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind;
double Ax = MAGMA_D_ZERO;
for (int j = 0; j <= blk; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
| 779e2207c7e28c47ec727021db73be5cf4bc27da.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
    dsymv_upper.cu is generated from zhemv_upper.cu; for this real precision MAGMA_D_CONJ is a no-op, so the symmetric and Hermitian upper kernels coincide.
dsymv_kernel_U (upper) in dsymv_upper.cu is very similar to
dsymv_kernel_L (lower) in dsymv.cu; diff the two files to compare.
@generated from magmablas/zhemv_upper.cu normal z -> d, Tue Feb 9 16:05:28 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#define PRECISION_d
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
********************************************************************/
__global__ void
dsymv_kernel_U(
int n,
double const * __restrict__ A, int lda,
double const * __restrict__ x, int incx,
double * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
double psum, psum_t;
double total = MAGMA_D_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ double sx_blk[NB_X]; // for x[ blk ]
__shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
double rA[4];
double psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_D_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += MAGMA_D_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 )
A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X )
A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=blk+1; jj < gridDim.x; ++jj) {
partial = (jj == gridDim.x - 1 ? (n % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_D_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_D_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_D_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end dsymv_kernel_U
/**************************************************************
Upper case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
********************************************************************/
__global__ void
dsymv_kernel_U_sum(
int n,
double alpha,
int lda,
double beta,
double * __restrict__ y, int incy,
double const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind;
double Ax = MAGMA_D_ZERO;
for (int j = 0; j <= blk; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
|
ca229e91ed18b07fef7fbf7c57483bec9e8ea284.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
extern float *Ex, *dev_Ex, coe_Ex, dt, dz;
extern int size_space, size_Ex;
const float epsilon = 8.85e-12;
void Ex_init_allocate(int size_Ex)
{
Ex = (float *)malloc(size_Ex* sizeof(float));
hipMalloc(&dev_Ex, size_Ex* sizeof(float));
}
void Ex_init_assignValue(int size_Ex)
{
for (int i = 0; i < size_Ex; i++){
Ex[i] = 0.f;
}
coe_Ex = dt / (epsilon * dz);
}
void Ex_transfer_host_device(int size_Ex)
{
hipMemcpy(dev_Ex, Ex, size_Ex * sizeof(float), hipMemcpyHostToDevice);
}
void Ex_transfer_device_host(int size_Ex)
{
hipMemcpy(Ex, dev_Ex, size_Ex * sizeof(float), hipMemcpyDeviceToHost);
}
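// Ex_cmp_kernel below performs the 1-D FDTD electric-field update Ex[i] -= coe_Ex * (Hy[i] - Hy[i-1]), with coe_Ex = dt / (epsilon * dz); the loop is serial, so the kernel is presumably launched with a single thread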
__global__ void Ex_cmp_kernel(float* dev_Hy, float * dev_Ex, float coe_Ex, int step_space)
{
for (int i = 1; i < step_space; i++){
dev_Ex[i] = dev_Ex[i] - coe_Ex * (dev_Hy[i] - dev_Hy[i - 1]);
//test
//dev_Ex[i] = i / 10.0;
}
}
void Ex_checkout(int size)
{
cout << "Ex: size = " << size << endl;
cout << "coe_Ex = " << coe_Ex;
cout << "Ex: ";
for (int i = 0; i < size; i++)
{
cout << Ex[i] << "\t";
}
cout << endl;
}
void Ex_init(int size_space)
{
size_Ex = size_space + 1;
Ex_init_allocate(size_Ex);
Ex_init_assignValue(size_Ex);
}
__global__ void Ex_boundary_PEC_kernel(float* dev_Ex, int size_space)
{
dev_Ex[size_space] = 0.f;
} | ca229e91ed18b07fef7fbf7c57483bec9e8ea284.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
extern float *Ex, *dev_Ex, coe_Ex, dt, dz;
extern int size_space, size_Ex;
const float epsilon = 8.85e-12;
void Ex_init_allocate(int size_Ex)
{
Ex = (float *)malloc(size_Ex* sizeof(float));
cudaMalloc(&dev_Ex, size_Ex* sizeof(float));
}
void Ex_init_assignValue(int size_Ex)
{
for (int i = 0; i < size_Ex; i++){
Ex[i] = 0.f;
}
coe_Ex = dt / (epsilon * dz);
}
void Ex_transfer_host_device(int size_Ex)
{
cudaMemcpy(dev_Ex, Ex, size_Ex * sizeof(float), cudaMemcpyHostToDevice);
}
void Ex_transfer_device_host(int size_Ex)
{
cudaMemcpy(Ex, dev_Ex, size_Ex * sizeof(float), cudaMemcpyDeviceToHost);
}
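// Ex_cmp_kernel below performs the 1-D FDTD electric-field update Ex[i] -= coe_Ex * (Hy[i] - Hy[i-1]), with coe_Ex = dt / (epsilon * dz); the loop is serial, so the kernel is presumably launched with a single thread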
__global__ void Ex_cmp_kernel(float* dev_Hy, float * dev_Ex, float coe_Ex, int step_space)
{
for (int i = 1; i < step_space; i++){
dev_Ex[i] = dev_Ex[i] - coe_Ex * (dev_Hy[i] - dev_Hy[i - 1]);
//test
//dev_Ex[i] = i / 10.0;
}
}
void Ex_checkout(int size)
{
cout << "Ex: size = " << size << endl;
cout << "coe_Ex = " << coe_Ex;
cout << "Ex: ";
for (int i = 0; i < size; i++)
{
cout << Ex[i] << "\t";
}
cout << endl;
}
void Ex_init(int size_space)
{
size_Ex = size_space + 1;
Ex_init_allocate(size_Ex);
Ex_init_assignValue(size_Ex);
}
__global__ void Ex_boundary_PEC_kernel(float* dev_Ex, int size_space)
{
dev_Ex[size_space] = 0.f;
} |
9a55de65a7c997087b541bfff7d41f1eeffb79dd.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if(error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, hipGetErrorString(error));\
exit(1); \
} \
}
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return (double)tp.tv_sec + (double)tp.tv_usec*1.e-6;
}
void initialData(float *ip, int size)
{
// generate different seed for random number
time_t t;
srand((unsigned) time(&t));
for(int i = 0; i < size; ++i)
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
void sumArrayOnHost(float *A, float *B, float *C, const int N)
{
for(int i = 0; i < N; ++i)
C[i] = A[i] + B[i];
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < N)
C[i] = A[i] + B[i];
}
void checkResult(float *hostRef, float *gpuRef, const int N)
{
float epsilon = 1e-8;
bool match = 1;
for(int i = 0; i < N; ++i)
if(abs(hostRef[i]-gpuRef[i]) > epsilon)
{
match = 0;
printf("Array do not match !\n");
printf("host %5.2f gpu %5.2f at current %d\n",
hostRef[i], gpuRef[i], i);
break;
}
if(match)
printf("Array match.\n\n");
}
int main(int argc, char** argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// set up data size of vectors
int nElem = 1 << 24;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
double iStart, iElaps;
// initialize data at host side
iStart = cpuSecond();
initialData(h_A, nElem);
initialData(h_B, nElem);
iElaps = cpuSecond() - iStart;
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
iStart = cpuSecond();
sumArrayOnHost(h_A, h_B, hostRef, nElem);
iElaps = cpuSecond() - iStart;
// malloc device global memory
float *d_A, *d_B, *d_C;
hipMalloc((float**)&d_A, nBytes);
hipMalloc((float**)&d_B, nBytes);
hipMalloc((float**)&d_C, nBytes);
// transfer data from host to device
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
// invoke kernel at host side
    int iLen = 1024;
dim3 block(iLen);
dim3 grid((nElem+block.x-1) / block.x);
iStart = cpuSecond();
hipLaunchKernelGGL(( sumArraysOnGPU) , dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem);
hipDeviceSynchronize();
iElaps = cpuSecond() - iStart;
printf("sumArraysOnGPU <<<%d, %d>>> time elapsed %f sec\n", grid.x,
block.x, iElaps);
// copy kernel result back to host side
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return 0;
}
| 9a55de65a7c997087b541bfff7d41f1eeffb79dd.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error));\
exit(1); \
} \
}
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return (double)tp.tv_sec + (double)tp.tv_usec*1.e-6;
}
void initialData(float *ip, int size)
{
// generate different seed for random number
time_t t;
srand((unsigned) time(&t));
for(int i = 0; i < size; ++i)
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
void sumArrayOnHost(float *A, float *B, float *C, const int N)
{
for(int i = 0; i < N; ++i)
C[i] = A[i] + B[i];
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < N)
C[i] = A[i] + B[i];
}
void checkResult(float *hostRef, float *gpuRef, const int N)
{
float epsilon = 1e-8;
bool match = 1;
for(int i = 0; i < N; ++i)
if(abs(hostRef[i]-gpuRef[i]) > epsilon)
{
match = 0;
printf("Array do not match !\n");
printf("host %5.2f gpu %5.2f at current %d\n",
hostRef[i], gpuRef[i], i);
break;
}
if(match)
printf("Array match.\n\n");
}
int main(int argc, char** argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// set up data size of vectors
int nElem = 1 << 24;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
double iStart, iElaps;
// initialize data at host side
iStart = cpuSecond();
initialData(h_A, nElem);
initialData(h_B, nElem);
iElaps = cpuSecond() - iStart;
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
iStart = cpuSecond();
sumArrayOnHost(h_A, h_B, hostRef, nElem);
iElaps = cpuSecond() - iStart;
// malloc device global memory
float *d_A, *d_B, *d_C;
cudaMalloc((float**)&d_A, nBytes);
cudaMalloc((float**)&d_B, nBytes);
cudaMalloc((float**)&d_C, nBytes);
// transfer data from host to device
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
// invoke kernel at host side
    int iLen = 1024;
dim3 block(iLen);
dim3 grid((nElem+block.x-1) / block.x);
iStart = cpuSecond();
sumArraysOnGPU <<<grid, block>>>(d_A, d_B, d_C, nElem);
cudaDeviceSynchronize();
iElaps = cpuSecond() - iStart;
printf("sumArraysOnGPU <<<%d, %d>>> time elapsed %f sec\n", grid.x,
block.x, iElaps);
// copy kernel result back to host side
cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return 0;
}
|
a437633f06a4afe66c014484b5a282ee57f80161.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* A simple example of using an array of structures to store data on the device.
* This example is used to study the impact on performance of data layout on the
* GPU.
*
* AoS: one contiguous 64-bit read to get x and y (up to 300 cycles)
*/
#define LEN 1<<22
struct __align__(8) innerStruct
{
float x;
float y;
};
struct innerArray
{
float x[LEN];
float y[LEN];
};
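// innerArray is the corresponding structure-of-arrays (SoA) layout; it is declared for comparison but not used in this AoS example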
void initialInnerStruct(innerStruct *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip[i].x = (float)(rand() & 0xFF) / 100.0f;
ip[i].y = (float)(rand() & 0xFF) / 100.0f;
}
return;
}
void testInnerStructHost(innerStruct *A, innerStruct *C, const int n)
{
for (int idx = 0; idx < n; idx++)
{
C[idx].x = A[idx].x + 10.f;
C[idx].y = A[idx].y + 20.f;
}
return;
}
void checkInnerStruct(innerStruct *hostRef, innerStruct *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i].x - gpuRef[i].x) > epsilon)
{
match = 0;
printf("different on %dth element: host %f gpu %f\n", i,
hostRef[i].x, gpuRef[i].x);
break;
}
if (abs(hostRef[i].y - gpuRef[i].y) > epsilon)
{
match = 0;
printf("different on %dth element: host %f gpu %f\n", i,
hostRef[i].y, gpuRef[i].y);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
__global__ void testInnerStruct(innerStruct *data, innerStruct * result,
const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
innerStruct tmp = data[i];
tmp.x += 10.f;
tmp.y += 20.f;
result[i] = tmp;
}
}
__global__ void warmup(innerStruct *data, innerStruct * result, const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
innerStruct tmp = data[i];
tmp.x += 10.f;
tmp.y += 20.f;
result[i] = tmp;
}
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s test struct of array at ", argv[0]);
printf("device %d: %s \n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// allocate host memory
int nElem = LEN;
size_t nBytes = nElem * sizeof(innerStruct);
innerStruct *h_A = (innerStruct *)malloc(nBytes);
innerStruct *hostRef = (innerStruct *)malloc(nBytes);
innerStruct *gpuRef = (innerStruct *)malloc(nBytes);
// initialize host array
initialInnerStruct(h_A, nElem);
testInnerStructHost(h_A, hostRef, nElem);
// allocate device memory
innerStruct *d_A, *d_C;
CHECK(hipMalloc((innerStruct**)&d_A, nBytes));
CHECK(hipMalloc((innerStruct**)&d_C, nBytes));
// copy data from host to device
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
    // set up the execution block size (optionally overridden by the first command-line argument)
int blocksize = 128;
if (argc > 1) blocksize = atoi(argv[1]);
// execution configuration
dim3 block (blocksize, 1);
dim3 grid ((nElem + block.x - 1) / block.x, 1);
// kernel 1: warmup
double iStart = seconds();
/*warmup<<<grid, block>>>(d_A, d_C, nElem);
CHECK(hipDeviceSynchronize());*/
double iElaps = seconds() - iStart;
//printf("warmup <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
// iElaps);
//CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
//checkInnerStruct(hostRef, gpuRef, nElem);
//CHECK(hipGetLastError());
// kernel 2: testInnerStruct
iStart = seconds();
hipLaunchKernelGGL(( testInnerStruct), dim3(grid), dim3(block), 0, 0, d_A, d_C, nElem);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("innerstruct <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
iElaps);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
checkInnerStruct(hostRef, gpuRef, nElem);
CHECK(hipGetLastError());
// free memories both host and device
CHECK(hipFree(d_A));
CHECK(hipFree(d_C));
free(h_A);
free(hostRef);
free(gpuRef);
// reset device
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
| a437633f06a4afe66c014484b5a282ee57f80161.cu | #include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* A simple example of using an array of structures to store data on the device.
* This example is used to study the impact on performance of data layout on the
* GPU.
*
* AoS: one contiguous 64-bit read to get x and y (up to 300 cycles)
*/
#define LEN 1<<22
struct __align__(8) innerStruct
{
float x;
float y;
};
struct innerArray
{
float x[LEN];
float y[LEN];
};
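// innerArray is the corresponding structure-of-arrays (SoA) layout; it is declared for comparison but not used in this AoS example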
void initialInnerStruct(innerStruct *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip[i].x = (float)(rand() & 0xFF) / 100.0f;
ip[i].y = (float)(rand() & 0xFF) / 100.0f;
}
return;
}
void testInnerStructHost(innerStruct *A, innerStruct *C, const int n)
{
for (int idx = 0; idx < n; idx++)
{
C[idx].x = A[idx].x + 10.f;
C[idx].y = A[idx].y + 20.f;
}
return;
}
void checkInnerStruct(innerStruct *hostRef, innerStruct *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i].x - gpuRef[i].x) > epsilon)
{
match = 0;
printf("different on %dth element: host %f gpu %f\n", i,
hostRef[i].x, gpuRef[i].x);
break;
}
if (abs(hostRef[i].y - gpuRef[i].y) > epsilon)
{
match = 0;
printf("different on %dth element: host %f gpu %f\n", i,
hostRef[i].y, gpuRef[i].y);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
__global__ void testInnerStruct(innerStruct *data, innerStruct * result,
const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
innerStruct tmp = data[i];
tmp.x += 10.f;
tmp.y += 20.f;
result[i] = tmp;
}
}
__global__ void warmup(innerStruct *data, innerStruct * result, const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
innerStruct tmp = data[i];
tmp.x += 10.f;
tmp.y += 20.f;
result[i] = tmp;
}
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s test struct of array at ", argv[0]);
printf("device %d: %s \n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// allocate host memory
int nElem = LEN;
size_t nBytes = nElem * sizeof(innerStruct);
innerStruct *h_A = (innerStruct *)malloc(nBytes);
innerStruct *hostRef = (innerStruct *)malloc(nBytes);
innerStruct *gpuRef = (innerStruct *)malloc(nBytes);
// initialize host array
initialInnerStruct(h_A, nElem);
testInnerStructHost(h_A, hostRef, nElem);
// allocate device memory
innerStruct *d_A, *d_C;
CHECK(cudaMalloc((innerStruct**)&d_A, nBytes));
CHECK(cudaMalloc((innerStruct**)&d_C, nBytes));
// copy data from host to device
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    // set up the execution block size (optionally overridden by the first command-line argument)
int blocksize = 128;
if (argc > 1) blocksize = atoi(argv[1]);
// execution configuration
dim3 block (blocksize, 1);
dim3 grid ((nElem + block.x - 1) / block.x, 1);
// kernel 1: warmup
double iStart = seconds();
/*warmup<<<grid, block>>>(d_A, d_C, nElem);
CHECK(cudaDeviceSynchronize());*/
double iElaps = seconds() - iStart;
//printf("warmup <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
// iElaps);
//CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
//checkInnerStruct(hostRef, gpuRef, nElem);
//CHECK(cudaGetLastError());
// kernel 2: testInnerStruct
iStart = seconds();
testInnerStruct<<<grid, block>>>(d_A, d_C, nElem);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("innerstruct <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
iElaps);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
checkInnerStruct(hostRef, gpuRef, nElem);
CHECK(cudaGetLastError());
// free memories both host and device
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_C));
free(h_A);
free(hostRef);
free(gpuRef);
// reset device
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
a1b8e9f413e722489b07d57bf5ae80cd9da8d4b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <cutil.h>
#include "util.h"
#include "ref_2dhisto.h"
/*** The following code is adapted from https://github.com/trantalaiho/Cuda-Histogram with the modifications I made:
  1. Changed the template parameters of function callHistogramKernel to accommodate unsigned char
  2. Changed the default block size to 256
  3. Minor modifications to function callHistogramKernelImpl
**/
#define H_ERROR_CHECKS 0
#if H_ERROR_CHECKS
#include <assert.h>
#include <stdio.h>
#endif
#define HBLOCK_SIZE_LOG2 7
#define HBLOCK_SIZE (1 << HBLOCK_SIZE_LOG2) // = 128
#define HMBLOCK_SIZE_LOG2 8
#define HMBLOCK_SIZE (1 << HMBLOCK_SIZE_LOG2) // = 256
#define LBLOCK_SIZE_LOG2 5
#define LBLOCK_SIZE (1 << LBLOCK_SIZE_LOG2) // = 32
#define LBLOCK_WARPS (LBLOCK_SIZE >> 5)
#define USE_MEDIUM_PATH 1
#if USE_MEDIUM_PATH
// For now only MEDIUM_BLOCK_SIZE_LOG2 == LBLOCK_SIZE_LOG2 works
# define MEDIUM_BLOCK_SIZE_LOG2 8
# define MEDIUM_BLOCK_SIZE (1 << MEDIUM_BLOCK_SIZE_LOG2) // = 256
# define MBLOCK_WARPS (MEDIUM_BLOCK_SIZE >> 5)
#define MED_THREAD_DEGEN 16
#endif
#define RBLOCK_SIZE 64
#define RMAXSTEPS 80
#define NHSTEPSPERKEY 32
#define MAX_NHSTEPS 1024
#define MAX_MULTISTEPS 1024
#define MAX_NLHSTEPS 2048
#define GATHER_BLOCK_SIZE_LOG2 6
#define GATHER_BLOCK_SIZE (1 << GATHER_BLOCK_SIZE_LOG2)
#define STRATEGY_CHECK_INTERVAL_LOG2 7
#define STRATEGY_CHECK_INTERVAL (1 << STRATEGY_CHECK_INTERVAL_LOG2)
#define HISTOGRAM_DEGEN_LIMIT 20
#define HASH_COLLISION_STEPS 2
const int numActiveUpperLimit = 24;
#define USE_JENKINS_HASH 0
#define LARGE_NBIN_CHECK_INTERVAL_LOG2 5
#define LARGE_NBIN_CHECK_INTERVAL (1 << LARGE_NBIN_CHECK_INTERVAL_LOG2)
#define SMALL_BLOCK_SIZE_LOG2 6
#define SMALL_BLOCK_SIZE (1 << SMALL_BLOCK_SIZE_LOG2)
#define MAX_SMALL_STEPS 2040
static unsigned int* d_Data = NULL;
static unsigned int* d_Histogram = NULL;
//static unsigned char* d_Histogram = NULL;
static unsigned int* h_Histogram = NULL;
//static unsigned char* h_Histogram = NULL;
#if __CUDA_ARCH__ >= 120
#define USE_ATOMICS_HASH 0
#else
#define USE_ATOMICS_HASH 0
#endif
#if (__CUDA_ARCH__ >= 200)
# define USE_BALLOT_HISTOGRAM 1
#else
# define USE_BALLOT_HISTOGRAM 0
#endif
#ifndef __device__
#define __device__
#endif
#ifndef __host__
#define __host__
#endif
#ifndef __shared__
#define __shared__
#endif
enum histogram_type {
histogram_generic, /*!< \brief Generic histogram, for any types */
histogram_atomic_inc, /*!< \brief Each output-value is constant 1 */
  histogram_atomic_add, /*!< \brief Output-type is such that the atomicAdd()
                              function can be used */
};
template <histogram_type histotype, typename OUTPUTTYPE>
static
int
getHistogramBufSize(OUTPUTTYPE zero, int nOut);
template <histogram_type histotype, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
static
hipError_t
callHistogramKernel(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
bool outInDev = false,
hipStream_t stream = 0, void* tmpBuffer = NULL,
bool allowMultiPass = true);
template <histogram_type histotype, int nMultires, int nDim,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
static
hipError_t
callHistogramKernelNDim(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT* starts, INDEXT* ends,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
bool outInDev = false,
hipStream_t stream = 0, void* tmpBuffer = NULL,
bool allowMultiPass = true);
template <histogram_type histotype, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
hipError_t
callHistogramKernel2Dim(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT x0, INDEXT x1,
INDEXT y0, INDEXT y1,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
bool outInDev,
hipStream_t stream, void* tmpBuffer,
bool allowMultiPass = true);
struct test_xform {
__host__ __device__ void operator() (unsigned int* input, int i, int* res_idx, unsigned int* res, int nres) const {
*res_idx++ = input[i];
*res++ = 1;
}
};
//struct test_xform {
// __host__ __device__ void operator() (unsigned int* input, int i, int* res_idx, unsigned char* res, int nres) const {
// *res_idx++ = input[i];
// *res++ = 1;
// }
//};
// Sum-functor to be used for reduction - just a normal sum of two integers
struct test_sumfun {
__device__ __host__ unsigned int operator() (unsigned int res1, unsigned int res2) const{
return res1 + res2;
}
};
//struct test_sumfun {
// __device__ __host__ unsigned char operator() (unsigned char res1, unsigned char res2) const{
// unsigned int sum = (unsigned int)res1+(unsigned int)res2;
// if(sum>255) return 255;
// return res1 + res2;
// }
//};
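/*
 * Usage sketch (assumption, mirroring opt_2dhisto() below): for every index i
 * in [start, end) the library calls the transform functor to obtain (bin
 * index, value) pairs and folds them into the output with the sum functor,
 * conceptually
 *
 *   xform(input, i, &binIdx, &val, 1);
 *   out[binIdx] = sumfun(out[binIdx], val);
 *
 * so with test_xform/test_sumfun above this amounts to a plain counting histogram.
 */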
__global__ void computeHistogram(unsigned int *buffer, int size, unsigned int *histo )
{
__shared__ unsigned int temp[1024];
temp[threadIdx.x + 0] = 0;
temp[threadIdx.x + 256] = 0;
temp[threadIdx.x + 512] = 0;
temp[threadIdx.x + 768] = 0;
__syncthreads();
int i = threadIdx.x + blockIdx.x * blockDim.x;
int offset = blockDim.x * gridDim.x;
while (i < size)
{
atomicAdd( &temp[buffer[i]], 1);
i += offset;
}
__syncthreads();
atomicAdd( &(histo[threadIdx.x + 0]), temp[threadIdx.x + 0] );
atomicAdd( &(histo[threadIdx.x + 256]), temp[threadIdx.x + 256] );
atomicAdd( &(histo[threadIdx.x + 512]), temp[threadIdx.x + 512] );
atomicAdd( &(histo[threadIdx.x + 768]), temp[threadIdx.x + 768] );
}
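/*
 * Launch sketch (assumption): computeHistogram privatizes 1024 bins in shared
 * memory and indexes them with threadIdx.x + 0/256/512/768, so it must be run
 * with exactly 256 threads per block, e.g.
 *
 *   computeHistogram<<<64, 256>>>(d_Data, width * height, d_Histogram);
 *
 * The grid size only changes how many input elements each block strides over.
 */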
extern "C" void opt_init(unsigned int** h_Data, int width, int height)
{
hipMalloc((void **)&d_Histogram, HISTO_HEIGHT * HISTO_WIDTH * sizeof(unsigned int));
hipMemset( d_Histogram, 0,HISTO_HEIGHT * HISTO_WIDTH * sizeof( unsigned int ));
unsigned int *data = new unsigned int[width*height];
for(int j = 0;j<height;++j)
{
memcpy(data+j*width, h_Data[j], sizeof(unsigned int)*width);
}
hipMalloc((void **)&d_Data, width*height*sizeof(unsigned int));
hipMemcpy(d_Data, data, width*height*sizeof(unsigned int), hipMemcpyHostToDevice);
delete []data;
}
extern "C" void opt_2dhisto(int size)
{
test_xform xform;
test_sumfun sum;
//unsigned char zero = 0x00;
callHistogramKernel<histogram_atomic_inc, 1>(d_Data, xform, sum, 0, size, 0U, &d_Histogram[0], HISTO_HEIGHT * HISTO_WIDTH, true);
h_Histogram = new unsigned int[HISTO_HEIGHT * HISTO_WIDTH];
hipMemcpy(h_Histogram, d_Histogram, HISTO_HEIGHT * HISTO_WIDTH * sizeof(unsigned int), hipMemcpyDeviceToHost);
//hipDeviceSynchronize();
}
extern "C" void opt_free()
{
hipFree(d_Histogram);
hipFree(d_Data);
}
extern "C" void opt_copyFromDevice(unsigned char* output)
{
for(int i = 0;i<HISTO_HEIGHT * HISTO_WIDTH;++i)
{
int value = h_Histogram[i]/1000;
output[i] = value>255?255:value;
}
// memcpy(output,h_Histogram,sizeof(unsigned char)*HISTO_HEIGHT*HISTO_WIDTH);
delete[] h_Histogram;
}
//#include <stdio.h>
template <typename OUTPUTTYPE, typename SUMFUNTYPE>
__global__
void multireduceKernel(OUTPUTTYPE* input, int n, int nOut, int nsteps, SUMFUNTYPE sumFun, OUTPUTTYPE zero, int stride, OUTPUTTYPE* initialValues)
{
int tid = threadIdx.x;
int bidx = blockIdx.x;
int bidy = blockIdx.y;
OUTPUTTYPE myout = zero;
int i;
for (i = 0; i < nsteps; i++)
{
int subIndex = bidx * RBLOCK_SIZE + tid;
int cidx = subIndex + i * RBLOCK_SIZE * gridDim.x;
if (cidx < n)
{
// printf("t(%2d)b(%3d,%2d) r(%d)\n", tid, bidx, bidy, cidx + bidy * stride);
myout = sumFun(myout, input[cidx + bidy * stride]);
}
}
__shared__ OUTPUTTYPE tmp[RBLOCK_SIZE / 2];
for (int curLimit = RBLOCK_SIZE / 2; curLimit > 0; curLimit >>= 1)
{
// First write out the current result for threads above the limit
if (tid >= curLimit && tid < (curLimit << 1))
tmp[tid - curLimit] = myout;
        // Otherwise wait for the write to complete and add that value to our result
__syncthreads();
if (tid < curLimit)
myout = sumFun(myout, tmp[tid]);
// IMPORTANT: Wait before new loop for the read to complete
__syncthreads();
}
// Done! myout contains the result for our block for thread 0!!
if (tid == 0)
{
// NOTE: If gridDim == 1 then we have finally reached the last iteration and
// can write the result into the final result-value array
// (ie. The same as initialvalue-array)
if (gridDim.x == 1)
{
OUTPUTTYPE initVal = initialValues[bidy];
initialValues[bidy] = sumFun(initVal, myout);
// And we are DONE!
}
else
{
// printf("t(%2d)b(%3d,%2d) w(%d)\n", tid, bidx, bidy, bidx + bidy * stride);
initialValues[bidx + bidy * stride] = myout;
}
}
}
template <typename OUTPUTTYPE, typename SUMFUNTYPE>
static
void callMultiReduce(
int arrLen, int nOut, OUTPUTTYPE* h_results, OUTPUTTYPE* input,
SUMFUNTYPE sumFunObj, OUTPUTTYPE zero,
hipStream_t stream, void* tmpbuf, bool outInDev)
{
int n = arrLen;
// Set-up yet another temp buffer: (TODO: Pool alloc somehow?)
OUTPUTTYPE* resultTemp = NULL;
// TODO: Why do we need such a large temporary array?
// Shouldn't sizeof(OUTPUTTYPE) * nOut * xblocks be enough??
if (tmpbuf)
{
resultTemp = (OUTPUTTYPE*)tmpbuf;
}
else
{
hipMalloc((void**)&resultTemp, sizeof(OUTPUTTYPE) * nOut * arrLen);
#if H_ERROR_CHECKS
//printf("resultTemp = %p\n", resultTemp);
hipError_t error = hipGetLastError();
if (error != hipSuccess)
printf("Cudaerror0 = %s\n", hipGetErrorString( error ));
#endif
}
OUTPUTTYPE* output = resultTemp;
enum hipMemcpyKind fromOut = outInDev ? hipMemcpyDeviceToDevice : hipMemcpyHostToDevice;
enum hipMemcpyKind toOut = outInDev ? hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost;
// Copy initial values:
do
{
int steps = (n + (RBLOCK_SIZE - 1)) / RBLOCK_SIZE;
if (steps > RMAXSTEPS)
steps = RMAXSTEPS;
int yblocks = nOut;
int xblocks = (n + (steps * RBLOCK_SIZE - 1)) / (steps * RBLOCK_SIZE);
const dim3 block = RBLOCK_SIZE;
const dim3 grid(xblocks, yblocks, 1);
if (xblocks == 1) // LAST ONE to start
{
//printf("hipMemcpy(%p, %p, %d, %d);\n", output, h_results, sizeof(OUTPUTTYPE) * nOut, fromOut);
if (stream != 0)
hipMemcpyAsync(output, h_results, sizeof(OUTPUTTYPE) * nOut, fromOut, stream);
else
hipMemcpy(output, h_results, sizeof(OUTPUTTYPE) * nOut, fromOut);
}
#if H_ERROR_CHECKS
hipError_t error = hipGetLastError();
if (error != hipSuccess)
printf("Cudaerror1 = %s\n", hipGetErrorString( error ));
#endif
// Then the actual kernel call
hipLaunchKernelGGL(( multireduceKernel), dim3(grid), dim3(block), 0, stream, input, n, nOut, steps, sumFunObj, zero, arrLen, output);
#if H_ERROR_CHECKS
error = hipGetLastError();
if (error != hipSuccess)
printf("Cudaerror2 = %s\n", hipGetErrorString( error ));
#endif
if (xblocks > 1)
{
// Swap pointers:
OUTPUTTYPE* tmpptr = output;
output = input;
input = tmpptr;
}
n = xblocks;
} while(n > 1);
// Then copy back the results:
//hipMemcpyAsync(h_results, resultTemp, sizeof(OUTPUTTYPE) * nOut, hipMemcpyDeviceToHost, CURRENT_STREAM());
// TODO: Support async copy here??
if (outInDev && stream != 0)
hipMemcpyAsync(h_results, output, sizeof(OUTPUTTYPE) * nOut, toOut, stream);
else
hipMemcpy(h_results, output, sizeof(OUTPUTTYPE) * nOut, toOut);
#if H_ERROR_CHECKS
hipError_t error = hipGetLastError();
if (error != hipSuccess)
printf("Cudaerror3 = %s\n", hipGetErrorString( error ));
#endif
if (!tmpbuf)
{
hipFree(resultTemp);
}
#if H_ERROR_CHECKS
error = hipGetLastError();
if (error != hipSuccess)
printf("Cudaerror4 = %s\n", hipGetErrorString( error ));
#endif
}
template <typename SUMFUNTYPE, typename OUTPUTTYPE>
__global__
void gatherKernel(SUMFUNTYPE sumfunObj, OUTPUTTYPE* blockOut, int nOut, int nEntries, OUTPUTTYPE zero)
{
//int resIdx = threadIdx.x + blockDim.x * blockIdx.x;
int resIdx = blockIdx.x;
if (resIdx < nOut)
{
// Let's divide the nEntries first evenly on all threads and read 4 entries in a row
int locEntries = (nEntries) >> (GATHER_BLOCK_SIZE_LOG2);
// Note: Original array entry is stored in resIdx + nOut * nEntries!
OUTPUTTYPE res = zero;
if (threadIdx.x == 0)
res = blockOut[resIdx + nOut * nEntries];
// Shift starting ptr:
blockOut = &blockOut[resIdx];
int locIdx = threadIdx.x * locEntries;
for (int i=0; i < locEntries/4; i++)
{
OUTPUTTYPE x1 = blockOut[nOut * (locIdx + (i << 2))];
OUTPUTTYPE x2 = blockOut[nOut * (locIdx + (i << 2) + 1)];
OUTPUTTYPE x3 = blockOut[nOut * (locIdx + (i << 2) + 2)];
OUTPUTTYPE x4 = blockOut[nOut * (locIdx + (i << 2) + 3)];
res = sumfunObj(res, x1);
res = sumfunObj(res, x2);
res = sumfunObj(res, x3);
res = sumfunObj(res, x4);
}
// Then do the rest
for (int j = (locEntries/4)*4; j < locEntries; j++)
{
OUTPUTTYPE x1 = blockOut[nOut * (locIdx + j)];
res = sumfunObj(res, x1);
}
// Still handle rest starting from index "locEntries * BLOCK_SIZE":
locIdx = threadIdx.x + (locEntries << GATHER_BLOCK_SIZE_LOG2);
if (locIdx < nEntries)
res = sumfunObj(res, blockOut[nOut * locIdx]);
// Ok - all that is left is to do the final parallel reduction between threads:
{
__shared__ OUTPUTTYPE data[GATHER_BLOCK_SIZE];
//volatile OUTPUTTYPE* data = (volatile OUTPUTTYPE*)&dataTmp[0];
// TODO Compiler complains with volatile from this - why?
//error: no operator "=" matches these operands
// operand types are: volatile myTestType_s = myTestType
// Silly - does not happen with built-in types (nice...)
data[threadIdx.x] = res;
#if GATHER_BLOCK_SIZE == 512
__syncthreads();
if (threadIdx.x < 256)
data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 256]);
#endif
#if GATHER_BLOCK_SIZE >= 256
__syncthreads();
if (threadIdx.x < 128)
data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 128]);
#endif
#if GATHER_BLOCK_SIZE >= 128
__syncthreads();
if (threadIdx.x < 64)
data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 64]);
__syncthreads();
#endif
#if GATHER_BLOCK_SIZE >= 64
__syncthreads();
if (threadIdx.x < 32)
data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 32]);
#endif
__syncthreads();
if (threadIdx.x < 16) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 16]);
__syncthreads();
if (threadIdx.x < 8) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 8]);
__syncthreads();
if (threadIdx.x < 4) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 4]);
__syncthreads();
if (threadIdx.x < 2) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 2]);
__syncthreads();
if (threadIdx.x < 1) *blockOut = sumfunObj(data[threadIdx.x], data[threadIdx.x + 1]);
}
}
}
#define FREE_MUTEX_ID 0xffeecafe
#define TAKE_WARP_MUTEX(ID) do { \
int warpIdWAM = threadIdx.x >> 5; \
__shared__ volatile int lockVarWarpAtomicMutex;\
bool doneWAM = false;\
bool allDone = false; \
while(!allDone){ \
__syncthreads(); \
if (!doneWAM) lockVarWarpAtomicMutex = warpIdWAM; \
__syncthreads(); \
if (lockVarWarpAtomicMutex == FREE_MUTEX_ID) allDone = true; \
__syncthreads(); \
if (lockVarWarpAtomicMutex == warpIdWAM){ /* We Won */
// User code comes here
#define GIVE_WARP_MUTEX(ID) doneWAM = true; \
lockVarWarpAtomicMutex = FREE_MUTEX_ID; \
} \
} \
__syncthreads(); \
} while(0)
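/*
 * Pairing sketch: TAKE_WARP_MUTEX opens an if-block that only the winning warp
 * enters and GIVE_WARP_MUTEX closes it, so the two macros must always bracket
 * the per-warp critical section, e.g. (as myAtomicWarpAdd() below does):
 *
 *   TAKE_WARP_MUTEX(0);
 *   // ... code executed by one warp at a time ...
 *   GIVE_WARP_MUTEX(0);
 */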
// NOTE: Init must be called from divergent-free code (or with exited warps)
#define INIT_WARP_MUTEX2(MUTEX) do { MUTEX = FREE_MUTEX_ID; __syncthreads(); } while(0)
#if 0 && __CUDA_ARCH__ >= 120 // TODO: THIS CODEPATH IS NOT WORKING - find out why
#define TAKE_WARP_MUTEX2(MUTEX) do { \
int warpIdWAM = 1000000 + threadIdx.x / 32; \
bool doneWAM = false;\
while(!doneWAM){ \
int old = -2; \
if (threadIdx.x % 32 == 0) \
old = atomicCAS(&MUTEX, FREE_MUTEX_ID, warpIdWAM); \
if (__any(old == FREE_MUTEX_ID)){ /* We Won */
// User code comes here
#define GIVE_WARP_MUTEX2(MUTEX) doneWAM = true; \
atomicExch(&MUTEX, FREE_MUTEX_ID); \
} \
} \
} while(0)
#else
#define TAKE_WARP_MUTEX2(MUTEX) do { \
int warpIdWAM = 1000000 + threadIdx.x / 32; \
bool doneWAM = false;\
bool allDone = false; \
while(!allDone){ \
__syncthreads(); \
if (!doneWAM) MUTEX = warpIdWAM; \
__syncthreads(); \
if (MUTEX == FREE_MUTEX_ID) allDone = true; \
if (MUTEX == warpIdWAM){ /* We Won */
// User code comes here
#define GIVE_WARP_MUTEX2(MUTEX) doneWAM = true; \
MUTEX = FREE_MUTEX_ID; \
} \
} \
} while(0)
#endif
#if USE_BALLOT_HISTOGRAM
template <typename OUTPUTTYPE>
static inline __device__
OUTPUTTYPE mySillyPopCount(unsigned int mymask, OUTPUTTYPE zero)
{
return zero;
}
static inline __device__
int mySillyPopCount(unsigned int mymask, int zero)
{
return (int)__popc(mymask);
}
static inline __device__
unsigned int mySillyPopCount(unsigned int mymask, unsigned int zero)
{
return (unsigned int)__popc(mymask);
}
static inline __device__
long long mySillyPopCount(unsigned int mymask, long long zero)
{
return (long long)__popc(mymask);
}
static inline __device__
unsigned long long mySillyPopCount(unsigned int mymask, unsigned long long zero)
{
return (unsigned long long)__popc(mymask);
}
template <histogram_type histotype, bool checkNSame, typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
bool ballot_makeUnique(
SUMFUNTYPE sumfunObj,
int myKey, OUTPUTTYPE* myOut, OUTPUTTYPE* s_vals, int* s_keys, int* nSameKeys)
{
unsigned int mymask;
/* #if HBLOCK_SIZE != 32
#error Please use threadblocks of 32 threads
#endif*/
//startKey = s_keys[startIndex];
// First dig out for each thread who are the other threads that have the same key as us...
//int i = 0;
if (checkNSame) {
unsigned int donemask = 0;
int startIndex = 32 - 1;
int startKey = s_keys[startIndex];
*nSameKeys = 0;
while (~donemask != 0 /*&& i++ < 32*/)
{
unsigned int mask = __ballot(myKey == startKey);
if (myKey == startKey)
mymask = mask;
donemask |= mask;
{
int nSame = __popc(mask);
if (nSame > *nSameKeys)
*nSameKeys = nSame;
}
startIndex = 31 - __clz(~donemask);
//if (myKey == 0) printf("Startindex = %d, donemask = 0x%08x, mask = 0x%08x\n", startIndex, donemask, mask);
if (startIndex >= 0)
startKey = s_keys[startIndex];
}
} else {
unsigned int donemask = 0;
int startIndex = 32 - 1;
while (startIndex >= 0)
{
int startKey = s_keys[startIndex];
unsigned int mask = __ballot(myKey == startKey);
if (myKey == startKey)
mymask = mask;
donemask |= mask;
startIndex = 31 - __clz(~donemask);
}
}
// Ok now mymask contains those threads - now we just reduce locally - all threads run at the same
// time, but reducing threads lose always half of them with each iteration - it would help
// to work with more than 32 entries, but the algorithm seems to get tricky there.
{
// Compute the left side of the mask and the right side. rmask first will contain our thread index, but
// we zero it out immediately
unsigned int lmask = (mymask >> (threadIdx.x & 31)) << (threadIdx.x & 31);
int IamNth = __popc(lmask) - 1;
bool Iwrite = IamNth == 0;
if (histotype == histogram_atomic_inc)
{
// Fast-path for atomic inc
*myOut = mySillyPopCount(mymask, *myOut);
return Iwrite && (myKey >= 0);
}
else
{
unsigned int rmask = mymask & (~lmask);
// Now compute which number is our thread in the subarray of those threads that have the same key
// starting from the left (ie. index == 31). So for thread 31 this will be always zero.
int nextIdx = 31 - __clz(rmask);
s_vals[(threadIdx.x & 31)] = *myOut;
//if (myKey == 0) printf("tid = %02d, IamNth = %02d, mask = 0x%08x, rmask = 0x%08x \n", threadIdx.x, IamNth, mymask, rmask);
//bool done = __all(nextIdx < 0);
// TODO: Unroll 5?
while (!__all(nextIdx < 0))
{
// Reduce towards those threads that have lower IamNth
// Our thread reads the next one if our internal ID is even
if ((IamNth & 0x1) == 0)
{
if (nextIdx >= 0){
// if (myKey == 0) printf("tid:%02d, add with %02d\n", threadIdx.x, nextIdx);
*myOut = sumfunObj(*myOut, s_vals[nextIdx]);
}
// And writes to the shared memory if our internal ID is third on every 4-long subarray:
if ((IamNth & 0x3) == 2)
{
// if (myKey == 0) printf("Tid %02d, store\n", threadIdx.x);
s_vals[(threadIdx.x & 31)] = *myOut;
}
}
// Now the beautiful part: Kill every other bit in the rmask bitfield. How, you ask?
// Using ballot: Every bit we want to kill has IamNth odd, or conversely, we only
// want to keep those bits that have IamNth even...
rmask &= __ballot((IamNth & 0x1) == 0);
nextIdx = 31 - __clz(rmask);
// if (myKey == 0) printf("tid = %02d, next = %02d, key = %d\n", threadIdx.x, rmask, nextIdx, myKey);
IamNth >>= 1;
//printf("i = %d\n", i);
}
// And voila, we are done - write out the result:
return Iwrite && (myKey >= 0);
}
}
}
#endif
template <bool laststeps, typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
void myAtomicWarpAdd(OUTPUTTYPE* addr, OUTPUTTYPE val, volatile int* keyAddr, SUMFUNTYPE sumfunObj, bool Iwrite, int* warpmutex)
{
// Taken from http://forums.nvidia.com/index.php?showtopic=72925
// This is a tad slow, but allows arbitrary operation
// For writes of 16 bytes or less AtomicCAS could be faster
// (See CUDA programming guide)
TAKE_WARP_MUTEX(0);
//__shared__ int warpmutex;
//INIT_WARP_MUTEX2(*warpmutex);
//TAKE_WARP_MUTEX2(*warpmutex);
bool write = Iwrite;
#define MU_TEMP_MAGIC 0xffffaaaa
*keyAddr = MU_TEMP_MAGIC;
while (1)
{
// Vote whose turn is it - remember, one thread does succeed always!:
if (write) *keyAddr = threadIdx.x;
if (*keyAddr == MU_TEMP_MAGIC)
break;
if (*keyAddr == threadIdx.x) // We won!
{
// Do arbitrary atomic op:
*addr = sumfunObj(*addr, val);
write = false;
*keyAddr = MU_TEMP_MAGIC;
}
}
GIVE_WARP_MUTEX(0);
//GIVE_WARP_MUTEX2(*warpmutex);
#undef MU_TEMP_MAGIC
}
template <typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
void myAtomicAdd(OUTPUTTYPE* addr, OUTPUTTYPE val, volatile int* keyAddr, SUMFUNTYPE sumfunObj)
{
// Taken from http://forums.nvidia.com/index.php?showtopic=72925
// This is a tad slow, but allows arbitrary operation
// For writes of 16 bytes or less AtomicCAS could be faster
// (See CUDA programming guide)
bool write = true;
#define MU_TEMP_MAGIC 0xffffaaaa
*keyAddr = MU_TEMP_MAGIC;
while (1)
{
// Vote whose turn is it - remember, one thread does succeed always!:
if (write ) *keyAddr = threadIdx.x;
if (*keyAddr == MU_TEMP_MAGIC)
break;
if (*keyAddr == threadIdx.x) // We won!
{
// Do arbitrary atomic op:
*addr = sumfunObj(*addr, val);
write = false;
*keyAddr = MU_TEMP_MAGIC;
}
}
#undef MU_TEMP_MAGIC
}
/*static __inline__ __device__ unsigned long long int atomicAdd(unsigned long long int *address, unsigned long long int val)
{
return __ullAtomicAdd(address, val);
}*/
template <typename OUTPUTTYPE>
static inline __device__
void atomicAdd(OUTPUTTYPE* addr, OUTPUTTYPE val)
{
//*addr = val;
}
template <typename OUTPUTTYPE>
static inline __device__
void atomicAdd(OUTPUTTYPE* addr, int val)
{
//*addr = val;
}
#if 0
template <typename OUTPUTTYPE>
static inline __device__
void atomicAdd(OUTPUTTYPE* addr, float val)
{
//*addr = val;
}
#endif
template <typename OUTPUTTYPE>
static inline __device__
void atomicAdd(OUTPUTTYPE* addr, unsigned int val)
{
//*addr = val;
}
template <typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
void myAtomicAddStats(OUTPUTTYPE* addr, OUTPUTTYPE val, volatile int* keyAddr, SUMFUNTYPE sumfunObj, int* nSameOut, bool Iwrite)
{
// Taken from http://forums.nvidia.com/index.php?showtopic=72925
bool write = true;
*keyAddr = 0xffffffff;
while (Iwrite)
{
// Vote whose turn is it - remember, one thread does succeed always!:
if (write ) *keyAddr = threadIdx.x;
if (*keyAddr == 0xffffffff)
break;
if (*keyAddr == threadIdx.x) // We won!
{
// Do arbitrary atomic op:
*addr = sumfunObj(*addr, val);
write = false;
*keyAddr = 0xffffffff;
} else {
*nSameOut = *nSameOut + 1;
}
}
{
// Then find max
__shared__ int nSame[HBLOCK_SIZE];
nSame[threadIdx.x] = *nSameOut;
#define TMPMAX(A,B) (A) > (B) ? (A) : (B)
#define tidx threadIdx.x
if (tidx < 16) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 16]);
if (tidx < 8) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 8]);
if (tidx < 4) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 4]);
if (tidx < 2) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 2]);
if (tidx < 1) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 1]);
#undef TMPMAX
#undef tidx
// Broadcast to all threads
*nSameOut = nSame[0];
}
}
// TODO: Make unique within one warp?
template<histogram_type histotype, bool checkNSame, typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
bool reduceToUnique(OUTPUTTYPE* res, int myKey, int* nSame, SUMFUNTYPE sumfunObj, int* keys, OUTPUTTYPE* outputs)
{
keys[(threadIdx.x & 31)] = myKey;
#if USE_BALLOT_HISTOGRAM
return ballot_makeUnique<histotype, checkNSame>(sumfunObj, myKey, res, outputs, keys, nSame);
#else
{
int i;
bool writeResult = myKey >= 0;
int myIdx = (threadIdx.x & 31) + 1;
outputs[(threadIdx.x & 31)] = *res;
// The assumption for sanity of this loop here is that all the data is in registers or shared memory and
// hence this loop will not actually be __that__ slow.. Also it helps if the data is spread out (ie. there are
// a lot of different indices here)
for (i = 1; i < 32 && writeResult; i++)
{
if (myIdx >= 32)
myIdx = 0;
// Is my index the same as the index on the index-list?
if (keys[myIdx] == myKey /*&& threadIdx.x != myIdx*/)
{
if (checkNSame) (*nSame)++;
// If yes, then we can sum up the result using users sum-functor
*res = sumfunObj(*res, outputs[myIdx]);
// But if somebody else is summing up this index already, we don't need to (wasted effort done here)
if (myIdx < threadIdx.x)
writeResult = false;
}
myIdx++;
}
// Ok - we are done - now we can proceed in writing the result (if some other thread isn't doing it already)
if (checkNSame)
{
// Manual reduce
int tid = threadIdx.x;
keys[tid] = *nSame;
if (tid < 16) keys[tid] = keys[tid] > keys[tid + 16] ? keys[tid] : keys[tid+16];
if (tid < 8) keys[tid] = keys[tid] > keys[tid + 8] ? keys[tid] : keys[tid+8];
if (tid < 4) keys[tid] = keys[tid] > keys[tid + 4] ? keys[tid] : keys[tid+4];
if (tid < 2) keys[tid] = keys[tid] > keys[tid + 2] ? keys[tid] : keys[tid+2];
if (tid < 1) keys[tid] = keys[tid] > keys[tid + 1] ? keys[tid] : keys[tid+1];
*nSame = keys[0];
}
return writeResult;
}
#endif
}
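/*
 * Caller sketch: reduceToUnique() folds, within one warp, all results that
 * share the same key into a single writer lane and returns true only for that
 * lane (and only for a valid key), so callers guard the bin update with the
 * returned flag, e.g. (as the ONE_HS_STEP macro further below does):
 *
 *   bool Iwrite = reduceToUnique<histotype, false>(&res, key, NULL, sumfunObj, keys, outs);
 *   if (Iwrite) bins[key] = sumfunObj(bins[key], res);
 */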
static inline __host__ __device__
void checkStrategyFun(bool *reduce, int nSame, int nSameTot, int step, int nBinSetslog2)
{
#if __CUDA_ARCH__ >= 200
#define STR_LIMIT 12
#else
#define STR_LIMIT 24
#endif
// TODO: Fix average case - a lot of things to tune here...
if ((nSameTot > STR_LIMIT * step || nSame > STR_LIMIT))
*reduce = true;
else
*reduce = false;
#undef STR_LIMIT
}
// Special case for floats (atomicAdd works only from __CUDA_ARCH__ 200 and up)
template <typename SUMFUNTYPE>
static inline __device__
void wrapAtomicAdd2(float* addr, float val, int* key, SUMFUNTYPE sumFunObj)
{
//*addr = val;
#if __CUDA_ARCH__ >= 200
atomicAdd(addr, val);
#else
myAtomicAdd(addr, val, key, sumFunObj);
#endif
}
template <typename SUMFUNTYPE,typename OUTPUTTYPE>
static inline __device__
void wrapAtomicAdd2(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj)
{
atomicAdd(addr, val);
}
// Special case for floats (atomicAdd works only from __CUDA_ARCH__ 200 and up)
template <bool laststeps, typename SUMFUNTYPE>
static inline __device__
void wrapAtomicAdd2Warp(float* addr, float val, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex)
{
//*addr = val;
#if __CUDA_ARCH__ >= 200
if (Iwrite) atomicAdd(addr, val);
#else
myAtomicWarpAdd<laststeps>(addr, val, key, sumFunObj, Iwrite, warpmutex);
#endif
}
template <bool laststeps, typename SUMFUNTYPE,typename OUTPUTTYPE>
static inline __device__
void wrapAtomicAdd2Warp(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex)
{
if (Iwrite) atomicAdd(addr, val);
}
template <typename OUTPUTTYPE, typename SUMFUNTYPE>
static inline __device__
void wrapAtomicAdd(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj)
{
//*addr = val;
#if __CUDA_ARCH__ >= 120
wrapAtomicAdd2(addr, val, key, sumFunObj);
#else
myAtomicAdd(addr, val, key, sumFunObj);
#endif
}
template <typename OUTPUTTYPE, typename SUMFUNTYPE>
static inline __device__
void wrapAtomicInc(OUTPUTTYPE* addr, int* key, SUMFUNTYPE sumFunObj)
{
//*addr = val;
#if __CUDA_ARCH__ >= 120
wrapAtomicAdd2((int*)addr, 1, key, sumFunObj);
#else
//myAtomicAdd((int*)addr, 1, key, sumFunObj);
#endif
}
template <typename SUMFUNTYPE>
static inline __device__
void wrapAtomicInc(int* addr, int* key, SUMFUNTYPE sumFunObj)
{
//*addr = val;
#if __CUDA_ARCH__ >= 120
wrapAtomicAdd2(addr, 1, key, sumFunObj);
#else
myAtomicAdd(addr, 1, key, sumFunObj);
#endif
}
template <bool laststeps, typename OUTPUTTYPE, typename SUMFUNTYPE>
static inline __device__
void wrapAtomicAddWarp(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex)
{
//*addr = val;
#if __CUDA_ARCH__ >= 120
wrapAtomicAdd2Warp<laststeps>(addr, val, key, sumFunObj, Iwrite, warpmutex);
#else
myAtomicWarpAdd<laststeps>(addr, val, key, sumFunObj, Iwrite, warpmutex);
#endif
}
template <bool laststeps, typename OUTPUTTYPE, typename SUMFUNTYPE>
static inline __device__
void wrapAtomicIncWarp(OUTPUTTYPE* addr, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex)
{
//*addr = val;
#if __CUDA_ARCH__ >= 120
wrapAtomicAdd2Warp<laststeps>((int*)addr, 1, key, sumFunObj, Iwrite, warpmutex);
#else
//myAtomicAdd((int*)addr, 1, key, sumFunObj);
#endif
}
template <bool laststeps, typename SUMFUNTYPE>
static inline __device__
void wrapAtomicIncWarp(int* addr, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex)
{
//*addr = val;
#if __CUDA_ARCH__ >= 120
wrapAtomicAdd2Warp<laststeps>(addr, 1, key, sumFunObj, Iwrite, warpmutex);
#else
myAtomicWarpAdd<laststeps>(addr, 1, key, sumFunObj, Iwrite, warpmutex);
#endif
}
// TODO: Consider the following:
// First private hash for each warp - later, share hash-tables between warps
// Try also: private hashes for some threads of one warp etc
template <typename OUTPUTTYPE>
struct myHash
{
int* keys;
#if !USE_ATOMICS_HASH
int* locks;
#endif
OUTPUTTYPE* vals;
OUTPUTTYPE* myBlockOut;
};
template <typename OUTPUTTYPE>
static inline __device__
void InitHash(struct myHash<OUTPUTTYPE> *hash, OUTPUTTYPE zero, int hashSizelog2)
{
int nloops = (1 << hashSizelog2) >> LBLOCK_SIZE_LOG2;
int* myEntry = &hash->keys[threadIdx.x];
for (int i = 0; i < nloops; i++)
{
*myEntry = -1;
myEntry += LBLOCK_SIZE;
}
if ((nloops << LBLOCK_SIZE_LOG2) + threadIdx.x < (1 << hashSizelog2))
{
*myEntry = -1;
}
// Done
}
#if 0 // OLD code
template <typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
void FlushHash(struct myHash<OUTPUTTYPE> *hash, SUMFUNTYPE sumfunObj, int hashSizelog2)
{
int nloops = (1 << hashSizelog2) >> LBLOCK_SIZE_LOG2;
OUTPUTTYPE* myVal = &hash->vals[threadIdx.x];
int* key = &hash->keys[threadIdx.x];
for (int i = 0; i < nloops; i++) {
int keyIndex = *key;
if (keyIndex >= 0) {
hash->myBlockOut[keyIndex] = sumfunObj(*myVal, hash->myBlockOut[keyIndex]);
*key = -1;
}
key += LBLOCK_SIZE;
myVal += LBLOCK_SIZE;
}
if ((nloops << LBLOCK_SIZE_LOG2) + threadIdx.x < (1 << hashSizelog2))
{
int keyIndex = *key;
if (keyIndex >= 0){
hash->myBlockOut[keyIndex] = sumfunObj(*myVal, hash->myBlockOut[keyIndex]);
*key = -1;
}
}
}
#endif // 0
// See: http://www.burtleburtle.net/bob/hash/doobs.html
// Mix by Bob Jenkins
#define HISTO_JENKINS_MIX(A, B, C) \
do { \
A -= B; A -= C; A ^= (C>>13); \
B -= C; B -= A; B ^= (A<<8); \
C -= A; C -= B; C ^= (B>>13); \
A -= B; A -= C; A ^= (C>>12); \
B -= C; B -= A; B ^= (A<<16); \
C -= A; C -= B; C ^= (B>>5); \
A -= B; A -= C; A ^= (C>>3); \
B -= C; B -= A; B ^= (A<<10); \
C -= A; C -= B; C ^= (B>>15); \
} while (0)
static inline __device__
unsigned int histogramHashFunction(int key)
{
#if USE_JENKINS_HASH
unsigned int a = (unsigned int)key;
unsigned int c,b;
// TODO: What are good constants?
b = 0x9e3779b9;
c = 0xf1232345;
HISTO_JENKINS_MIX(a, b, c);
return c;
#else
// Golden ratio hash
return (0x9e3779b9u * (unsigned int)key);
#endif
}
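/*
 * Index derivation sketch: AddToHash() below keeps the top hashSizelog2 bits
 * of this hash as the table slot,
 *
 *   int index = (int)(histogramHashFunction(key) >> (32 - hashSizelog2));
 *
 * so a table of (1 << hashSizelog2) entries is addressed with well-mixed bits.
 */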
#if USE_ATOMICS_HASH
template <typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
void AddToHash(OUTPUTTYPE res, int myKey, struct myHash<OUTPUTTYPE> *hash, SUMFUNTYPE sumfunObj, int hashSizelog2, bool Iwrite, bool unique)
{
if (unique)
{
if (Iwrite)
{
hash->myBlockOut[myKey] = sumfunObj(res, hash->myBlockOut[myKey]);
}
return;
}
unsigned int hashkey = histogramHashFunction(myKey);
volatile __shared__ bool hashFull;
int index = (int)(hashkey >> (32 - hashSizelog2));
bool Iamdone = !Iwrite;
bool IFlush = Iwrite;
hashFull = true;
while (hashFull)
{
// Mark here hash full, and if any thread has problems finding
// free entry in hash, then that thread sets hashFull to nonzero
if (threadIdx.x == 0) hashFull = false;
// Do atomic-part
int old = -2;
int expect = -1;
while (!Iamdone && !hashFull)
{
old = atomicCAS(&hash->keys[index], expect, -3);
if (old == expect) // We won!
{
int key = old;
if (key == -1 || key == myKey)
{
if (key == -1)
{
hash->vals[index] = res;
}
else
{
hash->vals[index] = sumfunObj(res, hash->vals[index]);
IFlush = false;
}
hash->keys[index] = myKey;
Iamdone = true;
}
else
{
hashFull = true;
hash->keys[index] = key;
expect = -1;
}
}
else
{
if (old != myKey)
{
hashFull = true;
expect = -1;
}
else
{
expect = old;
}
}
}
if (IFlush && Iamdone)
{
OUTPUTTYPE* myVal = &hash->vals[index];
int* key = &hash->keys[index];
// TODO: Workaround - get rid of if. Where do the extra flushes come from?
if (*key >= 0) hash->myBlockOut[*key] = sumfunObj(*myVal, hash->myBlockOut[*key]);
//hash->myBlockOut[myKey] = sumfunObj(*myVal, hash->myBlockOut[myKey]);
*key = -1;
}
}
}
#else
template <typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
void AddToHash(OUTPUTTYPE res, int myKey, struct myHash<OUTPUTTYPE> *hash, SUMFUNTYPE sumfunObj, int hashSizelog2, bool Iwrite, bool unique)
{
if (unique)
{
if (Iwrite)
{
hash->myBlockOut[myKey] = sumfunObj(res, hash->myBlockOut[myKey]);
}
return;
}
unsigned int hashkey = histogramHashFunction(myKey);
volatile __shared__ int hashFull;
int index = (int)(hashkey >> (32 - hashSizelog2));
bool Iamdone = false;
bool IFlush = Iwrite;
// TODO: syncthreads()...
hashFull = -10;
while (hashFull != 0)
{
volatile int* lock = &hash->locks[index];
bool write = Iwrite;
#define TMP_LOCK_MAGIC 0xfffffffe
*lock = TMP_LOCK_MAGIC;
// Mark here hash full, and if any thread has problems finding
// free entry in hash, then that thread sets hashFull to nonzero
if (threadIdx.x == 0) hashFull = 0;
// Do atomic-part
while (1)
{
if (!Iamdone && write) *lock = threadIdx.x;
if (*lock == TMP_LOCK_MAGIC)
break;
if (*lock == threadIdx.x) // We won!
{
int key = hash->keys[index];
if (key == -1)
{
hash->keys[index] = myKey;
hash->vals[index] = res;
Iamdone = true;
}
else if (key == myKey)
{
hash->vals[index] = sumfunObj(res, hash->vals[index]);
Iamdone = true;
IFlush = false;
}
else
{
hashFull = 1;
}
// Do arbitrary atomic op:
write = false;
*lock = TMP_LOCK_MAGIC;
}
}
if (IFlush)
{
OUTPUTTYPE* myVal = &hash->vals[index];
int* key = &hash->keys[index];
// TODO: Workaround - get rid of if. Where do the extra flushes come from?
if (*key >= 0) hash->myBlockOut[*key] = sumfunObj(*myVal, hash->myBlockOut[*key]);
*key = -1;
}
}
#undef TMP_LOCK_MAGIC
}
#endif
template <histogram_type histotype, int nMultires, bool reduce, bool checkStrategy, bool laststep, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
static inline __device__
void histo_largenbin_step(INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, OUTPUTTYPE zero,
INDEXT* myStart, INDEXT end, struct myHash<OUTPUTTYPE> *hash, OUTPUTTYPE* blockOut, int nOut, int stepNum, int stepsleft, int* nSameTot, bool* reduceOut, int hashSizelog2,
OUTPUTTYPE* rOuts, int* rKeys)
{
if (!laststep)
{
if (checkStrategy)
{
int myKeys[nMultires];
int nSame = 0;
OUTPUTTYPE res[nMultires];
xformObj(input, *myStart, &myKeys[0], &res[0], nMultires);
            // TODO: Unroll? AddToHash is a big function, but unrolling would probably let values live in registers
bool Iwrite;
#define ADD_ONE_RESULT(RESIDX, NSAME, CHECK) \
do { if (RESIDX < nMultires) { \
Iwrite = reduceToUnique<histotype, CHECK> \
(&res[RESIDX % nMultires], myKeys[RESIDX % nMultires], NSAME, sumfunObj, rKeys, rOuts); \
if ((threadIdx.x) < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1; \
AddToHash(res[RESIDX % nMultires], myKeys[RESIDX % nMultires], hash, sumfunObj, hashSizelog2, Iwrite, true); \
} } while (0)
ADD_ONE_RESULT(0, &nSame, true);
ADD_ONE_RESULT(1, NULL, false);
ADD_ONE_RESULT(2, NULL, false);
ADD_ONE_RESULT(3, NULL, false);
#undef ADD_ONE_RESULT
//#pragma unroll
for (int resid = 4; resid < nMultires; resid++)
{
bool Iwrite = reduceToUnique<histotype, false>(&res[resid], myKeys[resid], NULL, sumfunObj, rKeys, rOuts);
if ((threadIdx.x) < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1;
AddToHash(res[resid], myKeys[resid], hash, sumfunObj, hashSizelog2, Iwrite, true);
}
*nSameTot += nSame;
checkStrategyFun(reduceOut, nSame, *nSameTot, stepNum, 0);
*myStart += LBLOCK_SIZE;
}
else
{
INDEXT startLim = *myStart + ((LBLOCK_SIZE << LARGE_NBIN_CHECK_INTERVAL_LOG2) - LBLOCK_SIZE);
for (; *myStart < startLim; *myStart += LBLOCK_SIZE)
{
int myKeys[nMultires];
OUTPUTTYPE res[nMultires];
xformObj(input, *myStart, &myKeys[0], &res[0], nMultires);
//#pragma unroll
bool Iwrite = true;
#define ADD_ONE_RESULT(RES) \
do { if (RES < nMultires) { \
if (reduce){ Iwrite = reduceToUnique<histotype, false>(&res[RES % nMultires], \
myKeys[RES % nMultires], NULL, sumfunObj, rKeys, rOuts); \
if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1;} \
AddToHash(res[RES % nMultires], myKeys[RES % nMultires], hash, \
sumfunObj, hashSizelog2, Iwrite, reduce); \
} } while (0)
ADD_ONE_RESULT(0);
ADD_ONE_RESULT(1);
ADD_ONE_RESULT(2);
ADD_ONE_RESULT(3);
#undef ADD_ONE_RESULT
for (int resid = 4; resid < nMultires; resid++)
{
bool Iwrite = true;
if (reduce){
Iwrite = reduceToUnique<histotype, false>(&res[resid], myKeys[resid], NULL, sumfunObj, rKeys, rOuts);
if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1;
}
AddToHash(res[resid], myKeys[resid], hash, sumfunObj, hashSizelog2, Iwrite, reduce);
}
}
}
}
else // These are the last steps then
{
for (int substep = 0; substep < stepsleft; substep++)
{
int myKeys[nMultires];
OUTPUTTYPE res[nMultires];
bool Iwrite = false;
if (*myStart < end)
{
Iwrite = true;
xformObj(input, *myStart, &myKeys[0], &res[0], nMultires);
}
else
{
#pragma unroll
for (int resid = 0; resid < nMultires; resid++)
{
res[resid] = zero;
myKeys[resid] = 0;
}
}
//#pragma unroll
{
bool Iwrite2 = Iwrite;
#define ADD_ONE_RESULT(RES) \
do { if (RES < nMultires) { \
if (reduce){ Iwrite2 = reduceToUnique<histotype, false> \
(&res[RES % nMultires], myKeys[RES % nMultires], NULL, sumfunObj, rKeys, rOuts); \
if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1; } \
AddToHash(res[RES % nMultires], myKeys[RES % nMultires], hash, sumfunObj, hashSizelog2, Iwrite2, reduce); \
} } while(0)
ADD_ONE_RESULT(0);
ADD_ONE_RESULT(1);
ADD_ONE_RESULT(2);
ADD_ONE_RESULT(3);
#undef ADD_ONE_RESULT
for (int resid = 4; resid < nMultires; resid++)
{
//bool Iwrite2 = true;
if (reduce){
Iwrite2 = reduceToUnique<histotype, false>(&res[resid], myKeys[resid], NULL, sumfunObj, rKeys, rOuts);
if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1;
}
AddToHash(res[resid], myKeys[resid], hash, sumfunObj, hashSizelog2, Iwrite2, reduce);
}
}
*myStart += LBLOCK_SIZE;
}
}
}
template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
__global__
void histo_kernel_largeNBins(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero,
OUTPUTTYPE* blockOut, int nOut,
int nSteps,
int hashSizelog2)
{
extern __shared__ int keys[];
#if USE_ATOMICS_HASH
OUTPUTTYPE* vals = (OUTPUTTYPE*)(&keys[1 << hashSizelog2]);
if (hashSizelog2 < LBLOCK_SIZE_LOG2)
vals = &keys[1 << LBLOCK_SIZE_LOG2];
#else
int* locks = &keys[1 << hashSizelog2];
if (hashSizelog2 < LBLOCK_SIZE_LOG2)
locks = &keys[1 << LBLOCK_SIZE_LOG2];
OUTPUTTYPE* vals = (OUTPUTTYPE*)(&locks[1 << hashSizelog2]);
#endif
/*int* rKeys = (int*)(&vals[1 << hashSizelog2]);
OUTPUTTYPE* rOuts = (OUTPUTTYPE*)(&rKeys[LBLOCK_SIZE]);*/
int* rKeys = &keys[0];
OUTPUTTYPE* rOuts = vals;
struct myHash<OUTPUTTYPE> hash;
hash.keys = keys;
#if !USE_ATOMICS_HASH
hash.locks = locks;
#endif
hash.vals = vals;
// Where do we put the results from our warp (block)?
hash.myBlockOut = &blockOut[nOut * blockIdx.x];
INDEXT myStart = start + (INDEXT)(((blockIdx.x * nSteps) << LBLOCK_SIZE_LOG2) + threadIdx.x);
// Assert that myStart is not out of bounds!
int nFullSteps = nSteps >> LARGE_NBIN_CHECK_INTERVAL_LOG2;
bool reduce = false;
InitHash(&hash, zero, hashSizelog2);
int nSameTot = 0;
for (int fstep = 0; fstep < nFullSteps; fstep++)
{
int stepNum = fstep << LARGE_NBIN_CHECK_INTERVAL_LOG2;
histo_largenbin_step<histotype, nMultires, true, true, false,INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum, 0, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys);
if (reduce) {
histo_largenbin_step<histotype, nMultires, true, false, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum + 1, 0, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys);
} else {
histo_largenbin_step<histotype, nMultires, false, false, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum + 1, 0, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys);
}
}
// Last steps
int nstepsleft = nSteps - (nFullSteps << LARGE_NBIN_CHECK_INTERVAL_LOG2);
if (nstepsleft > 0)
{
int stepNum = nFullSteps << LARGE_NBIN_CHECK_INTERVAL_LOG2;
if (reduce)
histo_largenbin_step<histotype, nMultires, true, false, true, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum, nstepsleft, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys);
else
histo_largenbin_step<histotype, nMultires, false, false, true, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum, nstepsleft, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys);
}
// Flush values still in hash
//FlushHash(&hash, sumfunObj, hashSizelog2);
}
#if USE_MEDIUM_PATH
//
template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
__global__
void histo_kernel_mediumNBins(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero,
OUTPUTTYPE* blockOut, int nOut,
int nSteps)
{
#if __CUDA_ARCH__ >= 120
OUTPUTTYPE* ourOut = &blockOut[nOut * (threadIdx.x % MED_THREAD_DEGEN) * blockIdx.x];
INDEXT myStart = start + (INDEXT)(((blockIdx.x * nSteps) << MEDIUM_BLOCK_SIZE_LOG2) + threadIdx.x);
bool reduce = false;
int nSameTot = 0;
for (int step = 0; step < nSteps - 1; step++)
{
bool check = false;
int myKey[nMultires];
OUTPUTTYPE myOut[nMultires];
xformObj(input, myStart, &myKey[0], &myOut[0],nMultires);
// TODO: magic constant
if ((step & 63) == 0)
check = true;
{
int nSame;
__shared__ int keys[MEDIUM_BLOCK_SIZE];
__shared__ OUTPUTTYPE rOut[MEDIUM_BLOCK_SIZE];
int warpIdx = threadIdx.x >> 5;
int* wkeys = &keys[warpIdx << 5];
OUTPUTTYPE* wOut = &rOut[warpIdx << 5];
bool Iwrite;
#define ADD_ONE_RESULT(RESID) \
do { if (RESID < nMultires) { \
if (reduce || check){ \
if (check) Iwrite = reduceToUnique<histotype, true> \
(&myOut[RESID % nMultires], myKey[RESID % nMultires], \
&nSame, sumfunObj, wkeys, wOut); \
else Iwrite = reduceToUnique<histotype, false> \
(&myOut[RESID % nMultires], myKey[RESID % nMultires], NULL, sumfunObj, \
wkeys, wOut); \
if (Iwrite) \
atomicAdd(&ourOut[myKey[RESID % nMultires]], myOut[RESID % nMultires]); \
if (check){ \
nSameTot += nSame; \
checkStrategyFun(&reduce, nSame, nSameTot, step, 0); \
check = false; \
} \
} else { \
if (histotype == histogram_atomic_inc) \
atomicAdd(&ourOut[myKey[RESID % nMultires]], 1); \
else if (histotype == histogram_atomic_add) \
atomicAdd(&ourOut[myKey[RESID % nMultires]], myOut[RESID % nMultires]); \
} } \
} while(0)
ADD_ONE_RESULT(0);
ADD_ONE_RESULT(1);
ADD_ONE_RESULT(2);
ADD_ONE_RESULT(3);
//#pragma unroll
for (int resid = 4; resid < nMultires; resid++)
{
ADD_ONE_RESULT(resid);
}
}
myStart += MEDIUM_BLOCK_SIZE;
}
if (myStart < end)
{
int myKey[nMultires];
OUTPUTTYPE myOut[nMultires];
xformObj(input, myStart, &myKey[0], &myOut[0],nMultires);
for (int resid = 0; resid < nMultires; resid++)
{
if (histotype == histogram_atomic_inc)
{
atomicAdd(&ourOut[myKey[resid]], 1);
}
else if (histotype == histogram_atomic_add)
{
atomicAdd(&ourOut[myKey[resid]], myOut[resid]);
}
}
}
#endif // __CUDA_ARCH__
}
#endif // USE_MEDIUM_PATH
static int determineHashSizeLog2(size_t outSize, int* nblocks, hipDeviceProp_t* props)
{
// TODO: Magic hat-constant 500 reserved for inputs, how to compute?
int sharedTot = (props->sharedMemPerBlock - 500) /* / LBLOCK_WARPS*/;
//int sharedTot = 32000;
// How many blocks of 32 keys could we have?
//int nb32Max = sharedTot / (32 * outSize);
// But ideally we should run at least 4 active blocks per SM,
// How can we balance this? Well - with very low ablock-values (a),
// we perform bad, but after 4, adding more
// will help less and less, whereas adding more to the hash always helps!
#if USE_ATOMICS_HASH
outSize += sizeof(int);
#else
outSize += sizeof(int);
#endif
int naMax = sharedTot / (32 * outSize);
while (naMax > numActiveUpperLimit) naMax >>= 1;
int nb32 = sharedTot / (32 * outSize * naMax);
// Now we have "number of pieces", use it to compute some nice power-of-two hash-size
int hashSize = nb32 * 32;
unsigned int res = 0;
if (hashSize >= 1<<16) { hashSize >>= 16; res += 16; }
if (hashSize >= 1<< 8) { hashSize >>= 8; res += 8; }
if (hashSize >= 1<< 4) { hashSize >>= 4; res += 4; }
if (hashSize >= 1<< 2) { hashSize >>= 2; res += 2; }
if (hashSize >= 1<< 1) { res += 1; }
    // Now res holds the log2 of the hash size => n active blocks = sharedTot / (outSize << res)
*nblocks = (sharedTot / (outSize << res)) * props->multiProcessorCount;
if (*nblocks > props->multiProcessorCount * 8) *nblocks = props->multiProcessorCount * 8;
return res;
}
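/*
 * Worked example (assumption: an sm_20-class device with 48 KB of shared
 * memory per block and 4-byte output bins): sharedTot = 49152 - 500 = 48652
 * and outSize grows to 8 after adding the key, so naMax = 48652 / 256 = 190,
 * halved down to 23 (below numActiveUpperLimit = 24); nb32 = 48652 / 5888 = 8,
 * hashSize = 256 and the returned log2 is 8, i.e. a 256-entry hash table.
 */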
template <typename OUTPUTTYPE>
__global__
void initKernel(OUTPUTTYPE* tmpOut, OUTPUTTYPE zeroVal, int tmpOutSize, int steps)
{
int idx = blockIdx.x * blockDim.x * steps + threadIdx.x;
for (int step = 0; step < steps; step++)
{
if (idx < tmpOutSize)
tmpOut[idx] = zeroVal;
idx += blockDim.x;
}
}
template <histogram_type histotype, typename OUTPUTTYPE>
static int getLargeBinTmpbufsize(int nOut, hipDeviceProp_t* props, int cuda_arch)
{
int nblocks;
int hashSizelog2 = determineHashSizeLog2(sizeof(OUTPUTTYPE), &nblocks, props);
int arrLen = nblocks;
#if USE_MEDIUM_PATH
if (cuda_arch >= 120 && (histotype == histogram_atomic_inc || histotype == histogram_atomic_add))
arrLen *= MED_THREAD_DEGEN;
#endif
return (arrLen + 1) * nOut * sizeof(OUTPUTTYPE);
}
template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
static
void callHistogramKernelLargeNBins(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
hipDeviceProp_t* props, int cuda_arch, hipStream_t stream,
int* getTmpBufSize,
void* tmpBuffer,
bool outInDev)
{
int nblocks;
int hashSizelog2 = determineHashSizeLog2(sizeof(OUTPUTTYPE), &nblocks, props);
INDEXT size = end - start;
// Check if there is something to do actually...
if (end <= start)
{
        if (getTmpBufSize) *getTmpBufSize = 0; // report that no temporary buffer is needed
return;
}
dim3 block = LBLOCK_SIZE;
dim3 grid = nblocks;
int arrLen = nblocks;
#if USE_MEDIUM_PATH
if (cuda_arch >= 120 && (histotype == histogram_atomic_inc || histotype == histogram_atomic_add))
arrLen *= MED_THREAD_DEGEN;
#endif
INDEXT nSteps = size / (INDEXT)( LBLOCK_SIZE * nblocks);
OUTPUTTYPE* tmpOut;
//int n = nblocks;
if (getTmpBufSize) {
*getTmpBufSize = (arrLen + 1) * nOut * sizeof(OUTPUTTYPE);
return;
}
if (tmpBuffer){
tmpOut = (OUTPUTTYPE*)tmpBuffer;
}
else {
size_t allocSize = (arrLen + 1) * nOut * sizeof(OUTPUTTYPE);
hipMalloc((void**)&tmpOut, allocSize);
}
//printf("Using hash-based histogram: hashsize = %d, nblocksToT = %d\n", (1 << hashSizelog2), nblocks);
#if USE_ATOMICS_HASH
int extSharedNeeded = (1 << hashSizelog2) * (sizeof(OUTPUTTYPE) + sizeof(int));
#else
int extSharedNeeded = (1 << hashSizelog2) * (sizeof(OUTPUTTYPE) + sizeof(int) * 2);
#endif
// The shared memory here is needed for the reduction code (ie. reduce to unique)
// TODO: new hash-code could probably reuse the memory reserved for the hash-table,
// it would just need to reinit the keys to -1 after use - think about it.
if (cuda_arch >= 200 && histotype == histogram_atomic_inc)
{
if (hashSizelog2 < LBLOCK_SIZE_LOG2)
extSharedNeeded += (sizeof(int) << (LBLOCK_SIZE_LOG2 - hashSizelog2));
}
else
{
if (hashSizelog2 < LBLOCK_SIZE_LOG2)
extSharedNeeded += ((sizeof(OUTPUTTYPE) + sizeof(int)) << (LBLOCK_SIZE_LOG2 - hashSizelog2));
}
//printf("binsets = %d, steps = %d\n", (1 << nKeysetslog2), nsteps);
{
#define IBLOCK_SIZE_LOG2 7
#define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2)
int initPaddedSize =
((arrLen * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1));
const dim3 initblock = IBLOCK_SIZE;
dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 );
int nsteps = 1;
while (initgrid.x > (1 << 14))
{
initgrid.x >>= 1;
nsteps <<= 1;
if (nsteps * initgrid.x * IBLOCK_SIZE < arrLen * nOut)
initgrid.x++;
}
hipLaunchKernelGGL(( initKernel), dim3(initgrid),dim3(initblock),0,stream, tmpOut, zero, arrLen * nOut, nsteps);
}
//int medExtShared = nOut;
//const int shLimit = 0;
//const int shLimit = 0;//16000 / 2;
// Codepath below is a lot faster for random bins, a tad faster for real use-case
// and a lot slower for degenerate key-distributions
#if USE_MEDIUM_PATH
if (cuda_arch >= 120 && (histotype == histogram_atomic_inc || histotype == histogram_atomic_add))
{
const dim3 block = MEDIUM_BLOCK_SIZE;
dim3 grid = nblocks;
INDEXT nSteps = size / (INDEXT)( MEDIUM_BLOCK_SIZE * nblocks);
INDEXT nFullSteps = 1;
if (nSteps <= 0)
{
nFullSteps = 0;
nblocks = (size >> MEDIUM_BLOCK_SIZE_LOG2);
if ((nblocks << MEDIUM_BLOCK_SIZE_LOG2) < size) nblocks++;
}
if (nSteps > MAX_NLHSTEPS)
{
nFullSteps = size / ( MEDIUM_BLOCK_SIZE * nblocks * MAX_NLHSTEPS);
nSteps = MAX_NLHSTEPS;
}
for (INDEXT step = 0; step < nFullSteps; step++)
{
hipLaunchKernelGGL(( histo_kernel_mediumNBins<histotype, nMultires>), dim3(grid), dim3(block), 0, stream,
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps);
start += (MEDIUM_BLOCK_SIZE * (INDEXT)nblocks * nSteps);
}
size = end - start;
nSteps = size / (INDEXT)( MEDIUM_BLOCK_SIZE * nblocks);
if (nSteps > 0)
{
hipLaunchKernelGGL(( histo_kernel_mediumNBins<histotype, nMultires>), dim3(grid), dim3(block), 0, stream,
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps);
start += (MEDIUM_BLOCK_SIZE * (INDEXT)nblocks * nSteps);
size = end - start;
}
if (size > 0)
{
int ntblocks = size / ( MEDIUM_BLOCK_SIZE );
if (ntblocks * MEDIUM_BLOCK_SIZE < size) ntblocks++;
grid.x = ntblocks;
hipLaunchKernelGGL(( histo_kernel_mediumNBins<histotype, nMultires>), dim3(grid), dim3(block), 0, stream,
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, 1);
}
}
else
#endif // USE_MEDIUM_PATH
{
INDEXT nFullSteps = 1;
if (nSteps <= 0)
{
nFullSteps = 0;
nblocks = (size >> LBLOCK_SIZE_LOG2);
if ((nblocks << LBLOCK_SIZE_LOG2) < size) nblocks++;
}
if (nSteps > MAX_NLHSTEPS)
{
nFullSteps = size / ( LBLOCK_SIZE * (INDEXT)nblocks * MAX_NLHSTEPS);
nSteps = MAX_NLHSTEPS;
}
for (int step = 0; step < nFullSteps; step++)
{
hipLaunchKernelGGL(( histo_kernel_largeNBins<histotype, nMultires>), dim3(grid), dim3(block), extSharedNeeded, stream,
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps, hashSizelog2);
start += (LBLOCK_SIZE * (INDEXT)nblocks * nSteps);
}
size = end - start;
nSteps = size / ( LBLOCK_SIZE * (INDEXT)nblocks);
if (nSteps > 0)
{
hipLaunchKernelGGL(( histo_kernel_largeNBins<histotype, nMultires>), dim3(grid), dim3(block), extSharedNeeded, stream,
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps, hashSizelog2);
start += (LBLOCK_SIZE * (INDEXT)nblocks * nSteps);
size = end - start;
}
if (size > 0)
{
int ntblocks = size / ( LBLOCK_SIZE );
if (ntblocks * LBLOCK_SIZE < size) ntblocks++;
grid.x = ntblocks;
hipLaunchKernelGGL(( histo_kernel_largeNBins<histotype, nMultires>), dim3(grid), dim3(block), extSharedNeeded, stream,
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, 1, hashSizelog2);
}
}
#if H_ERROR_CHECKS
hipError_t error = hipGetLastError();
if (error != hipSuccess)
printf("Cudaerror = %s\n", hipGetErrorString( error ));
#endif
// OK - so now tmpOut contains our gold - we just need to dig it out now
enum hipMemcpyKind fromOut = outInDev ? hipMemcpyDeviceToDevice : hipMemcpyHostToDevice;
enum hipMemcpyKind toOut = outInDev ? hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost;
if (stream != 0)
hipMemcpyAsync(&tmpOut[arrLen * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut, stream);
else
hipMemcpy(&tmpOut[arrLen * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut);
grid.x = nOut;
//grid.x = nOut >> LBLOCK_SIZE_LOG2;
//if ((grid.x << LBLOCK_SIZE_LOG2) < nOut) grid.x++;
block.x = GATHER_BLOCK_SIZE;
hipLaunchKernelGGL(( gatherKernel), dim3(grid), dim3(block), 0, stream, sumfunObj, tmpOut, nOut, arrLen /** LBLOCK_WARPS*/, zero);
// TODO: Async copy here also???
if (outInDev && stream != 0)
hipMemcpyAsync(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut, stream);
else
hipMemcpy(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut);
// CPU-code path for debugging here:
/* {
int resIdx;
int i;
OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(nblocks * nOut * sizeof(OUTPUTTYPE));
//parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE));
hipMemcpy(h_tmp, tmpOut, nblocks*nOut*sizeof(OUTPUTTYPE), hipMemcpyDeviceToHost);
for (resIdx = 0; resIdx < nOut; resIdx++)
{
OUTPUTTYPE res = out[resIdx];
for (i = 0; i < nblocks; i++)
{
res = sumfunObj(res, h_tmp[i * nOut + resIdx]);
}
out[resIdx] = sumfunObj(res, out[resIdx]);
}
free(h_tmp);
}
*/
if (!tmpBuffer)
hipFree(tmpOut);
}
static int determineNKeySetsLog2(size_t size_out, int nOut, hipDeviceProp_t* props)
{
// 32 threads per block, one block shares one binset
// Go for 2x occupancy = 64 active threads per block
// Hence if we have NBinSets, then we need tot_size x nOut x NBinSets x 2 bytes of shared
// On sm_20 we have 48 000 bytes and on sm_1x 16 000
// Hence nbinsets = SharedMem / (2 * tot_size * nOut)
// For example sm_20, 16 int bins:
// nbinsets = 48000 / 2 * 4 * 16 = 48000 / 2*64 = 48000 / 128 = 375...
// More than enough, but is it enough active threadblocks??
int nBytesShared = 16000;
size_t sizetot = size_out + sizeof(int);
int nBinSets = nBytesShared / (sizetot * 2 * nOut);
// NOTE: Disabling for now - advantages seem nonexistent
// if (nBinSets >= 32) return 5;
// if (nBinSets >= 16) return 4;
// if (nBinSets >= 8) return 3;
// if (nBinSets >= 4) return 2;
// if (nBinSets >= 2) return 1;
if (nBinSets >= 1) return 0;
return -1;
}
#if __CUDA_ARCH__ >= 200
template <int nMultires>
static inline __device__
bool checkForReduction (int* myKeys, int* rkeys)
{
// Idea - if there is a large number of degenerate entries then we don't need to check them all for degeneracy
// TODO: Implement the wonderful idea
//return ((threadIdx.x >> 5) & 3) < 3;
#if 1
bool myKeyDegenerate;
//TAKE_WARP_MUTEX(0);
rkeys[threadIdx.x & 31] = myKeys[0];
// Check two thirds
myKeyDegenerate =
(myKeys[0] == (rkeys[(threadIdx.x + 1) & 31]))
/*||
(myKeys[0] == (rkeys[(threadIdx.x + 8) & 31]))*/;
//GIVE_WARP_MUTEX(0);
unsigned int degenMask = __ballot(myKeyDegenerate);
// Estimate number of degenerate keys - if all are degenerate, the estimate is accurate
int nDegen = __popc(degenMask);
if (nDegen > HISTOGRAM_DEGEN_LIMIT)
return true;
else
return false;
#endif
}
#endif
template <histogram_type histotype, int nBinSetslog2, int nMultires, bool laststeps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
static inline __device__
void histogramKernel_stepImpl(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT end,
OUTPUTTYPE zero,
int nOut, INDEXT startidx,
OUTPUTTYPE* bins, int* locks,
OUTPUTTYPE* rvals, int* rkeys,
int* doReduce, bool checkReduce,
int* warpmutex)
{
int myKeys[nMultires];
OUTPUTTYPE vals[nMultires];
bool doWrite = true;
if (laststeps){
if (startidx < end)
{
xformObj(input, startidx, &myKeys[0], &vals[0], nMultires);
}
else
{
doWrite = false;
#pragma unroll
for (int r = 0; r < nMultires; r++){
vals[r] = zero;
myKeys[r] = -1;
}
}
}
else
{
xformObj(input, startidx, &myKeys[0], &vals[0], nMultires);
}
// See keyIndex-reasoning above
int binSet = (threadIdx.x & ((1 << nBinSetslog2) - 1));
#if __CUDA_ARCH__ >= 200
/* if (laststeps){
*doReduce = false;
}
else*/
{
if (checkReduce){
*doReduce = checkForReduction<nMultires>(myKeys, rkeys);
if (histotype == histogram_generic || histotype == histogram_atomic_add){
__shared__ int tmp;
tmp = 0;
__syncthreads();
if (*doReduce && ((threadIdx.x & 31) == 0)) atomicAdd(&tmp, 1);
__syncthreads();
if (tmp > HBLOCK_SIZE / 2)
*doReduce = true;
else
*doReduce = false;
}
//if (laststeps) *doReduce = false;
/* __syncthreads();
bool tmpred = checkForReduction<nMultires>(myKeys, rkeys);
if ((threadIdx.x & 31) == 0) atomicExch(doReduce, (int)tmpred);
__syncthreads();*/
}
}
#endif
// TODO: Unroll this later - nvcc (at least older versions) can't unroll atomics (?)
// TODO: How to avoid bank-conflicts? Any way to avoid?
#if __CUDA_ARCH__ >= 200
#define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \
int keyIndex = doWrite == false ? 0 : (myKeys[(RESID % nMultires)] << nBinSetslog2) + binSet; \
if (*doReduce){\
if (histotype == histogram_generic || histotype == histogram_atomic_add){\
bool Iwrite;\
TAKE_WARP_MUTEX(0);\
Iwrite = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\
if (Iwrite && doWrite) bins[keyIndex] = sumfunObj(bins[keyIndex], vals[(RESID % nMultires)]);\
/*if (histotype == histogram_generic) myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);\
else wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);*/\
GIVE_WARP_MUTEX(0);\
} else { \
bool Iwrite = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\
wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex); \
}\
} else {\
if (histotype == histogram_generic)\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
else if (histotype == histogram_atomic_add)\
wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
else if (histotype == histogram_atomic_inc)\
wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
else{\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
}\
} } } while (0)
#else
#define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \
int keyIndex = doWrite == false ? 0 : (myKeys[(RESID % nMultires)] << nBinSetslog2) + binSet; \
if (histotype == histogram_generic)\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
else if (histotype == histogram_atomic_add)\
wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
else if (histotype == histogram_atomic_inc)\
wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
else{\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
}\
} } while (0)
#endif
ONE_HS_STEP(0);
ONE_HS_STEP(1);
ONE_HS_STEP(2);
ONE_HS_STEP(3);
//#pragma unroll
for (int resid = 4; resid < nMultires; resid++){
ONE_HS_STEP(resid);
}
#undef ONE_HS_STEP
}
template <int nBinSetslog2, histogram_type histotype, int nMultires, bool lastSteps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
__global__
void histogramKernel_sharedbins_new(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero,
OUTPUTTYPE* blockOut, int nOut,
int outStride,
int nSteps)
{
extern __shared__ int cudahistogram_binstmp[];
OUTPUTTYPE* bins = (OUTPUTTYPE*)&(*cudahistogram_binstmp);
int* locks = (int*)&bins[(nOut << nBinSetslog2)];
int* rkeys = NULL;
OUTPUTTYPE* rvals = NULL;
//__shared__
int warpmutex;
//INIT_WARP_MUTEX2(warpmutex);
#if __CUDA_ARCH__ >= 200
int warpId = threadIdx.x >> 5;
if (histotype == histogram_generic)
rkeys = &locks[(nOut << nBinSetslog2)];
else
rkeys = locks;
rvals = (OUTPUTTYPE*)&rkeys[32];
if (histotype == histogram_atomic_inc){
rkeys = &rkeys[warpId << 5];
//rvals = &rvals[warpId << 5];
}
#endif
const int nBinSets = 1 << nBinSetslog2;
// Reset all bins to zero...
for (int j = 0; j < ((nOut << nBinSetslog2) >> HBLOCK_SIZE_LOG2) + 1; j++)
{
int bin = (j << HBLOCK_SIZE_LOG2) + threadIdx.x;
if (bin < (nOut << nBinSetslog2)){
bins[bin] = zero;
}
}
#if HBLOCK_SIZE > 32
__syncthreads();
#endif
int outidx = blockIdx.x;
INDEXT startidx = (INDEXT)((outidx * nSteps) * HBLOCK_SIZE + start + threadIdx.x);
/*__shared__*/ int doReduce; // local var - TODO: Is this safe??
doReduce = 0;
#define MED_UNROLL_LOG2 2
#define MED_UNROLL (1 << MED_UNROLL_LOG2)
int step;
for (step = 0; step < (nSteps >> MED_UNROLL_LOG2); step++)
{
//#pragma unroll
//for (int substep = 0; substep < MED_UNROLL; substep++){
histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, true, &warpmutex);
startidx += HBLOCK_SIZE;
histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex);
startidx += HBLOCK_SIZE;
histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex);
startidx += HBLOCK_SIZE;
histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex);
startidx += HBLOCK_SIZE;
//}
}
step = (nSteps >> MED_UNROLL_LOG2) << MED_UNROLL_LOG2;
for (; step < nSteps ; step++)
{
histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, lastSteps, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, (step & 7) == 0, &warpmutex);
startidx += HBLOCK_SIZE;
}
#undef MED_UNROLL
#undef MED_UNROLL_LOG2
#if HBLOCK_SIZE > 32
__syncthreads();
#endif
// Finally put together the bins
for (int j = 0; j < (nOut >> HBLOCK_SIZE_LOG2) + 1; j++) {
int key = (j << HBLOCK_SIZE_LOG2) + threadIdx.x;
if (key < nOut)
{
OUTPUTTYPE res = blockOut[key * outStride + outidx];
//int tmpBin = bin;
#pragma unroll
for (int k = 0; k < nBinSets; k++)
{
//tmpBin += nOut;
res = sumfunObj(res, bins[(key << nBinSetslog2) + k]);
}
//printf("tid:%02d, write out bin: %02d, \n", threadIdx.x, bin);
blockOut[key * outStride + outidx] = res;
}
}
}
template <histogram_type histotype, typename OUTPUTTYPE>
static int getMediumHistoTmpbufSize(int nOut, hipDeviceProp_t* props)
{
int nblocks = props->multiProcessorCount * 8;
// NOTE: The other half is used by multireduce...
return 2 * nblocks * nOut * sizeof(OUTPUTTYPE);
}
template <histogram_type histotype, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
static
void callHistogramKernelImpl(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
hipDeviceProp_t* props,
hipStream_t stream,
size_t* getTmpBufSize,
void* tmpBuffer,
bool outInDev,
int cuda_arch)
{
INDEXT size = end - start;
// Check if there is something to do actually...
if (end <= start)
{
if (getTmpBufSize) *getTmpBufSize = 0;
return;
}
int nblocks = props->multiProcessorCount * 8;
// Assert that our grid is not too large!
//MY_ASSERT(n < 65536 && "Sorry - currently we can't do such a big problems with histogram-kernel...");
// One entry for each output for each thread-block:
//OUTPUTTYPE* tmpOut = (OUTPUTTYPE*)parallel_alloc(MemType_DEV, n * nOut * sizeof(OUTPUTTYPE));
OUTPUTTYPE* tmpOut;
if (getTmpBufSize)
{
// NOTE: The other half is used by multireduce...
*getTmpBufSize = 2 * nblocks * nOut * sizeof(OUTPUTTYPE);
return;
}
int nsteps = size / ( nblocks * HBLOCK_SIZE );
if (nsteps * nblocks * HBLOCK_SIZE < size) nsteps++;
if (nsteps > MAX_NHSTEPS)
nsteps = MAX_NHSTEPS;
if (tmpBuffer)
{
char* tmpptr = (char*)tmpBuffer;
tmpOut = (OUTPUTTYPE*)tmpBuffer;
tmpBuffer = (void*)&tmpptr[nblocks * nOut * sizeof(OUTPUTTYPE)];
}
else
{
hipMalloc((void**)&tmpOut, nblocks * nOut * sizeof(OUTPUTTYPE));
}
/* For block size other that power of two:
const dim3 grid = size / BLOCK_SIZE +
( size % BLOCK_SIZE == 0 ? 0 : 1 );
*/
//MY_ASSERT(size > 0);
//hipMemsetAsync(tmpOut, 0xFF, n * nOut * sizeof(OUTPUTTYPE), CURRENT_STREAM() );
//hipMemset(tmpOut, 0xFF, n * nOut * sizeof(OUTPUTTYPE) );
{
#define IBLOCK_SIZE_LOG2 7
#define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2)
int initPaddedSize =
((nblocks * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1));
const dim3 initblock = IBLOCK_SIZE;
dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 );
int nsteps = 1;
while (initgrid.x > (1 << 14))
{
initgrid.x >>= 1;
nsteps <<= 1;
if (nsteps * initgrid.x * IBLOCK_SIZE < nblocks * nOut)
initgrid.x++;
}
hipLaunchKernelGGL(( initKernel), dim3(initgrid),dim3(initblock),0,stream, tmpOut, zero, nblocks * nOut, nsteps);
#undef IBLOCK_SIZE_LOG2
#undef IBLOCK_SIZE
}
int nKeysetslog2 = determineNKeySetsLog2(sizeof(OUTPUTTYPE), nOut, props);
if (nKeysetslog2 < 0) nKeysetslog2 = 0;
int extSharedNeeded = ((nOut << nKeysetslog2)) * (sizeof(OUTPUTTYPE)); // bins
if (histotype == histogram_generic || cuda_arch < 130)
extSharedNeeded += ((nOut << nKeysetslog2)) * (sizeof(int)); // locks
if (cuda_arch >= 200)
{
// Reduction stuff:
if (histotype == histogram_generic || histotype == histogram_atomic_add)
{
extSharedNeeded += ((sizeof(OUTPUTTYPE) + sizeof(int)) << 5); // reduction values
}
else
{
extSharedNeeded += (sizeof(int) << HBLOCK_SIZE_LOG2); // one reduction-key slot per thread of the block
}
}
/*int extSharedNeeded = ((nOut << nKeysetslog2)) * (sizeof(OUTPUTTYPE) + sizeof(int)) + (sizeof(OUTPUTTYPE) * HBLOCK_SIZE);
if (nOut < HBLOCK_SIZE) extSharedNeeded += sizeof(int) * (HBLOCK_SIZE - nOut);
if (cuda_arch < 130)
extSharedNeeded += ((nOut << nKeysetslog2)) * (sizeof(int));*/
//printf("binsets = %d, steps = %d\n", (1 << nKeysetslog2), nsteps);
int nOrigBlocks = nblocks;
INDEXT myStart = start;
while(myStart < end)
{
bool lastStep = false;
if (myStart + nsteps * nblocks * HBLOCK_SIZE > end)
{
size = end - myStart;
nsteps = (size) / (nblocks * HBLOCK_SIZE);
if (nsteps < 1)
{
lastStep = true;
nsteps = 1;
nblocks = size / HBLOCK_SIZE;
if (nblocks * HBLOCK_SIZE < size)
nblocks++;
}
}
dim3 grid = nblocks;
dim3 block = HBLOCK_SIZE;
switch (nKeysetslog2)
{
case 0:
if (lastStep)
hipLaunchKernelGGL(( histogramKernel_sharedbins_new<0, histotype, nMultires, true>), dim3(grid), dim3(block), extSharedNeeded, stream,
input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps);
else
hipLaunchKernelGGL(( histogramKernel_sharedbins_new<0, histotype, nMultires, false>), dim3(grid), dim3(block), extSharedNeeded, stream,
input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps);
break;
/* case 1:
histogramKernel_sharedbins_new<1, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps);
break;
case 2:
histogramKernel_sharedbins_new<2, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps);
break;
case 3:
histogramKernel_sharedbins_new<3, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps);
break;
case 4:
histogramKernel_sharedbins_new<4, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps);
break;
case 5:
histogramKernel_sharedbins_new<5, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps);
break;*/
case -1:
// TODO: Error?
//assert(0); // "Sorry - not implemented yet"
break;
}
myStart += nsteps * nblocks * HBLOCK_SIZE;
}
#if H_ERROR_CHECKS
hipError_t error = hipGetLastError();
if (error != hipSuccess)
printf("Cudaerror = %s\n", hipGetErrorString( error ));
#endif
// OK - so now tmpOut contains our gold - we just need to dig it out now
callMultiReduce(nOrigBlocks, nOut, out, tmpOut, sumfunObj, zero, stream, tmpBuffer, outInDev);
// Below same as host-code
#if 0
{
int resIdx;
int i;
OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(n * nOut * sizeof(OUTPUTTYPE));
//parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE));
hipMemcpy(h_tmp, tmpOut, n*nOut*sizeof(OUTPUTTYPE), hipMemcpyDeviceToHost);
for (resIdx = 0; resIdx < nOut; resIdx++)
{
OUTPUTTYPE res = out[resIdx];
for (i = 0; i < n; i++)
{
res = sumfunObj(res, h_tmp[i + resIdx * n]);
}
out[resIdx] = res;
}
free(h_tmp);
}
#endif
//parallel_free(tmpOut, MemType_DEV);
if (!tmpBuffer)
hipFree(tmpOut);
}
template <typename OUTTYPE>
static
bool binsFitIntoShared(int nOut, OUTTYPE zero, hipDeviceProp_t* props, int cuda_arch)
{
// Assume here we can only use 16kb of shared in total per SM
// Also lets take minimal of 2 threads per functional unit active, in
// order to be able to hide at least some latencies - for Fermi this means 32 * 2 = 64
// of active threads needed in total (Note: This is minimal and will hurt perf).
// Also we run blocks of 32 threads and each block needs its own bin - therefore
// we need in total 2 full bin-sets per SM plus 32 bins for the one for the working part
// of the algorithm.
// Due to these considerations we infer that we can fit it nicely in, if
// (4 binsets x Nbins/binset + 32) x sizeof(OUTYPE) < 16kib - let's take here 16kb to have some room
// for required parameters
// Example: 64 doubles: 8bytes per number double => (4 * 64 + 32) * 8bytes = 288 * 8 bytes = 2304 bytes -> Easy
// How many bins of doubles can we do with these limits?
// ( 4 * x + 32) * 8bytes = 16000 bytes <=> 4x = 2000 - 32 => x = 2000/4 - 32/4 = 500 - 8 = 492 bins.
// TODO: A possibly faster version of this would be to share one set of bins over as many warps as possible
// for example, if we would use 512 threads = 16 warps, then this would be fine for hiding probably all major latencies
// and we could get away with just one binset on SM:
// ( x + 512 ) * 8bytes = 16000 bytes <=> x = 2000 - 512 = 1488 bins! With better latency-hiding
// On the other hand this requires atomic operations on the shared memory, which could be somewhat slower on
// arbitrary types, but all in all, this would seem to provide a better route. At least worth investigating...
int shlimit = props->sharedMemPerBlock - 300;
int limit = shlimit;
// TODO: Pessimistic limit
int need = (sizeof(zero) + sizeof(int)) * nOut;
if (cuda_arch >= 200)
need += HBLOCK_SIZE * sizeof(int) + 32 * sizeof(zero);
if (need <= limit)
return true;
return false;
}
template <bool subHisto, histogram_type histotype, int nBinSetslog2, int nMultires, bool laststeps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
static inline __device__
void histogramKernel_stepImplMulti(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT end,
OUTPUTTYPE zero,
int subsize, INDEXT startidx,
OUTPUTTYPE* bins, int* locks,
OUTPUTTYPE* rvals, int* rkeys,
int* doReduce, bool checkReduce,
int* warpmutex, int binOffset)
{
int myKeys[nMultires];
OUTPUTTYPE vals[nMultires];
bool doWrite = true;
if (laststeps){
if (startidx < end)
{
xformObj(input, startidx, &myKeys[0], &vals[0], nMultires);
}
else
{
doWrite = false;
#pragma unroll
for (int r = 0; r < nMultires; r++){
vals[r] = zero;
myKeys[r] = -1;
}
}
}
else
{
xformObj(input, startidx, &myKeys[0], &vals[0], nMultires);
}
#if __CUDA_ARCH__ >= 200
/* if (laststeps){
*doReduce = false;
}
else*/
{
if (checkReduce){
*doReduce = checkForReduction<nMultires>(myKeys, rkeys);
if (histotype == histogram_generic || histotype == histogram_atomic_add){
__shared__ int tmp;
tmp = 0;
__syncthreads();
if (*doReduce && ((threadIdx.x & 31) == 0)) atomicAdd(&tmp, 1);
__syncthreads();
if (tmp > HMBLOCK_SIZE / 2)
*doReduce = true;
else
*doReduce = false;
}
//if (laststeps) *doReduce = false;
/* __syncthreads();
bool tmpred = checkForReduction<nMultires>(myKeys, rkeys);
if ((threadIdx.x & 31) == 0) atomicExch(doReduce, (int)tmpred);
__syncthreads();*/
}
}
#endif
// TODO: Unroll this later - nvcc (at least older versions) can't unroll atomics (?)
// TODO: How to avoid bank-conflicts? Any way to avoid?
#if __CUDA_ARCH__ >= 200
#define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \
int keyIndex = (myKeys[(RESID % nMultires)] - binOffset); \
bool Iwrite = keyIndex >= 0 && keyIndex < subsize && doWrite;\
if (!Iwrite) keyIndex = 0; \
if (*doReduce){\
if (histotype == histogram_generic || histotype == histogram_atomic_add){\
TAKE_WARP_MUTEX(0);\
bool Iwrite2 = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\
if (Iwrite && Iwrite2) \
bins[keyIndex] = sumfunObj(bins[keyIndex], vals[(RESID % nMultires)]);\
/*if (histotype == histogram_generic) myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);\
else wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);*/\
GIVE_WARP_MUTEX(0);\
} else { \
bool Iwrite2 = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\
wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && Iwrite2, warpmutex); \
}\
} else {\
if (!Iwrite) keyIndex = 0;\
if (histotype == histogram_generic)\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
else if (histotype == histogram_atomic_add)\
wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
else if (histotype == histogram_atomic_inc)\
wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
else{\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
}\
} } } while (0)
#else
#define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \
int keyIndex = (myKeys[(RESID % nMultires)] - binOffset); \
bool Iwrite = keyIndex >= 0 && keyIndex < subsize && doWrite;\
if (!Iwrite) keyIndex = 0;\
if (histotype == histogram_generic)\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
else if (histotype == histogram_atomic_add)\
wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
else if (histotype == histogram_atomic_inc)\
wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
else{\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
}\
} } while (0)
#endif
ONE_HS_STEP(0);
ONE_HS_STEP(1);
ONE_HS_STEP(2);
ONE_HS_STEP(3);
//#pragma unroll
for (int resid = 4; resid < nMultires; resid++){
ONE_HS_STEP(resid);
}
#undef ONE_HS_STEP
}
template <histogram_type histotype, int nMultires, bool lastSteps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
__global__
void histogramKernel_multipass(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero,
OUTPUTTYPE* blockOut, int nOut,
int outStride,
int nSteps,
int subsize)
{
extern __shared__ int cudahistogram_binstmp[];
OUTPUTTYPE* bins = (OUTPUTTYPE*)&(*cudahistogram_binstmp);
int* locks = (int*)&bins[subsize];
int* rkeys = NULL;
OUTPUTTYPE* rvals = NULL;
//__shared__
int warpmutex;
//INIT_WARP_MUTEX2(warpmutex);
#if __CUDA_ARCH__ >= 200
int warpId = threadIdx.x >> 5;
if (histotype == histogram_generic)
rkeys = &locks[subsize];
else
rkeys = locks;
rvals = (OUTPUTTYPE*)&rkeys[32];
if (histotype == histogram_atomic_inc){
rkeys = &rkeys[warpId << 5];
//rvals = &rvals[warpId << 5];
}
#endif
// Reset all bins to zero...
for (int j = 0; j < (subsize >> HMBLOCK_SIZE_LOG2) + 1; j++)
{
int bin = (j << HMBLOCK_SIZE_LOG2) + threadIdx.x;
if (bin < subsize){
bins[bin] = zero;
}
}
#if HMBLOCK_SIZE > 32
__syncthreads();
#endif
int outidx = blockIdx.y;
int binOffset = blockIdx.x * subsize;
INDEXT startidx = (INDEXT)((outidx * nSteps) * HMBLOCK_SIZE + start + threadIdx.x);
int doReduce; // local var - TODO: Is this safe??
doReduce = 0;
#define MED_UNROLL_LOG2 2
#define MED_UNROLL (1 << MED_UNROLL_LOG2)
int step;
for (step = 0; step < (nSteps >> MED_UNROLL_LOG2); step++)
{
histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, true, &warpmutex, binOffset);
startidx += HMBLOCK_SIZE;
histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex, binOffset);
startidx += HMBLOCK_SIZE;
histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex, binOffset);
startidx += HMBLOCK_SIZE;
histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex, binOffset);
startidx += HMBLOCK_SIZE;
}
step = (nSteps >> MED_UNROLL_LOG2) << MED_UNROLL_LOG2;
for (; step < nSteps ; step++)
{
histogramKernel_stepImplMulti<true, histotype, 0, nMultires, lastSteps, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, (step & 7) == 0, &warpmutex, binOffset);
startidx += HMBLOCK_SIZE;
}
#undef MED_UNROLL
#undef MED_UNROLL_LOG2
#if HMBLOCK_SIZE > 32
__syncthreads();
#endif
// Finally put together the bins
for (int j = 0; j < (subsize >> HMBLOCK_SIZE_LOG2) + 1; j++) {
int key = (j << HMBLOCK_SIZE_LOG2) + threadIdx.x;
if (key < subsize)
{
OUTPUTTYPE res = blockOut[(key + binOffset) * outStride + outidx];
//int tmpBin = bin;
res = sumfunObj(res, bins[key]);
//printf("tid:%02d, write out bin: %02d, \n", threadIdx.x, bin);
blockOut[(key + binOffset) * outStride + outidx] = res;
}
}
}
static int determineSubHistoSize(int nOut, size_t outsize, histogram_type histotype, int cuda_arch, hipDeviceProp_t* props)
{
int shlimit = props->sharedMemPerBlock - 300;
int neededPerKey = outsize;
if (histotype == histogram_generic || cuda_arch < 130)
neededPerKey += (sizeof(int)); // locks
int neededConst = 0;
if (cuda_arch >= 200)
{
// Reduction stuff:
if (histotype == histogram_generic || histotype == histogram_atomic_add)
{
neededConst += (outsize + sizeof(int)) << 5; // reduction values
}
else
{
neededConst += (sizeof(int) << HMBLOCK_SIZE_LOG2); // one reduction-key slot per thread of the block
}
}
int result = (shlimit - neededConst) / (2*neededPerKey);
int res = 0;
if (result >= 1<<16) { result >>= 16; res += 16; }
if (result >= 1<< 8) { result >>= 8; res += 8; }
if (result >= 1<< 4) { result >>= 4; res += 4; }
if (result >= 1<< 2) { result >>= 2; res += 2; }
if (result >= 1<< 1) { res += 1; }
return (1 << res);
}
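// Worked example (added note, illustrative numbers only): on an sm_20-class device with
// 48 KiB of shared memory, 4-byte bins and histogram_generic, shlimit ~= 48852,
// neededPerKey = 4 + 4 = 8 and neededConst = (4 + 4) << 5 = 256, so
// result = (48852 - 256) / (2 * 8) ~= 3037, and the returned sub-histogram size is the
// largest power of two not above that, i.e. 2048 bins per pass.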
template <histogram_type histotype, typename OUTPUTTYPE>
static int getMultipassBufSize(int nOut, hipDeviceProp_t* props, int cuda_arch)
{
int subsize = determineSubHistoSize(nOut, sizeof(OUTPUTTYPE), histotype, cuda_arch, props);
int nDegenBlocks = nOut / subsize;
if (subsize * nDegenBlocks < nOut) nDegenBlocks++;
int nblocks = props->multiProcessorCount;
if (nDegenBlocks < 8)
nblocks = props->multiProcessorCount * 8 / nDegenBlocks;
//int nblocks = props->multiProcessorCount * 8;
// NOTE: The other half is used by multireduce...
//printf("getMultipassBufSize(%d) = %d\n", nOut, 2 * nblocks * nOut * sizeof(OUTPUTTYPE));
return 2 * nblocks * nOut * sizeof(OUTPUTTYPE);
}
template <histogram_type histotype, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
static
void callHistogramKernelMultiPass(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
hipDeviceProp_t* props,
hipStream_t stream,
void* tmpBuffer,
bool outInDev,
int cuda_arch)
{
INDEXT size = end - start;
if (end <= start)
return;
//int debugs = 0;
int subsize = determineSubHistoSize(nOut, sizeof(OUTPUTTYPE), histotype, cuda_arch, props);
int nDegenBlocks = nOut / subsize;
if (subsize * nDegenBlocks < nOut) nDegenBlocks++;
int nblocks = props->multiProcessorCount;
if (nDegenBlocks < 8)
nblocks = props->multiProcessorCount * 8 / nDegenBlocks;
OUTPUTTYPE* tmpOut;
int nsteps = size / ( nblocks * HMBLOCK_SIZE );
if (nsteps * nblocks * HMBLOCK_SIZE < size) nsteps++;
if (nsteps > MAX_MULTISTEPS)
nsteps = MAX_MULTISTEPS;
//printf(" <debugstep = %d> ", debugs++);
bool userBuffer = false;
if (tmpBuffer)
{
char* tmpptr = (char*)tmpBuffer;
tmpOut = (OUTPUTTYPE*)tmpBuffer;
tmpBuffer = (void*)&tmpptr[nblocks * nOut * sizeof(OUTPUTTYPE)];
userBuffer = true;
//printf("tmpBuffer = &tmpptr[%d]\n", nblocks * nOut * sizeof(OUTPUTTYPE));
}
else
{
hipMalloc((void**)&tmpOut, 2 * nblocks * nOut * sizeof(OUTPUTTYPE));
//printf("tmpOut = malloc(%d)\n", 2 * nblocks * nOut * sizeof(OUTPUTTYPE));
//tmpBuffer = (void*)&tmpOut[nblocks * nOut * sizeof(OUTPUTTYPE)];
//printf("tmpBuffer = &tmpOut[%d]\n", nblocks * nOut * sizeof(OUTPUTTYPE));
}
#define IBLOCK_SIZE_LOG2 7
#define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2)
int initPaddedSize =
((nblocks * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1));
const dim3 initblock = IBLOCK_SIZE;
dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 );
int nsteps2 = 1;
while (initgrid.x > (1 << 14))
{
initgrid.x >>= 1;
nsteps2 <<= 1;
if (nsteps2 * initgrid.x * IBLOCK_SIZE < nblocks * nOut)
initgrid.x++;
}
hipLaunchKernelGGL(( initKernel), dim3(initgrid),dim3(initblock),0,stream, tmpOut, zero, nblocks * nOut, nsteps2);
#undef IBLOCK_SIZE_LOG2
#undef IBLOCK_SIZE
int extSharedNeeded = subsize * (sizeof(OUTPUTTYPE)); // bins
if (histotype == histogram_generic || cuda_arch < 130)
extSharedNeeded += subsize * (sizeof(int)); // locks
if (cuda_arch >= 200)
{
// Reduction stuff:
if (histotype == histogram_generic || histotype == histogram_atomic_add)
{
extSharedNeeded += ((sizeof(OUTPUTTYPE) + sizeof(int)) << 5); // reduction values
}
else
{
extSharedNeeded += (sizeof(int) << HMBLOCK_SIZE_LOG2); // one reduction-key slot per thread of the block
}
}
//printf(" <debugstep(init) = %d> ", debugs++);
/*int extSharedNeeded = ((nOut << nKeysetslog2)) * (sizeof(OUTPUTTYPE) + sizeof(int)) + (sizeof(OUTPUTTYPE) * HMBLOCK_SIZE);
if (nOut < HMBLOCK_SIZE) extSharedNeeded += sizeof(int) * (HMBLOCK_SIZE - nOut);
if (cuda_arch < 130)
extSharedNeeded += ((nOut << nKeysetslog2)) * (sizeof(int));*/
//printf("binsets = %d, steps = %d\n", (1 << nKeysetslog2), nsteps);
int nOrigBlocks = nblocks;
INDEXT myStart = start;
while(myStart < end)
{
bool lastStep = false;
if (myStart + nsteps * nblocks * HMBLOCK_SIZE > end)
{
size = end - myStart;
nsteps = (size) / (nblocks * HMBLOCK_SIZE);
if (nsteps < 1)
{
lastStep = true;
nsteps = 1;
nblocks = size / HMBLOCK_SIZE;
if (nblocks * HMBLOCK_SIZE < size)
nblocks++;
}
}
dim3 grid;
grid.y = nblocks;
grid.x = nDegenBlocks;
dim3 block = HMBLOCK_SIZE;
//printf(" <debugstep(main) = %d> ", debugs++);
if (lastStep)
hipLaunchKernelGGL(( histogramKernel_multipass<histotype, nMultires, true>), dim3(grid), dim3(block), extSharedNeeded, stream,
input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps, subsize);
else
hipLaunchKernelGGL(( histogramKernel_multipass<histotype, nMultires, false>), dim3(grid), dim3(block), extSharedNeeded, stream,
input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps, subsize);
myStart += nsteps * nblocks * HMBLOCK_SIZE;
}
#if H_ERROR_CHECKS
hipError_t error = hipGetLastError();
if (error != hipSuccess)
printf("Cudaerror = %s\n", hipGetErrorString( error ));
#endif
// OK - so now tmpOut contains our gold - we just need to dig it out now
//printf(" <debugstep(out) = %d> ", debugs++);
//printf("callMultiReduce(%d, %d,...)\n", nOrigBlocks, nOut);
callMultiReduce(nOrigBlocks, nOut, out, tmpOut, sumfunObj, zero, stream, tmpBuffer, outInDev);
//printf(" <debugstep(multireduce) = %d> ", debugs++);
#if H_ERROR_CHECKS
error = hipGetLastError();
if (error != hipSuccess)
printf("Cudaerror(reduce) = %s\n", hipGetErrorString( error ));
#endif
// Below same as host-code
#if 0
{
int resIdx;
int i;
OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(n * nOut * sizeof(OUTPUTTYPE));
//parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE));
hipMemcpy(h_tmp, tmpOut, n*nOut*sizeof(OUTPUTTYPE), hipMemcpyDeviceToHost);
for (resIdx = 0; resIdx < nOut; resIdx++)
{
OUTPUTTYPE res = out[resIdx];
for (i = 0; i < n; i++)
{
res = sumfunObj(res, h_tmp[i + resIdx * n]);
}
out[resIdx] = res;
}
free(h_tmp);
}
#endif
//parallel_free(tmpOut, MemType_DEV);
if (!userBuffer)
hipFree(tmpOut);
}
template <bool lastSteps, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
static inline __device__
void histoKernel_smallBinStep(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT myStart, INDEXT end,
OUTPUTTYPE* mySHBins)
{
int myKeys[nMultires];
if (lastSteps)
{
if (myStart < end)
{
OUTPUTTYPE myOut[nMultires];
xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires);
#pragma unroll
for (int res = 0; res < nMultires; res++)
{
int index = (myKeys[res]) << SMALL_BLOCK_SIZE_LOG2;
mySHBins[index] = sumfunObj(mySHBins[index], myOut[res]);
}
}
}
else
{
OUTPUTTYPE myOut[nMultires];
xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires);
#pragma unroll
for (int res = 0; res < nMultires; res++)
{
int index = (myKeys[res]) << SMALL_BLOCK_SIZE_LOG2;
mySHBins[index] = sumfunObj(mySHBins[index], myOut[res]);
}
}
}
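// Layout note (added comment): the small-bin kernels give every thread a private copy of each
// bin, stored bin-major as allbins[(bin << SMALL_BLOCK_SIZE_LOG2) + tid]. mySHBins points at
// allbins[threadIdx.x], so mySHBins[bin << SMALL_BLOCK_SIZE_LOG2] is this thread's slot for
// that bin, and neighbouring threads touch consecutive shared-memory words, which avoids
// bank conflicts.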
template <bool lastSteps, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
__global__
void histoKernel_smallBin(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero,
OUTPUTTYPE* blockOut, int nOut, int maxblocks,
int nSteps)
{
// Take care with extern - In order to have two instances of this template the
// type of the extern variables cannot change
// (ie. cannot use "extern __shared__ OUTPUTTYPE bins[]")
extern __shared__ int cudahistogram_allbinstmp[];
OUTPUTTYPE* allbins = (OUTPUTTYPE*)&(*cudahistogram_allbinstmp);
OUTPUTTYPE* mySHBins = &allbins[threadIdx.x];
OUTPUTTYPE* ourOut = &blockOut[nOut * blockIdx.x];
INDEXT myStart = start + (INDEXT)((blockIdx.x * nSteps) << SMALL_BLOCK_SIZE_LOG2) + (INDEXT)threadIdx.x;
for (int bin = 0; bin < nOut /*- nLocVars*/; bin++)
mySHBins[bin << SMALL_BLOCK_SIZE_LOG2] = zero;
// Run loops - unroll 8 steps manually
int doNSteps = (nSteps) >> 3;
for (int step = 0; step < doNSteps; step++)
{
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart, end, mySHBins);
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + SMALL_BLOCK_SIZE, end, mySHBins);
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 2*SMALL_BLOCK_SIZE, end, mySHBins);
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 3*SMALL_BLOCK_SIZE, end, mySHBins);
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 4*SMALL_BLOCK_SIZE, end, mySHBins);
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 5*SMALL_BLOCK_SIZE, end, mySHBins);
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 6*SMALL_BLOCK_SIZE, end, mySHBins);
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 7*SMALL_BLOCK_SIZE, end, mySHBins);
myStart += 8*SMALL_BLOCK_SIZE;
}
int nStepsLeft = (nSteps) - (doNSteps << 3);
for (int step = 0; step < nStepsLeft; step++)
{
histoKernel_smallBinStep<true, nMultires>(input, xformObj, sumfunObj, myStart, end, mySHBins);
myStart += SMALL_BLOCK_SIZE;
}
// In the end combine results:
#if SMALL_BLOCK_SIZE > 32
__syncthreads();
#endif
// Do first shared stuff:
int keyIndex = threadIdx.x;
while (keyIndex < nOut)
{
OUTPUTTYPE* binResults = &allbins[keyIndex << SMALL_BLOCK_SIZE_LOG2];
OUTPUTTYPE result = ourOut[keyIndex];
for (int tidx = 0; tidx < SMALL_BLOCK_SIZE; tidx++){
result = sumfunObj(result, *binResults++);
}
ourOut[keyIndex] = result;
keyIndex += SMALL_BLOCK_SIZE;
}
}
static inline __device__
int resultToInt(int resultin){ return resultin; }
static inline __device__
int resultToInt(long resultin){ return (int)resultin; }
static inline __device__
int resultToInt(long long resultin){ return (int)resultin; }
static inline __device__
int resultToInt(unsigned int resultin){ return (int)resultin; }
static inline __device__
int resultToInt(unsigned long resultin){ return (int)resultin; }
static inline __device__
int resultToInt(unsigned long long resultin){ return (int)resultin; }
template<typename OUTPUTTYPE>
static inline __device__
int resultToInt(OUTPUTTYPE resultin){ return 0; }
static inline __device__
void intToResult(int resultin, int& resultOut){ resultOut = resultin; }
static inline __device__
void intToResult(int resultin, long& resultOut){ resultOut = (long)resultin; }
static inline __device__
void intToResult(int resultin, unsigned int& resultOut){ resultOut = (unsigned )resultin; }
static inline __device__
void intToResult(int resultin, long long& resultOut){ resultOut = (long long)resultin; }
static inline __device__
void intToResult(int resultin, unsigned long& resultOut){ resultOut = (unsigned long)resultin; }
static inline __device__
void intToResult(int resultin, unsigned long long& resultOut){ resultOut = (unsigned long long)resultin; }
template<typename OUTPUTTYPE>
static inline __device__
void intToResult(int resultin, OUTPUTTYPE& resultout){ ; }
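// Note (added comment): resultToInt/intToResult let the byte-bin path accumulate the 8-bit
// per-thread counters into a plain int and then fold the sum back into OUTPUTTYPE. For
// non-integer OUTPUTTYPEs they intentionally degenerate to no-ops, which is acceptable
// because the byte-bin kernel is only used for histogram_atomic_inc (integer counts).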
template <bool lastSteps, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
static inline __device__
void histoKernel_smallBinByteOneStep(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT myStart, INDEXT end,
volatile unsigned char* mySHBins,
OUTPUTTYPE zero
)
{
if (lastSteps)
{
if (myStart < end)
{
OUTPUTTYPE myOut[nMultires];
int myKeys[nMultires];
xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires);
#pragma unroll
for (int res = 0; res < nMultires; res++)
{
// index = tid * 4 + (key / 4) * blockSize * 4 + (key % 4) - mySHBins points to allbins[4 x tid]
// Complex indexing cost: 2x bit-shift + bitwise and + addition = 4 ops...
int index = (((myKeys[res]) >> 2) << (SMALL_BLOCK_SIZE_LOG2 + 2)) + (myKeys[res] & 0x3);
mySHBins[index]++;
}
}
}
else /*if (myStart < end)*/
{
OUTPUTTYPE myOut[nMultires];
int myKeys[nMultires];
xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires);
#pragma unroll
for (int res = 0; res < nMultires; res++)
{
// index = tid * 4 + (key / 4) * blockSize * 4 + (key % 4) - mySHBins points to allbins[4 x tid]
// Complex indexing cost: 2x bit-shift + bitwise and + addition = 4 ops...
int key = myKeys[res];
int index = ((key >> 2) << (SMALL_BLOCK_SIZE_LOG2 + 2)) + (key & 0x3);
mySHBins[index]++;
}
}
}
template <histogram_type histotype, bool lastSteps, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
__global__
void histoKernel_smallBinByte(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero,
OUTPUTTYPE* blockOut, int nOut, int maxblocks,
int nSteps)
{
// Ok - idea is as follows: When we have blocksize number of threads, thread tid's nth-bin is at:
// index = tid * 4 + (bin / 4) * blocksize * 4 + (bin % 4)
// Example:
// With 32 threads bins #7, #8 and #9 will be at (7/4=1, 7%4=3, 8/4=2, 8%4=0, 9/4=2, 9%4=1):
// Bin #7 Bin #8 Bin #9 ... Bin #63
// tid | index index index ... index
// ============== ======== ======== ========
// 0 131 256 257 ... 1923
// 1 135 260 261 ... 1927
// 2 139 264 265 ... 1931
// ...
// 31 255 380 381 ... 2047
// Therefore there are blocksize x nOut number of 1-byte bins
// Outputs are gathered from time to time to 32-bit bins
//
// Example2:
// With 32 threads 7 bins
// Bin #0 Bin #1 Bin #2 Bin #3 Bin #4 Bin #5 Bin #6
// tid | index index index index index index index
// ============== ======== ======== ======== ======== ======== ========
// 0 0 1 2 3 128 129 130
// 1 4 5 6 7 132 133 134
// 2 8 9 10 11 136 137 138
// ...
// 30 120 121 122 123 248 249 250
// 31 124 125 126 127 252 253 254
//
// Example3:
// index = tid * 4 + (bin / 4) * blocksize * 4 + (bin % 4)
// With 32 threads 3 bins
// Bin #0 Bin #1 Bin #2
// tid | index index index
// ============== ======== ========
// 0 0 1 2
// 1 4 5 6
// 2 8 9 10
// ...
// 30 120 121 122
// 31 124 125 126
extern __shared__ unsigned char allbins2[];
volatile unsigned char* mySHBins = &allbins2[threadIdx.x << 2];
int padNOut = nOut + (((nOut & 0x3) != 0) ? (4 - (nOut & 0x3)) : 0);
OUTPUTTYPE* ourOut = &blockOut[nOut * blockIdx.x];
#if __CUDA_ARCH__ >= 200
OUTPUTTYPE* resultbins = ourOut;
#else
OUTPUTTYPE* resultbins = (OUTPUTTYPE*)(&allbins2[padNOut << SMALL_BLOCK_SIZE_LOG2]);
#endif
INDEXT myStart = start + (INDEXT)(((blockIdx.x * nSteps) << SMALL_BLOCK_SIZE_LOG2) + threadIdx.x);
// Run loops
//int nFullLoops = nSteps >> 7;
// Clear bins
{
int* tmpSHBins = &((int*)allbins2)[threadIdx.x];
// There are nOut x BLOCK_SIZE byte-sized bins, so nOut x BLOCK_SIZE/4 int-sized ones
for (int bin = 0; bin < (padNOut >> 2) /*- nLocVars*/; bin++)
tmpSHBins[bin << (SMALL_BLOCK_SIZE_LOG2)] = 0;
// for (int tmpbin = (bin << 2); tmpbin < padNOut; tmpbin++)
// mySHBins[tmpbin] = 0;
#if __CUDA_ARCH__ < 200
int binid = threadIdx.x;
while(binid < nOut)
{
resultbins[binid] = zero;
binid += SMALL_BLOCK_SIZE;
}
#endif
}
#if SMALL_BLOCK_SIZE > 32
__syncthreads();
#endif
const int looplim = (255 / nMultires) < 63 ? (255 / nMultires) : 63;
for (int stepsRem = nSteps; stepsRem > 0; stepsRem -= looplim)
{
if (stepsRem > looplim)
{
#define MANUAL_UNROLL 1
#if MANUAL_UNROLL
// Unroll manually
// ("unexcpected control flow" construct with #pragma unroll)
#define DO_STEP(NUM) do { if ((NUM) < looplim) { \
histoKernel_smallBinByteOneStep<lastSteps, nMultires>( \
input, xformObj, sumfunObj, myStart /*+ (NUM) * SMALL_BLOCK_SIZE*/, end,\
mySHBins, zero); myStart += SMALL_BLOCK_SIZE; \
} } while (0)
#define DO_16_STEPS(N0) do { \
DO_STEP(N0 + 0); DO_STEP(N0 + 1); DO_STEP(N0 + 2); DO_STEP(N0 + 3); \
DO_STEP(N0 + 4); DO_STEP(N0 + 5); DO_STEP(N0 + 6); DO_STEP(N0 + 7); \
DO_STEP(N0 + 8); DO_STEP(N0 + 9); DO_STEP(N0 + 10); DO_STEP(N0 + 11); \
DO_STEP(N0 + 12); DO_STEP(N0 + 13); DO_STEP(N0 + 14); DO_STEP(N0 + 15); \
} while (0)
DO_16_STEPS(0);
DO_16_STEPS(16);
DO_16_STEPS(32);
DO_16_STEPS(48);
#undef DO_16_STEPS
#undef DO_STEP
//myStart += looplim * SMALL_BLOCK_SIZE;
#else
for (int stepNum = 0; stepNum < looplim; stepNum++){
histoKernel_smallBinByteOneStep<lastSteps, nMultires>(
input,
xformObj,
sumfunObj,
myStart + stepNum * SMALL_BLOCK_SIZE, end,
mySHBins, zero);
}
myStart += looplim * SMALL_BLOCK_SIZE;
#endif // MANUAL_UNROLL
#undef MANUAL_UNROLL
}
else
{
for (int stepNum = 0; stepNum < stepsRem; stepNum++){
histoKernel_smallBinByteOneStep<lastSteps, nMultires>(
input,
xformObj,
sumfunObj,
myStart + stepNum * SMALL_BLOCK_SIZE, end,
mySHBins, zero);
}
myStart += looplim * SMALL_BLOCK_SIZE;
}
// Ok passes done - need to flush results together
{
# if SMALL_BLOCK_SIZE > 32
__syncthreads();
# endif
int binid = threadIdx.x;
while(binid < nOut)
{
// Start from own tid in order to avoid bank-conflicts:
// index = tid * 4 + 4 * (bin / 4) * blocksize + (bin % 4)
int index = (threadIdx.x << 2) + ((binid >> 2) << (SMALL_BLOCK_SIZE_LOG2 + 2)) + (binid & 0x3);
//int res = (int)allbins2[index];
int res = resultToInt(resultbins[binid]);
int ilimit = SMALL_BLOCK_SIZE - threadIdx.x;
#pragma unroll
for (int i=0; i < SMALL_BLOCK_SIZE; i++)
{
if (i == ilimit)
index -= (SMALL_BLOCK_SIZE << 2);
res += allbins2[index];
//allbins2[index] = 0;
index += 4;
}
intToResult(res, resultbins[binid]);
binid += SMALL_BLOCK_SIZE;
}
# if SMALL_BLOCK_SIZE > 32
__syncthreads();
# endif
// zero the bins
{
int* tmpSHBins = &((int*)allbins2)[threadIdx.x];
// There are nOut x BLOCK_SIZE byte-sized bins, so nOut x BLOCK_SIZE/4 int-sized ones
for (int bin = 0; bin < (padNOut >> 2) /*- nLocVars*/; bin++)
tmpSHBins[bin << (SMALL_BLOCK_SIZE_LOG2)] = 0;
}
# if SMALL_BLOCK_SIZE > 32
__syncthreads();
# endif
}
}
// In the end combine results:
#if __CUDA_ARCH__ < 200
#if SMALL_BLOCK_SIZE > 32
__syncthreads();
#endif
int keyIndex = threadIdx.x;
while (keyIndex < nOut)
{
OUTPUTTYPE result = ourOut[keyIndex];
//result = result + resultbins[keyIndex];
result = sumfunObj(result, *(OUTPUTTYPE*)(&resultbins[keyIndex]));
ourOut[keyIndex] = result;
keyIndex += SMALL_BLOCK_SIZE;
}
#endif
}
template <histogram_type histotype, typename OUTPUTTYPE>
static int getSmallBinBufSize(int nOut, hipDeviceProp_t* props)
{
int maxblocks = props->multiProcessorCount * 3;
maxblocks *= 2;
if (nOut < 200) maxblocks *= 4;
maxblocks *= 4;
return (maxblocks + 1) * nOut * sizeof(OUTPUTTYPE);
}
template <histogram_type histotype, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
static
void callSmallBinHisto(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero,
OUTPUTTYPE* out, int nOut,
hipDeviceProp_t* props,
int cuda_arch,
hipStream_t stream,
int* getTmpBufSize,
void* tmpBuffer,
bool outInDev)
{
INDEXT size = end - start;
if (end <= start)
{
if (getTmpBufSize) *getTmpBufSize = 0;
return;
}
int maxblocks = props->multiProcessorCount * 3;
if (size > 2*1024*1024 || getTmpBufSize){
maxblocks *= 2;
// High occupancy requires lots of blocks
if (nOut < 200) maxblocks *= 4;
}
// TODO: Magic constants..
// With low bin-counts and large problems it seems beneficial to use
// more blocks...
if (nOut <= 128 || size > 2*4096*4096 || getTmpBufSize)
maxblocks *= 4;
//printf("maxblocks = %d\n", maxblocks);
OUTPUTTYPE* tmpOut;
if (getTmpBufSize) {
*getTmpBufSize = (maxblocks + 1) * nOut * sizeof(OUTPUTTYPE);
return;
}
if (tmpBuffer)
tmpOut = (OUTPUTTYPE*)tmpBuffer;
else
hipMalloc((void**)&tmpOut, (maxblocks + 1) * nOut * sizeof(OUTPUTTYPE));
#if H_ERROR_CHECKS
/*assert(getSmallBinBufSize<histotype, OUTPUTTYPE>(nOut, props) >=
(maxblocks + 1) * nOut * sizeof(OUTPUTTYPE));*/
#endif
// hipMemset(tmpOut, 0, sizeof(OUTPUTTYPE) * nOut * (maxblocks+1));
{
#define IBLOCK_SIZE_LOG2 7
#define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2)
int initPaddedSize =
((maxblocks * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1));
const dim3 initblock = IBLOCK_SIZE;
dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 );
int nsteps = 1;
while (initgrid.x > (1 << 14))
{
initgrid.x >>= 1;
nsteps <<= 1;
if (nsteps * initgrid.x * IBLOCK_SIZE < maxblocks * nOut)
initgrid.x++;
}
hipLaunchKernelGGL(( initKernel), dim3(initgrid), dim3(initblock), 0, stream, tmpOut, zero, maxblocks * nOut, nsteps);
#undef IBLOCK_SIZE_LOG2
#undef IBLOCK_SIZE
}
int sharedNeeded;
if (histotype == histogram_atomic_inc)
{
int padNOut = nOut + (((nOut & 0x3) != 0) ? (4 - (nOut & 0x3)) : 0);
sharedNeeded = (padNOut << SMALL_BLOCK_SIZE_LOG2);
if (cuda_arch < 200)
sharedNeeded += (nOut << 2);
}
else
{
int typesize = sizeof(OUTPUTTYPE);
sharedNeeded = (nOut * typesize) << SMALL_BLOCK_SIZE_LOG2;
//printf("Small-bin, generic, Shared needed = %d\n", sharedNeeded);
}
// Determine number of local variables
// SMALL_LOCALLIMIT is total local size available for one block:
int nSteps = size / (maxblocks << SMALL_BLOCK_SIZE_LOG2);
if (nSteps * maxblocks * SMALL_BLOCK_SIZE < size) nSteps++;
if (nSteps > MAX_SMALL_STEPS) nSteps = MAX_SMALL_STEPS;
int nFullSteps = size / (nSteps * maxblocks * SMALL_BLOCK_SIZE);
dim3 grid = maxblocks;
dim3 block = SMALL_BLOCK_SIZE;
for (int i = 0; i < nFullSteps; i++)
{
if (histotype == histogram_atomic_inc)
hipLaunchKernelGGL(( histoKernel_smallBinByte<histotype, false, nMultires>), dim3(grid), dim3(block), sharedNeeded, stream,
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps);
else
hipLaunchKernelGGL(( histoKernel_smallBin<false, nMultires>), dim3(grid), dim3(block), sharedNeeded, stream,
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps);
start += nSteps * maxblocks * SMALL_BLOCK_SIZE;
#if H_ERROR_CHECKS
hipError_t error = hipGetLastError();
if (error != hipSuccess)
printf("Cudaerror = %s\n", hipGetErrorString( error ));
#endif
}
size = end - start;
if (size > 0)
{
// Do what steps we still can do without checks
nSteps = size / (maxblocks << SMALL_BLOCK_SIZE_LOG2);
if (nSteps > 0)
{
if (histotype == histogram_atomic_inc)
hipLaunchKernelGGL(( histoKernel_smallBinByte<histotype, false, nMultires>), dim3(grid), dim3(block), sharedNeeded, stream,
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps);
else
hipLaunchKernelGGL(( histoKernel_smallBin<false, nMultires>), dim3(grid), dim3(block), sharedNeeded, stream,
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps);
start += nSteps * maxblocks * SMALL_BLOCK_SIZE;
}
}
size = end - start;
if (size > 0)
{
// Last step here:
int nblocks = size >> SMALL_BLOCK_SIZE_LOG2;
if (nblocks >= maxblocks) nblocks = maxblocks;
else if ((nblocks << SMALL_BLOCK_SIZE_LOG2) < size) nblocks++;
nSteps = size / (nblocks << SMALL_BLOCK_SIZE_LOG2);
if (nSteps * nblocks * SMALL_BLOCK_SIZE < size)
{
nSteps++;
nblocks = size / (nSteps << SMALL_BLOCK_SIZE_LOG2);
if (((nSteps * nblocks) << SMALL_BLOCK_SIZE_LOG2) < size) nblocks++;
}
grid.x = nblocks;
if (histotype == histogram_atomic_inc)
hipLaunchKernelGGL(( histoKernel_smallBinByte<histotype, true, nMultires>), dim3(grid), dim3(block), sharedNeeded, stream,
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps);
else
hipLaunchKernelGGL(( histoKernel_smallBin<true, nMultires>), dim3(grid), dim3(block), sharedNeeded, stream,
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps);
}
#if H_ERROR_CHECKS
hipError_t error = hipGetLastError();
if (error != hipSuccess)
printf("Cudaerror = %s\n", hipGetErrorString( error ));
#endif
// Finally put together the result:
enum hipMemcpyKind fromOut = outInDev ? hipMemcpyDeviceToDevice : hipMemcpyHostToDevice;
enum hipMemcpyKind toOut = outInDev ? hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost;
if (stream != 0)
hipMemcpyAsync(&tmpOut[maxblocks * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut, stream);
else
hipMemcpy(&tmpOut[maxblocks * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut);
// Let's do so that one block handles one bin
grid.x = nOut;
//grid.x = nOut >> SMALL_BLOCK_SIZE_LOG2;
//if ((grid.x << SMALL_BLOCK_SIZE_LOG2) < nOut) grid.x++;
block.x = GATHER_BLOCK_SIZE;
hipLaunchKernelGGL(( gatherKernel), dim3(grid), dim3(block), 0, stream, sumfunObj, tmpOut, nOut, maxblocks, zero);
// TODO: Use async copy for the results as well?
if (outInDev && stream != 0)
hipMemcpyAsync(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut, stream);
else
hipMemcpy(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut);
#if 0
{
int resIdx;
int i;
OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(maxblocks * nOut * sizeof(OUTPUTTYPE));
//parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE));
hipMemcpy(h_tmp, tmpOut, maxblocks*nOut*sizeof(OUTPUTTYPE), hipMemcpyDeviceToHost);
for (resIdx = 0; resIdx < nOut; resIdx++)
{
OUTPUTTYPE res = out[resIdx];
for (i = 0; i < maxblocks; i++)
{
res = sumfunObj(res, h_tmp[i * nOut + resIdx]);
}
out[resIdx] = sumfunObj(res, out[resIdx]);
}
free(h_tmp);
}
#endif
if (!tmpBuffer)
hipFree(tmpOut);
}
template <histogram_type histotype, typename OUTPUTTYPE>
static inline
bool smallBinLimit(int nOut, OUTPUTTYPE zero, hipDeviceProp_t* props, int cuda_arch)
{
int shlimit = props->sharedMemPerBlock - 300;
int typeSize = sizeof(OUTPUTTYPE);
if (histotype == histogram_atomic_inc)
if ((((4 * nOut) << 5) + (cuda_arch < 200 ? nOut * 16 : 0)) < shlimit)
return true;
if (((4 * nOut * typeSize) << 5) < shlimit)
return true;
return false;
}
__global__
void detectCudaArchKernel(int* res)
{
int result;
#if __CUDA_ARCH__ >= 210
result = 210;
#elif __CUDA_ARCH__ >= 200
result = 200;
#elif __CUDA_ARCH__ >= 130
result = 130;
#elif __CUDA_ARCH__ >= 120
result = 120;
#elif __CUDA_ARCH__ >= 110
result = 110;
#else
result = 100;
#endif
if (threadIdx.x == 0)
*res = result;
}
static
int DetectCudaArch(void)
{
// The only way to know from host code which device architecture our kernels have been generated
// against is to run a kernel that actually checks it. :)
dim3 grid = 1;
//dim3 block = 32;
// TODO: Allow static storage so that we can ask just once for the arch???
// NOTE: This function implies synchronization between CPU and GPU - so use static here...
static int result = 0;
//int result = 0;
if (result == 0)
{
void* tmpBuf;
hipMalloc(&tmpBuf, sizeof(int));
hipLaunchKernelGGL(( detectCudaArchKernel), dim3(grid), dim3(grid), 0, 0, (int*)tmpBuf);
hipMemcpy(&result, tmpBuf, sizeof(int), hipMemcpyDeviceToHost);
hipFree(tmpBuf);
//printf("Detected CUDA_ARCH = %d\n", result);
}
return result;
}
static bool runMultiPass(int nOut, hipDeviceProp_t* props, int cuda_arch, size_t outsize, histogram_type histotype)
{
int subsize = determineSubHistoSize(nOut, outsize, histotype, cuda_arch, props);
if (cuda_arch < 120){
if (subsize <= 0 || nOut > 2 * subsize)
return false;
return true;
}
else
{
if (subsize <= 0 || nOut > 16 * subsize)
return false;
return true;
}
}
template <histogram_type histotype, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
hipError_t
callHistogramKernel(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
bool outInDev,
hipStream_t stream, void* tmpBuffer,
bool allowMultiPass)
{
int devId;
hipDeviceProp_t props;
hipError_t cudaErr = hipGetDevice( &devId );
if (cudaErr != 0) return cudaErr;
//assert(!cudaErr);
cudaErr = hipGetDeviceProperties( &props, devId );
if (cudaErr != 0) return cudaErr;
int cuda_arch = DetectCudaArch();
enum hipFuncCache_t old;
hipDeviceGetCacheConfig(&old);
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
if (nOut <= 0) return hipSuccess;
// 100 Mib printf-limit should be enough...
// hipDeviceSetLimit(hipLimitPrintfFifoSize, 1024 * 1024 * 100);
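// Strategy selection (added summary comment): pick the cheapest path that fits -
// (1) per-thread bins in shared memory when nOut is small (callSmallBinHisto),
// (2) one shared-memory bin set per block when the bins fit (callHistogramKernelImpl),
// (3) optionally a multi-pass variant that splits the output range into sub-histograms,
// (4) otherwise the global-memory large-bin-count fallback (callHistogramKernelLargeNBins).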
if (smallBinLimit<histotype>(nOut, zero, &props, cuda_arch))
{
callSmallBinHisto<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, cuda_arch, stream, NULL, tmpBuffer, outInDev);
}
else if (binsFitIntoShared(nOut, zero, &props, cuda_arch))
{
callHistogramKernelImpl<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, stream, NULL, tmpBuffer, outInDev, cuda_arch);
}
else if (allowMultiPass && runMultiPass(nOut, &props, cuda_arch, sizeof(OUTPUTTYPE), histotype))
{
callHistogramKernelMultiPass<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, stream, tmpBuffer, outInDev, cuda_arch);
}
else
{
callHistogramKernelLargeNBins<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, cuda_arch, stream, NULL, tmpBuffer, outInDev);
}
hipDeviceSetCacheConfig(old);
return hipSuccess;
}
template <typename nDimIndexFun, int nDim, typename USERINPUTTYPE, typename INDEXT, typename OUTPUTTYPE>
class wrapHistoInput
{
public:
nDimIndexFun userIndexFun;
INDEXT starts[nDim];
//int ends[nDim];
INDEXT sizes[nDim];
__host__ __device__
void operator() (USERINPUTTYPE input, INDEXT i, int* result_index, OUTPUTTYPE* results, int nresults) const {
int coords[nDim];
int tmpi = i;
#pragma unroll
for (int d=0; d < nDim; d++)
{
// Example of how this logic works - imagine a cube of (10,100,1000), and take index 123 456
// newI = 123 456 / 10 = 12 345, offset = 123 456 - 123 450 = 6 (this is our first coordinate!),
// newI = 12 345 / 100 = 123, offset = 12 345 - 12 300 = 45 (this is our second coordinate!),
// newI = 123 / 1000 = 0, offset = 123 - 0 = 123 (this is our last coordinate!)
// Result = [123, 45, 6]
INDEXT newI = tmpi / sizes[d];
INDEXT offset = tmpi - newI * sizes[d];
coords[d] = starts[d] + offset;
tmpi = newI;
}
// Now just call wrapped functor with right coordinate values
userIndexFun(input, coords, result_index, results, nresults);
}
};
template <histogram_type histotype, int nMultires, int nDim,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
hipError_t
callHistogramKernelNDim(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT* starts, INDEXT* ends,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
bool outInDev,
hipStream_t stream, void* tmpBuffer,
bool allowMultiPass)
{
wrapHistoInput<TRANSFORMFUNTYPE, nDim, INPUTTYPE, INDEXT, OUTPUTTYPE> wrapInput;
INDEXT start = 0;
INDEXT size = 1;
for (int d = 0; d < nDim; d++)
{
wrapInput.starts[d] = starts[d];
wrapInput.sizes[d] = ends[d] - starts[d];
// Example: starts = [3, 10, 23], sizes = [10, 100, 1000]
// start = 3 * 1 = 3, size = 10
// start = 3 + 10 * 10 = 103, size = 10*100 = 1000
// start = 103 + 1000*23 = 23 103, size = 1000*1000 = 1 000 000
start += starts[d] * size;
size *= wrapInput.sizes[d];
if (ends[d] <= starts[d]) return hipSuccess;
}
wrapInput.userIndexFun = xformObj;
INDEXT end = start + size;
return callHistogramKernel<histotype, nMultires>
(input, wrapInput, sumfunObj, start, end, zero, out, nOut, outInDev, stream, tmpBuffer, allowMultiPass);
}
template <histogram_type histotype, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
hipError_t
callHistogramKernel2Dim(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT x0, INDEXT x1,
INDEXT y0, INDEXT y1,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
bool outInDev,
hipStream_t stream, void* tmpBuffer,
bool allowMultiPass)
{
INDEXT starts[2] = { x0, y0 };
INDEXT ends[2] = { x1, y1 };
return callHistogramKernelNDim<histotype, nMultires, 2>
(input, xformObj, sumfunObj, starts, ends, zero, out, nOut, outInDev, stream, tmpBuffer, allowMultiPass);
}
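// Note (added comment): for the N-dimensional entry points the user transform functor receives
// the decomposed coordinates instead of a flat index, i.e. it is expected to look like
// operator()(INPUTTYPE input, int* coords, int* result_index, OUTPUTTYPE* results, int nresults),
// where coords[0] is the x coordinate and coords[1] the y coordinate for the 2-D wrapper above.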
struct histogram_defaultXform
{
__host__ __device__
void operator() (int* input, int i, int* result_index, int* results, int nresults) const {
//int idata = input[i];
#pragma unroll
for (int resIndex = 0; resIndex < nresults; resIndex++)
{
*result_index++ = *input++;
*results++ = 1;
}
}
};
template <typename OUTPUTTYPE>
struct histogram_defaultSum
{
__host__ __device__
OUTPUTTYPE operator() (OUTPUTTYPE i1, OUTPUTTYPE i2) const {
return i1 + i2;
}
};
template <typename INPUTTYPE, typename OUTPUTTYPE>
struct histogram_dummyXform
{
__host__ __device__
void operator() (INPUTTYPE* input, int i, int* result_index, OUTPUTTYPE* results, int nresults) const {
//int idata = input[i];
int index = i;
(void)input;
#pragma unroll
for (int resIndex = 0; resIndex < nresults; resIndex++)
{
*result_index++ = index++;
*results++ = 1;//*input++;
}
}
};
template <typename OUTPUTTYPE>
struct histogram_dummySum
{
__host__ __device__
OUTPUTTYPE operator() (OUTPUTTYPE i1, OUTPUTTYPE i2) const {
return i1;
}
};
template <histogram_type histotype, typename OUTPUTTYPE>
int getHistogramBufSize(OUTPUTTYPE zero, int nOut)
{
int result = 0;
int devId;
hipDeviceProp_t props;
hipError_t cudaErr = hipGetDevice( &devId );
if (cudaErr != 0) return -1;
//assert(!cudaErr);
cudaErr = hipGetDeviceProperties( &props, devId );
if (cudaErr != 0) return -1;
int cuda_arch = DetectCudaArch();
if (nOut <= 0) return 0;
if (smallBinLimit<histotype>(nOut, zero, &props, cuda_arch))
{
result = getSmallBinBufSize<histotype, OUTPUTTYPE>(nOut, &props);
}
else if (binsFitIntoShared(nOut, zero, &props, cuda_arch))
{
result = getMediumHistoTmpbufSize<histotype, OUTPUTTYPE>(nOut, &props);
}
else if (runMultiPass(nOut, &props, cuda_arch, sizeof(OUTPUTTYPE), histotype))
{
result = getMultipassBufSize<histotype, OUTPUTTYPE>(nOut, &props, cuda_arch);
}
else
{
result = getLargeBinTmpbufsize<histotype, OUTPUTTYPE>(nOut, &props, cuda_arch);
}
return result;
}
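// Illustrative only (not compiled, #if 0): one way a caller could size and reuse the
// temporary buffer reported by the function above, instead of letting callHistogramKernel
// allocate one internally on every call. 'exampleXform1D', 'exampleSum1D',
// 'exampleHistoWithTmpBuffer', d_input, N, d_bins and NBINS are assumptions made up for this sketch.
#if 0
struct exampleXform1D
{
    __host__ __device__
    void operator() (const unsigned int* in, int i, int* res_idx, unsigned int* res, int nres) const {
        *res_idx = (int)in[i];   // the input value is used directly as the bin index
        *res = 1;
    }
};
struct exampleSum1D
{
    __host__ __device__ unsigned int operator() (unsigned int a, unsigned int b) const { return a + b; }
};
static hipError_t exampleHistoWithTmpBuffer(unsigned int* d_input, int N, unsigned int* d_bins, int NBINS)
{
    exampleXform1D xform;
    exampleSum1D sum;
    int bufSize = getHistogramBufSize<histogram_atomic_inc>(0U, NBINS);
    if (bufSize < 0) return hipErrorUnknown;   // device query failed
    void* tmpBuffer = NULL;
    if (bufSize > 0) hipMalloc(&tmpBuffer, bufSize);
    hipError_t err = callHistogramKernel<histogram_atomic_inc, 1>
        (d_input, xform, sum, /*start=*/0, /*end=*/N, /*zero=*/0U, d_bins, NBINS,
         /*outInDev=*/true, /*stream=*/0, tmpBuffer, /*allowMultiPass=*/true);
    if (tmpBuffer) hipFree(tmpBuffer);
    return err;
}
#endif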
// undef everything
#undef H_ERROR_CHECKS
#undef HBLOCK_SIZE_LOG2
#undef HBLOCK_SIZE
#undef HMBLOCK_SIZE_LOG2
#undef HMBLOCK_SIZE
#undef LBLOCK_SIZE_LOG2
#undef LBLOCK_SIZE
#undef GATHER_BLOCK_SIZE_LOG2
#undef GATHER_BLOCK_SIZE
#undef LBLOCK_WARPS
#undef RBLOCK_SIZE
#undef RMAXSTEPS
#undef NHSTEPSPERKEY
#undef MAX_NHSTEPS
#undef MAX_MULTISTEPS
#undef MAX_NLHSTEPS
#undef STRATEGY_CHECK_INTERVAL_LOG2
#undef STRATEGY_CHECK_INTERVAL
#undef HASH_COLLISION_STEPS
#undef USE_JENKINS_HASH
#undef LARGE_NBIN_CHECK_INTERVAL_LOG2
#undef LARGE_NBIN_CHECK_INTERVAL
#undef SMALL_BLOCK_SIZE_LOG2
#undef SMALL_BLOCK_SIZE
#undef MAX_SMALL_STEPS
#undef USE_ATOMICS_HASH
#undef USE_BALLOT_HISTOGRAM
#undef TAKE_WARP_MUTEX
#undef GIVE_WARP_MUTEX
#undef FREE_MUTEX_ID
#if USE_MEDIUM_PATH
#undef MEDIUM_BLOCK_SIZE_LOG2
#undef MEDIUM_BLOCK_SIZE
#endif
#undef USE_MEDIUM_PATH
| a1b8e9f413e722489b07d57bf5ae80cd9da8d4b6.cu | #include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <cutil.h>
#include "util.h"
#include "ref_2dhisto.h"
/*** The following code is adapted from https://github.com/trantalaiho/Cuda-Histogram and I made the following modifications:
1. Changed the template parameters of function callHistogramKernel to accommodate unsigned char
2. Modified the default block size to 256
3. Minor modifications to function callHistogramKernelImpl
**/
#define H_ERROR_CHECKS 0
#if H_ERROR_CHECKS
#include <assert.h>
#include <stdio.h>
#endif
#define HBLOCK_SIZE_LOG2 7
#define HBLOCK_SIZE (1 << HBLOCK_SIZE_LOG2) // = 128
#define HMBLOCK_SIZE_LOG2 8
#define HMBLOCK_SIZE (1 << HMBLOCK_SIZE_LOG2) // = 256
#define LBLOCK_SIZE_LOG2 5
#define LBLOCK_SIZE (1 << LBLOCK_SIZE_LOG2) // = 32
#define LBLOCK_WARPS (LBLOCK_SIZE >> 5)
#define USE_MEDIUM_PATH 1
#if USE_MEDIUM_PATH
// For now only MEDIUM_BLOCK_SIZE_LOG2 == LBLOCK_SIZE_LOG2 works
# define MEDIUM_BLOCK_SIZE_LOG2 8
# define MEDIUM_BLOCK_SIZE (1 << MEDIUM_BLOCK_SIZE_LOG2) // = 256
# define MBLOCK_WARPS (MEDIUM_BLOCK_SIZE >> 5)
#define MED_THREAD_DEGEN 16
#endif
#define RBLOCK_SIZE 64
#define RMAXSTEPS 80
#define NHSTEPSPERKEY 32
#define MAX_NHSTEPS 1024
#define MAX_MULTISTEPS 1024
#define MAX_NLHSTEPS 2048
#define GATHER_BLOCK_SIZE_LOG2 6
#define GATHER_BLOCK_SIZE (1 << GATHER_BLOCK_SIZE_LOG2)
#define STRATEGY_CHECK_INTERVAL_LOG2 7
#define STRATEGY_CHECK_INTERVAL (1 << STRATEGY_CHECK_INTERVAL_LOG2)
#define HISTOGRAM_DEGEN_LIMIT 20
#define HASH_COLLISION_STEPS 2
const int numActiveUpperLimit = 24;
#define USE_JENKINS_HASH 0
#define LARGE_NBIN_CHECK_INTERVAL_LOG2 5
#define LARGE_NBIN_CHECK_INTERVAL (1 << LARGE_NBIN_CHECK_INTERVAL_LOG2)
#define SMALL_BLOCK_SIZE_LOG2 6
#define SMALL_BLOCK_SIZE (1 << SMALL_BLOCK_SIZE_LOG2)
#define MAX_SMALL_STEPS 2040
static unsigned int* d_Data = NULL;
static unsigned int* d_Histogram = NULL;
//static unsigned char* d_Histogram = NULL;
static unsigned int* h_Histogram = NULL;
//static unsigned char* h_Histogram = NULL;
#if __CUDA_ARCH__ >= 120
#define USE_ATOMICS_HASH 0
#else
#define USE_ATOMICS_HASH 0
#endif
#if (__CUDA_ARCH__ >= 200)
# define USE_BALLOT_HISTOGRAM 1
#else
# define USE_BALLOT_HISTOGRAM 0
#endif
#ifndef __device__
#define __device__
#endif
#ifndef __host__
#define __host__
#endif
#ifndef __shared__
#define __shared__
#endif
enum histogram_type {
histogram_generic, /*!< \brief Generic histogram, for any types */
histogram_atomic_inc, /*!< \brief Each output-value is constant 1 */
histogram_atomic_add, /*!< \brief Output-type is such that the atomicAdd() function can be used */
};
template <histogram_type histotype, typename OUTPUTTYPE>
static
int
getHistogramBufSize(OUTPUTTYPE zero, int nOut);
template <histogram_type histotype, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
static
cudaError_t
callHistogramKernel(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
bool outInDev = false,
cudaStream_t stream = 0, void* tmpBuffer = NULL,
bool allowMultiPass = true);
template <histogram_type histotype, int nMultires, int nDim,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
static
cudaError_t
callHistogramKernelNDim(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT* starts, INDEXT* ends,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
bool outInDev = false,
cudaStream_t stream = 0, void* tmpBuffer = NULL,
bool allowMultiPass = true);
template <histogram_type histotype, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
cudaError_t
callHistogramKernel2Dim(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT x0, INDEXT x1,
INDEXT y0, INDEXT y1,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
bool outInDev,
cudaStream_t stream, void* tmpBuffer,
bool allowMultiPass = true);
struct test_xform {
__host__ __device__ void operator() (unsigned int* input, int i, int* res_idx, unsigned int* res, int nres) const {
*res_idx++ = input[i];
*res++ = 1;
}
};
//struct test_xform {
// __host__ __device__ void operator() (unsigned int* input, int i, int* res_idx, unsigned char* res, int nres) const {
// *res_idx++ = input[i];
// *res++ = 1;
// }
//};
// Sum-functor to be used for reduction - just a normal sum of two integers
struct test_sumfun {
__device__ __host__ unsigned int operator() (unsigned int res1, unsigned int res2) const{
return res1 + res2;
}
};
//struct test_sumfun {
// __device__ __host__ unsigned char operator() (unsigned char res1, unsigned char res2) const{
// unsigned int sum = (unsigned int)res1+(unsigned int)res2;
// if(sum>255) return 255;
// return res1 + res2;
// }
//};
__global__ void computeHistogram(unsigned int *buffer, int size, unsigned int *histo )
{
__shared__ unsigned int temp[1024];
temp[threadIdx.x + 0] = 0;
temp[threadIdx.x + 256] = 0;
temp[threadIdx.x + 512] = 0;
temp[threadIdx.x + 768] = 0;
__syncthreads();
int i = threadIdx.x + blockIdx.x * blockDim.x;
int offset = blockDim.x * gridDim.x;
while (i < size)
{
atomicAdd( &temp[buffer[i]], 1);
i += offset;
}
__syncthreads();
atomicAdd( &(histo[threadIdx.x + 0]), temp[threadIdx.x + 0] );
atomicAdd( &(histo[threadIdx.x + 256]), temp[threadIdx.x + 256] );
atomicAdd( &(histo[threadIdx.x + 512]), temp[threadIdx.x + 512] );
atomicAdd( &(histo[threadIdx.x + 768]), temp[threadIdx.x + 768] );
}
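// Illustrative launch only (not compiled, #if 0). This kernel is not used by opt_2dhisto
// below; note that it hard-codes 1024 bins and initializes shared memory in four strides of
// blockDim.x, so it must be launched with exactly 256 threads per block. The grid size,
// 'd_input' and 'nElements' are assumptions made up for this sketch.
#if 0
void exampleLaunchComputeHistogram(unsigned int* d_input, int nElements, unsigned int* d_histo)
{
    cudaMemset(d_histo, 0, 1024 * sizeof(unsigned int));
    // 64 blocks chosen arbitrarily; the grid-stride loop inside the kernel covers any input size.
    computeHistogram<<<64, 256>>>(d_input, nElements, d_histo);
}
#endif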
extern "C" void opt_init(unsigned int** h_Data, int width, int height)
{
cudaMalloc((void **)&d_Histogram, HISTO_HEIGHT * HISTO_WIDTH * sizeof(unsigned int));
cudaMemset( d_Histogram, 0,HISTO_HEIGHT * HISTO_WIDTH * sizeof( unsigned int ));
unsigned int *data = new unsigned int[width*height];
for(int j = 0;j<height;++j)
{
memcpy(data+j*width, h_Data[j], sizeof(unsigned int)*width);
}
cudaMalloc((void **)&d_Data, width*height*sizeof(unsigned int));
cudaMemcpy(d_Data, data, width*height*sizeof(unsigned int), cudaMemcpyHostToDevice);
delete []data;
}
extern "C" void opt_2dhisto(int size)
{
test_xform xform;
test_sumfun sum;
//unsigned char zero = 0x00;
callHistogramKernel<histogram_atomic_inc, 1>(d_Data, xform, sum, 0, size, 0U, &d_Histogram[0], HISTO_HEIGHT * HISTO_WIDTH, true);
h_Histogram = new unsigned int[HISTO_HEIGHT * HISTO_WIDTH];
cudaMemcpy(h_Histogram, d_Histogram, HISTO_HEIGHT * HISTO_WIDTH * sizeof(unsigned int), cudaMemcpyDeviceToHost);
//cudaDeviceSynchronize();
}
extern "C" void opt_free()
{
cudaFree(d_Histogram);
cudaFree(d_Data);
}
extern "C" void opt_copyFromDevice(unsigned char* output)
{
for(int i = 0;i<HISTO_HEIGHT * HISTO_WIDTH;++i)
{
int value = h_Histogram[i]/1000;
output[i] = value>255?255:value;
}
// memcpy(output,h_Histogram,sizeof(unsigned char)*HISTO_HEIGHT*HISTO_WIDTH);
delete[] h_Histogram;
}
//#include <stdio.h>
template <typename OUTPUTTYPE, typename SUMFUNTYPE>
__global__
void multireduceKernel(OUTPUTTYPE* input, int n, int nOut, int nsteps, SUMFUNTYPE sumFun, OUTPUTTYPE zero, int stride, OUTPUTTYPE* initialValues)
{
int tid = threadIdx.x;
int bidx = blockIdx.x;
int bidy = blockIdx.y;
OUTPUTTYPE myout = zero;
int i;
for (i = 0; i < nsteps; i++)
{
int subIndex = bidx * RBLOCK_SIZE + tid;
int cidx = subIndex + i * RBLOCK_SIZE * gridDim.x;
if (cidx < n)
{
// printf("t(%2d)b(%3d,%2d) r(%d)\n", tid, bidx, bidy, cidx + bidy * stride);
myout = sumFun(myout, input[cidx + bidy * stride]);
}
}
__shared__ OUTPUTTYPE tmp[RBLOCK_SIZE / 2];
for (int curLimit = RBLOCK_SIZE / 2; curLimit > 0; curLimit >>= 1)
{
// First write out the current result for threads above the limit
if (tid >= curLimit && tid < (curLimit << 1))
tmp[tid - curLimit] = myout;
// Otherwise wait for the write the complete and add that value to our result
__syncthreads();
if (tid < curLimit)
myout = sumFun(myout, tmp[tid]);
// IMPORTANT: Wait before new loop for the read to complete
__syncthreads();
}
// Done! myout contains the result for our block for thread 0!!
if (tid == 0)
{
// NOTE: If gridDim == 1 then we have finally reached the last iteration and
// can write the result into the final result-value array
// (ie. The same as initialvalue-array)
if (gridDim.x == 1)
{
OUTPUTTYPE initVal = initialValues[bidy];
initialValues[bidy] = sumFun(initVal, myout);
// And we are DONE!
}
else
{
// printf("t(%2d)b(%3d,%2d) w(%d)\n", tid, bidx, bidy, bidx + bidy * stride);
initialValues[bidx + bidy * stride] = myout;
}
}
}
template <typename OUTPUTTYPE, typename SUMFUNTYPE>
static
void callMultiReduce(
int arrLen, int nOut, OUTPUTTYPE* h_results, OUTPUTTYPE* input,
SUMFUNTYPE sumFunObj, OUTPUTTYPE zero,
cudaStream_t stream, void* tmpbuf, bool outInDev)
{
int n = arrLen;
// Set-up yet another temp buffer: (TODO: Pool alloc somehow?)
OUTPUTTYPE* resultTemp = NULL;
// TODO: Why do we need such a large temporary array?
// Shouldn't sizeof(OUTPUTTYPE) * nOut * xblocks be enough??
if (tmpbuf)
{
resultTemp = (OUTPUTTYPE*)tmpbuf;
}
else
{
cudaMalloc((void**)&resultTemp, sizeof(OUTPUTTYPE) * nOut * arrLen);
#if H_ERROR_CHECKS
//printf("resultTemp = %p\n", resultTemp);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
printf("Cudaerror0 = %s\n", cudaGetErrorString( error ));
#endif
}
OUTPUTTYPE* output = resultTemp;
enum cudaMemcpyKind fromOut = outInDev ? cudaMemcpyDeviceToDevice : cudaMemcpyHostToDevice;
enum cudaMemcpyKind toOut = outInDev ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost;
// Copy initial values:
do
{
int steps = (n + (RBLOCK_SIZE - 1)) / RBLOCK_SIZE;
if (steps > RMAXSTEPS)
steps = RMAXSTEPS;
int yblocks = nOut;
int xblocks = (n + (steps * RBLOCK_SIZE - 1)) / (steps * RBLOCK_SIZE);
const dim3 block = RBLOCK_SIZE;
const dim3 grid(xblocks, yblocks, 1);
if (xblocks == 1) // LAST ONE to start
{
//printf("cudaMemcpy(%p, %p, %d, %d);\n", output, h_results, sizeof(OUTPUTTYPE) * nOut, fromOut);
if (stream != 0)
cudaMemcpyAsync(output, h_results, sizeof(OUTPUTTYPE) * nOut, fromOut, stream);
else
cudaMemcpy(output, h_results, sizeof(OUTPUTTYPE) * nOut, fromOut);
}
#if H_ERROR_CHECKS
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
printf("Cudaerror1 = %s\n", cudaGetErrorString( error ));
#endif
// Then the actual kernel call
multireduceKernel<<<grid, block, 0, stream>>>(input, n, nOut, steps, sumFunObj, zero, arrLen, output);
#if H_ERROR_CHECKS
error = cudaGetLastError();
if (error != cudaSuccess)
printf("Cudaerror2 = %s\n", cudaGetErrorString( error ));
#endif
if (xblocks > 1)
{
// Swap pointers:
OUTPUTTYPE* tmpptr = output;
output = input;
input = tmpptr;
}
n = xblocks;
} while(n > 1);
// Then copy back the results:
//cudaMemcpyAsync(h_results, resultTemp, sizeof(OUTPUTTYPE) * nOut, cudaMemcpyDeviceToHost, CURRENT_STREAM());
// TODO: Support async copy here??
if (outInDev && stream != 0)
cudaMemcpyAsync(h_results, output, sizeof(OUTPUTTYPE) * nOut, toOut, stream);
else
cudaMemcpy(h_results, output, sizeof(OUTPUTTYPE) * nOut, toOut);
#if H_ERROR_CHECKS
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
printf("Cudaerror3 = %s\n", cudaGetErrorString( error ));
#endif
if (!tmpbuf)
{
cudaFree(resultTemp);
}
#if H_ERROR_CHECKS
error = cudaGetLastError();
if (error != cudaSuccess)
printf("Cudaerror4 = %s\n", cudaGetErrorString( error ));
#endif
}
template <typename SUMFUNTYPE, typename OUTPUTTYPE>
__global__
void gatherKernel(SUMFUNTYPE sumfunObj, OUTPUTTYPE* blockOut, int nOut, int nEntries, OUTPUTTYPE zero)
{
//int resIdx = threadIdx.x + blockDim.x * blockIdx.x;
int resIdx = blockIdx.x;
if (resIdx < nOut)
{
// Let's divide the nEntries first evenly on all threads and read 4 entries in a row
int locEntries = (nEntries) >> (GATHER_BLOCK_SIZE_LOG2);
// Note: Original array entry is stored in resIdx + nOut * nEntries!
OUTPUTTYPE res = zero;
if (threadIdx.x == 0)
res = blockOut[resIdx + nOut * nEntries];
// Shift starting ptr:
blockOut = &blockOut[resIdx];
int locIdx = threadIdx.x * locEntries;
for (int i=0; i < locEntries/4; i++)
{
OUTPUTTYPE x1 = blockOut[nOut * (locIdx + (i << 2))];
OUTPUTTYPE x2 = blockOut[nOut * (locIdx + (i << 2) + 1)];
OUTPUTTYPE x3 = blockOut[nOut * (locIdx + (i << 2) + 2)];
OUTPUTTYPE x4 = blockOut[nOut * (locIdx + (i << 2) + 3)];
res = sumfunObj(res, x1);
res = sumfunObj(res, x2);
res = sumfunObj(res, x3);
res = sumfunObj(res, x4);
}
// Then do the rest
for (int j = (locEntries/4)*4; j < locEntries; j++)
{
OUTPUTTYPE x1 = blockOut[nOut * (locIdx + j)];
res = sumfunObj(res, x1);
}
// Still handle rest starting from index "locEntries * BLOCK_SIZE":
locIdx = threadIdx.x + (locEntries << GATHER_BLOCK_SIZE_LOG2);
if (locIdx < nEntries)
res = sumfunObj(res, blockOut[nOut * locIdx]);
// Ok - all that is left is to do the final parallel reduction between threads:
{
__shared__ OUTPUTTYPE data[GATHER_BLOCK_SIZE];
//volatile OUTPUTTYPE* data = (volatile OUTPUTTYPE*)&dataTmp[0];
// TODO Compiler complains with volatile from this - why?
//error: no operator "=" matches these operands
// operand types are: volatile myTestType_s = myTestType
// Silly - does not happen with built-in types (nice...)
data[threadIdx.x] = res;
#if GATHER_BLOCK_SIZE == 512
__syncthreads();
if (threadIdx.x < 256)
data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 256]);
#endif
#if GATHER_BLOCK_SIZE >= 256
__syncthreads();
if (threadIdx.x < 128)
data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 128]);
#endif
#if GATHER_BLOCK_SIZE >= 128
__syncthreads();
if (threadIdx.x < 64)
data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 64]);
__syncthreads();
#endif
#if GATHER_BLOCK_SIZE >= 64
__syncthreads();
if (threadIdx.x < 32)
data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 32]);
#endif
__syncthreads();
if (threadIdx.x < 16) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 16]);
__syncthreads();
if (threadIdx.x < 8) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 8]);
__syncthreads();
if (threadIdx.x < 4) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 4]);
__syncthreads();
if (threadIdx.x < 2) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 2]);
__syncthreads();
if (threadIdx.x < 1) *blockOut = sumfunObj(data[threadIdx.x], data[threadIdx.x + 1]);
}
}
}
#define FREE_MUTEX_ID 0xffeecafe
#define TAKE_WARP_MUTEX(ID) do { \
int warpIdWAM = threadIdx.x >> 5; \
__shared__ volatile int lockVarWarpAtomicMutex;\
bool doneWAM = false;\
bool allDone = false; \
while(!allDone){ \
__syncthreads(); \
if (!doneWAM) lockVarWarpAtomicMutex = warpIdWAM; \
__syncthreads(); \
if (lockVarWarpAtomicMutex == FREE_MUTEX_ID) allDone = true; \
__syncthreads(); \
if (lockVarWarpAtomicMutex == warpIdWAM){ /* We Won */
// User code comes here
#define GIVE_WARP_MUTEX(ID) doneWAM = true; \
lockVarWarpAtomicMutex = FREE_MUTEX_ID; \
} \
} \
__syncthreads(); \
} while(0)
// NOTE: Init must be called from divergent-free code (or with exited warps)
#define INIT_WARP_MUTEX2(MUTEX) do { MUTEX = FREE_MUTEX_ID; __syncthreads(); } while(0)
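// Usage sketch (not compiled, #if 0): TAKE_WARP_MUTEX opens a scope that GIVE_WARP_MUTEX
// must close inside the same function, serializing the enclosed region so that only one
// warp of the block runs it at a time - the same pattern used by myAtomicWarpAdd() and the
// ONE_HS_STEP() macro further below. All threads of the block must reach the region together,
// since the macro uses __syncthreads(). The function name and 'blockCounter' are assumptions
// made up for this sketch.
#if 0
static __device__ void exampleWarpCriticalSection(int* blockCounter)
{
    TAKE_WARP_MUTEX(0);
    // Warps of this block execute here one at a time; let one lane per warp do the update.
    if ((threadIdx.x & 31) == 0)
        *blockCounter += 1;
    GIVE_WARP_MUTEX(0);
}
#endif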
#if 0 && __CUDA_ARCH__ >= 120 // TODO: NOT WORKING THIS CODEPATH - find out why
#define TAKE_WARP_MUTEX2(MUTEX) do { \
int warpIdWAM = 1000000 + threadIdx.x / 32; \
bool doneWAM = false;\
while(!doneWAM){ \
int old = -2; \
if (threadIdx.x % 32 == 0) \
old = atomicCAS(&MUTEX, FREE_MUTEX_ID, warpIdWAM); \
if (__any(old == FREE_MUTEX_ID)){ /* We Won */
// User code comes here
#define GIVE_WARP_MUTEX2(MUTEX) doneWAM = true; \
atomicExch(&MUTEX, FREE_MUTEX_ID); \
} \
} \
} while(0)
#else
#define TAKE_WARP_MUTEX2(MUTEX) do { \
int warpIdWAM = 1000000 + threadIdx.x / 32; \
bool doneWAM = false;\
bool allDone = false; \
while(!allDone){ \
__syncthreads(); \
if (!doneWAM) MUTEX = warpIdWAM; \
__syncthreads(); \
if (MUTEX == FREE_MUTEX_ID) allDone = true; \
if (MUTEX == warpIdWAM){ /* We Won */
// User code comes here
#define GIVE_WARP_MUTEX2(MUTEX) doneWAM = true; \
MUTEX = FREE_MUTEX_ID; \
} \
} \
} while(0)
#endif
#if USE_BALLOT_HISTOGRAM
template <typename OUTPUTTYPE>
static inline __device__
OUTPUTTYPE mySillyPopCount(unsigned int mymask, OUTPUTTYPE zero)
{
return zero;
}
static inline __device__
int mySillyPopCount(unsigned int mymask, int zero)
{
return (int)__popc(mymask);
}
static inline __device__
unsigned int mySillyPopCount(unsigned int mymask, unsigned int zero)
{
return (unsigned int)__popc(mymask);
}
static inline __device__
long long mySillyPopCount(unsigned int mymask, long long zero)
{
return (long long)__popc(mymask);
}
static inline __device__
unsigned long long mySillyPopCount(unsigned int mymask, unsigned long long zero)
{
return (unsigned long long)__popc(mymask);
}
template <histogram_type histotype, bool checkNSame, typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
bool ballot_makeUnique(
SUMFUNTYPE sumfunObj,
int myKey, OUTPUTTYPE* myOut, OUTPUTTYPE* s_vals, int* s_keys, int* nSameKeys)
{
unsigned int mymask;
/* #if HBLOCK_SIZE != 32
#error Please use threadblocks of 32 threads
#endif*/
//startKey = s_keys[startIndex];
// First dig out for each thread who are the other threads that have the same key as us...
//int i = 0;
if (checkNSame) {
unsigned int donemask = 0;
int startIndex = 32 - 1;
int startKey = s_keys[startIndex];
*nSameKeys = 0;
while (~donemask != 0 /*&& i++ < 32*/)
{
unsigned int mask = __ballot(myKey == startKey);
if (myKey == startKey)
mymask = mask;
donemask |= mask;
{
int nSame = __popc(mask);
if (nSame > *nSameKeys)
*nSameKeys = nSame;
}
startIndex = 31 - __clz(~donemask);
//if (myKey == 0) printf("Startindex = %d, donemask = 0x%08x, mask = 0x%08x\n", startIndex, donemask, mask);
if (startIndex >= 0)
startKey = s_keys[startIndex];
}
} else {
unsigned int donemask = 0;
int startIndex = 32 - 1;
while (startIndex >= 0)
{
int startKey = s_keys[startIndex];
unsigned int mask = __ballot(myKey == startKey);
if (myKey == startKey)
mymask = mask;
donemask |= mask;
startIndex = 31 - __clz(~donemask);
}
}
// Ok now mymask contains those threads - now we just reduce locally - all threads run at the same
// time, but reducing threads lose always half of them with each iteration - it would help
// to work with more than 32 entries, but the algorithm seems to get tricky there.
{
// Compute the left side of the mask and the right side. rmask first will contain our thread index, but
// we zero it out immediately
unsigned int lmask = (mymask >> (threadIdx.x & 31)) << (threadIdx.x & 31);
int IamNth = __popc(lmask) - 1;
bool Iwrite = IamNth == 0;
if (histotype == histogram_atomic_inc)
{
// Fast-path for atomic inc
*myOut = mySillyPopCount(mymask, *myOut);
return Iwrite && (myKey >= 0);
}
else
{
unsigned int rmask = mymask & (~lmask);
// Now compute which number is our thread in the subarray of those threads that have the same key
// starting from the left (ie. index == 31). So for thread 31 this will be always zero.
int nextIdx = 31 - __clz(rmask);
s_vals[(threadIdx.x & 31)] = *myOut;
//if (myKey == 0) printf("tid = %02d, IamNth = %02d, mask = 0x%08x, rmask = 0x%08x \n", threadIdx.x, IamNth, mymask, rmask);
//bool done = __all(nextIdx < 0);
// TODO: Unroll 5?
while (!__all(nextIdx < 0))
{
// Reduce towards those threads that have lower IamNth
// Our thread reads the next one if our internal ID is even
if ((IamNth & 0x1) == 0)
{
if (nextIdx >= 0){
// if (myKey == 0) printf("tid:%02d, add with %02d\n", threadIdx.x, nextIdx);
*myOut = sumfunObj(*myOut, s_vals[nextIdx]);
}
// And writes to the shared memory if our internal ID is third on every 4-long subarray:
if ((IamNth & 0x3) == 2)
{
// if (myKey == 0) printf("Tid %02d, store\n", threadIdx.x);
s_vals[(threadIdx.x & 31)] = *myOut;
}
}
// Now the beautiful part: Kill every other bit in the rmask bitfield. How, you ask?
// Using ballot: Every bit we want to kill has IamNth odd, or conversely, we only
// want to keep those bits that have IamNth even...
rmask &= __ballot((IamNth & 0x1) == 0);
nextIdx = 31 - __clz(rmask);
// if (myKey == 0) printf("tid = %02d, next = %02d, key = %d\n", threadIdx.x, rmask, nextIdx, myKey);
IamNth >>= 1;
//printf("i = %d\n", i);
}
// And voila, we are done - write out the result:
return Iwrite && (myKey >= 0);
}
}
}
#endif
template <bool laststeps, typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
void myAtomicWarpAdd(OUTPUTTYPE* addr, OUTPUTTYPE val, volatile int* keyAddr, SUMFUNTYPE sumfunObj, bool Iwrite, int* warpmutex)
{
// Taken from http://forums.nvidia.com/index.php?showtopic=72925
// This is a tad slow, but allows arbitrary operation
// For writes of 16 bytes or less AtomicCAS could be faster
// (See CUDA programming guide)
TAKE_WARP_MUTEX(0);
//__shared__ int warpmutex;
//INIT_WARP_MUTEX2(*warpmutex);
//TAKE_WARP_MUTEX2(*warpmutex);
bool write = Iwrite;
#define MU_TEMP_MAGIC 0xffffaaaa
*keyAddr = MU_TEMP_MAGIC;
while (1)
{
// Vote whose turn is it - remember, one thread does succeed always!:
if (write) *keyAddr = threadIdx.x;
if (*keyAddr == MU_TEMP_MAGIC)
break;
if (*keyAddr == threadIdx.x) // We won!
{
// Do arbitrary atomic op:
*addr = sumfunObj(*addr, val);
write = false;
*keyAddr = MU_TEMP_MAGIC;
}
}
GIVE_WARP_MUTEX(0);
//GIVE_WARP_MUTEX2(*warpmutex);
#undef MU_TEMP_MAGIC
}
template <typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
void myAtomicAdd(OUTPUTTYPE* addr, OUTPUTTYPE val, volatile int* keyAddr, SUMFUNTYPE sumfunObj)
{
// Taken from http://forums.nvidia.com/index.php?showtopic=72925
// This is a tad slow, but allows arbitrary operation
// For writes of 16 bytes or less AtomicCAS could be faster
// (See CUDA programming guide)
bool write = true;
#define MU_TEMP_MAGIC 0xffffaaaa
*keyAddr = MU_TEMP_MAGIC;
while (1)
{
// Vote whose turn is it - remember, one thread does succeed always!:
if (write ) *keyAddr = threadIdx.x;
if (*keyAddr == MU_TEMP_MAGIC)
break;
if (*keyAddr == threadIdx.x) // We won!
{
// Do arbitrary atomic op:
*addr = sumfunObj(*addr, val);
write = false;
*keyAddr = MU_TEMP_MAGIC;
}
}
#undef MU_TEMP_MAGIC
}
/*static __inline__ __device__ unsigned long long int atomicAdd(unsigned long long int *address, unsigned long long int val)
{
return __ullAtomicAdd(address, val);
}*/
template <typename OUTPUTTYPE>
static inline __device__
void atomicAdd(OUTPUTTYPE* addr, OUTPUTTYPE val)
{
//*addr = val;
}
template <typename OUTPUTTYPE>
static inline __device__
void atomicAdd(OUTPUTTYPE* addr, int val)
{
//*addr = val;
}
#if 0
template <typename OUTPUTTYPE>
static inline __device__
void atomicAdd(OUTPUTTYPE* addr, float val)
{
//*addr = val;
}
#endif
template <typename OUTPUTTYPE>
static inline __device__
void atomicAdd(OUTPUTTYPE* addr, unsigned int val)
{
//*addr = val;
}
template <typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
void myAtomicAddStats(OUTPUTTYPE* addr, OUTPUTTYPE val, volatile int* keyAddr, SUMFUNTYPE sumfunObj, int* nSameOut, bool Iwrite)
{
// Taken from http://forums.nvidia.com/index.php?showtopic=72925
bool write = true;
*keyAddr = 0xffffffff;
while (Iwrite)
{
// Vote whose turn is it - remember, one thread does succeed always!:
if (write ) *keyAddr = threadIdx.x;
if (*keyAddr == 0xffffffff)
break;
if (*keyAddr == threadIdx.x) // We won!
{
// Do arbitrary atomic op:
*addr = sumfunObj(*addr, val);
write = false;
*keyAddr = 0xffffffff;
} else {
*nSameOut = *nSameOut + 1;
}
}
{
// Then find max
__shared__ int nSame[HBLOCK_SIZE];
nSame[threadIdx.x] = *nSameOut;
#define TMPMAX(A,B) (A) > (B) ? (A) : (B)
#define tidx threadIdx.x
if (tidx < 16) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 16]);
if (tidx < 8) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 8]);
if (tidx < 4) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 4]);
if (tidx < 2) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 2]);
if (tidx < 1) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 1]);
#undef TMPMAX
#undef tidx
// Broadcast to all threads
*nSameOut = nSame[0];
}
}
// TODO: Make unique within one warp?
template<histogram_type histotype, bool checkNSame, typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
bool reduceToUnique(OUTPUTTYPE* res, int myKey, int* nSame, SUMFUNTYPE sumfunObj, int* keys, OUTPUTTYPE* outputs)
{
keys[(threadIdx.x & 31)] = myKey;
#if USE_BALLOT_HISTOGRAM
return ballot_makeUnique<histotype, checkNSame>(sumfunObj, myKey, res, outputs, keys, nSame);
#else
{
int i;
bool writeResult = myKey >= 0;
int myIdx = (threadIdx.x & 31) + 1;
outputs[(threadIdx.x & 31)] = *res;
// The assumption for sanity of this loop here is that all the data is in registers or shared memory and
// hence this loop will not actually be __that__ slow.. Also it helps if the data is spread out (ie. there are
// a lot of different indices here)
for (i = 1; i < 32 && writeResult; i++)
{
if (myIdx >= 32)
myIdx = 0;
// Is my index the same as the index on the index-list?
if (keys[myIdx] == myKey /*&& threadIdx.x != myIdx*/)
{
if (checkNSame) (*nSame)++;
// If yes, then we can sum up the result using users sum-functor
*res = sumfunObj(*res, outputs[myIdx]);
// But if somebody else is summing up this index already, we don't need to (wasted effort done here)
if (myIdx < threadIdx.x)
writeResult = false;
}
myIdx++;
}
// Ok - we are done - now we can proceed in writing the result (if some other thread isn't doing it already)
if (checkNSame)
{
// Manual reduce
int tid = threadIdx.x;
keys[tid] = *nSame;
if (tid < 16) keys[tid] = keys[tid] > keys[tid + 16] ? keys[tid] : keys[tid+16];
if (tid < 8) keys[tid] = keys[tid] > keys[tid + 8] ? keys[tid] : keys[tid+8];
if (tid < 4) keys[tid] = keys[tid] > keys[tid + 4] ? keys[tid] : keys[tid+4];
if (tid < 2) keys[tid] = keys[tid] > keys[tid + 2] ? keys[tid] : keys[tid+2];
if (tid < 1) keys[tid] = keys[tid] > keys[tid + 1] ? keys[tid] : keys[tid+1];
*nSame = keys[0];
}
return writeResult;
}
#endif
}
static inline __host__ __device__
void checkStrategyFun(bool *reduce, int nSame, int nSameTot, int step, int nBinSetslog2)
{
#if __CUDA_ARCH__ >= 200
#define STR_LIMIT 12
#else
#define STR_LIMIT 24
#endif
// TODO: Fix average case - a lot of things to tune here...
if ((nSameTot > STR_LIMIT * step || nSame > STR_LIMIT))
*reduce = true;
else
*reduce = false;
#undef STR_LIMIT
}
// Special case for floats (atomicAdd works only from __CUDA_ARCH__ 200 and up)
template <typename SUMFUNTYPE>
static inline __device__
void wrapAtomicAdd2(float* addr, float val, int* key, SUMFUNTYPE sumFunObj)
{
//*addr = val;
#if __CUDA_ARCH__ >= 200
atomicAdd(addr, val);
#else
myAtomicAdd(addr, val, key, sumFunObj);
#endif
}
template <typename SUMFUNTYPE,typename OUTPUTTYPE>
static inline __device__
void wrapAtomicAdd2(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj)
{
atomicAdd(addr, val);
}
// Special case for floats (atomicAdd works only from __CUDA_ARCH__ 200 and up)
template <bool laststeps, typename SUMFUNTYPE>
static inline __device__
void wrapAtomicAdd2Warp(float* addr, float val, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex)
{
//*addr = val;
#if __CUDA_ARCH__ >= 200
if (Iwrite) atomicAdd(addr, val);
#else
myAtomicWarpAdd<laststeps>(addr, val, key, sumFunObj, Iwrite, warpmutex);
#endif
}
template <bool laststeps, typename SUMFUNTYPE,typename OUTPUTTYPE>
static inline __device__
void wrapAtomicAdd2Warp(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex)
{
if (Iwrite) atomicAdd(addr, val);
}
template <typename OUTPUTTYPE, typename SUMFUNTYPE>
static inline __device__
void wrapAtomicAdd(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj)
{
//*addr = val;
#if __CUDA_ARCH__ >= 120
wrapAtomicAdd2(addr, val, key, sumFunObj);
#else
myAtomicAdd(addr, val, key, sumFunObj);
#endif
}
template <typename OUTPUTTYPE, typename SUMFUNTYPE>
static inline __device__
void wrapAtomicInc(OUTPUTTYPE* addr, int* key, SUMFUNTYPE sumFunObj)
{
//*addr = val;
#if __CUDA_ARCH__ >= 120
wrapAtomicAdd2((int*)addr, 1, key, sumFunObj);
#else
//myAtomicAdd((int*)addr, 1, key, sumFunObj);
#endif
}
template <typename SUMFUNTYPE>
static inline __device__
void wrapAtomicInc(int* addr, int* key, SUMFUNTYPE sumFunObj)
{
//*addr = val;
#if __CUDA_ARCH__ >= 120
wrapAtomicAdd2(addr, 1, key, sumFunObj);
#else
myAtomicAdd(addr, 1, key, sumFunObj);
#endif
}
template <bool laststeps, typename OUTPUTTYPE, typename SUMFUNTYPE>
static inline __device__
void wrapAtomicAddWarp(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex)
{
//*addr = val;
#if __CUDA_ARCH__ >= 120
wrapAtomicAdd2Warp<laststeps>(addr, val, key, sumFunObj, Iwrite, warpmutex);
#else
myAtomicWarpAdd<laststeps>(addr, val, key, sumFunObj, Iwrite, warpmutex);
#endif
}
template <bool laststeps, typename OUTPUTTYPE, typename SUMFUNTYPE>
static inline __device__
void wrapAtomicIncWarp(OUTPUTTYPE* addr, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex)
{
//*addr = val;
#if __CUDA_ARCH__ >= 120
wrapAtomicAdd2Warp<laststeps>((int*)addr, 1, key, sumFunObj, Iwrite, warpmutex);
#else
//myAtomicAdd((int*)addr, 1, key, sumFunObj);
#endif
}
template <bool laststeps, typename SUMFUNTYPE>
static inline __device__
void wrapAtomicIncWarp(int* addr, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex)
{
//*addr = val;
#if __CUDA_ARCH__ >= 120
wrapAtomicAdd2Warp<laststeps>(addr, 1, key, sumFunObj, Iwrite, warpmutex);
#else
myAtomicWarpAdd<laststeps>(addr, 1, key, sumFunObj, Iwrite, warpmutex);
#endif
}
// TODO: Consider the following:
// First private hash for each warp - later, share hash-tables between warps
// Try also: private hashes for some threads of one warp etc
template <typename OUTPUTTYPE>
struct myHash
{
int* keys;
#if !USE_ATOMICS_HASH
int* locks;
#endif
OUTPUTTYPE* vals;
OUTPUTTYPE* myBlockOut;
};
template <typename OUTPUTTYPE>
static inline __device__
void InitHash(struct myHash<OUTPUTTYPE> *hash, OUTPUTTYPE zero, int hashSizelog2)
{
int nloops = (1 << hashSizelog2) >> LBLOCK_SIZE_LOG2;
int* myEntry = &hash->keys[threadIdx.x];
for (int i = 0; i < nloops; i++)
{
*myEntry = -1;
myEntry += LBLOCK_SIZE;
}
if ((nloops << LBLOCK_SIZE_LOG2) + threadIdx.x < (1 << hashSizelog2))
{
*myEntry = -1;
}
// Done
}
#if 0 // OLD code
template <typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
void FlushHash(struct myHash<OUTPUTTYPE> *hash, SUMFUNTYPE sumfunObj, int hashSizelog2)
{
int nloops = (1 << hashSizelog2) >> LBLOCK_SIZE_LOG2;
OUTPUTTYPE* myVal = &hash->vals[threadIdx.x];
int* key = &hash->keys[threadIdx.x];
for (int i = 0; i < nloops; i++) {
int keyIndex = *key;
if (keyIndex >= 0) {
hash->myBlockOut[keyIndex] = sumfunObj(*myVal, hash->myBlockOut[keyIndex]);
*key = -1;
}
key += LBLOCK_SIZE;
myVal += LBLOCK_SIZE;
}
if ((nloops << LBLOCK_SIZE_LOG2) + threadIdx.x < (1 << hashSizelog2))
{
int keyIndex = *key;
if (keyIndex >= 0){
hash->myBlockOut[keyIndex] = sumfunObj(*myVal, hash->myBlockOut[keyIndex]);
*key = -1;
}
}
}
#endif // 0
// See: http://www.burtleburtle.net/bob/hash/doobs.html
// Mix by Bob Jenkins
#define HISTO_JENKINS_MIX(A, B, C) \
do { \
A -= B; A -= C; A ^= (C>>13); \
B -= C; B -= A; B ^= (A<<8); \
C -= A; C -= B; C ^= (B>>13); \
A -= B; A -= C; A ^= (C>>12); \
B -= C; B -= A; B ^= (A<<16); \
C -= A; C -= B; C ^= (B>>5); \
A -= B; A -= C; A ^= (C>>3); \
B -= C; B -= A; B ^= (A<<10); \
C -= A; C -= B; C ^= (B>>15); \
} while (0)
static inline __device__
unsigned int histogramHashFunction(int key)
{
#if USE_JENKINS_HASH
unsigned int a = (unsigned int)key;
unsigned int c,b;
// TODO: What are good constants?
b = 0x9e3779b9;
c = 0xf1232345;
HISTO_JENKINS_MIX(a, b, c);
return c;
#else
// Golden ratio hash
return (0x9e3779b9u * (unsigned int)key);
#endif
}
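// Illustrative helper (not compiled, #if 0) showing how AddToHash() below turns the hash
// value into a table slot: the top hashSizelog2 bits of the 32-bit hash select the entry.
// The function name and the example table size of 256 entries (hashSizelog2 == 8) are
// assumptions made up for this sketch.
#if 0
static __device__ int exampleHashSlot(int key, int hashSizelog2 /* e.g. 8 for a 256-entry table */)
{
    unsigned int hashkey = histogramHashFunction(key);
    return (int)(hashkey >> (32 - hashSizelog2));
}
#endif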
#if USE_ATOMICS_HASH
template <typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
void AddToHash(OUTPUTTYPE res, int myKey, struct myHash<OUTPUTTYPE> *hash, SUMFUNTYPE sumfunObj, int hashSizelog2, bool Iwrite, bool unique)
{
if (unique)
{
if (Iwrite)
{
hash->myBlockOut[myKey] = sumfunObj(res, hash->myBlockOut[myKey]);
}
return;
}
unsigned int hashkey = histogramHashFunction(myKey);
volatile __shared__ bool hashFull;
int index = (int)(hashkey >> (32 - hashSizelog2));
bool Iamdone = !Iwrite;
bool IFlush = Iwrite;
hashFull = true;
while (hashFull)
{
// Mark here hash full, and if any thread has problems finding
// free entry in hash, then that thread sets hashFull to nonzero
if (threadIdx.x == 0) hashFull = false;
// Do atomic-part
int old = -2;
int expect = -1;
while (!Iamdone && !hashFull)
{
old = atomicCAS(&hash->keys[index], expect, -3);
if (old == expect) // We won!
{
int key = old;
if (key == -1 || key == myKey)
{
if (key == -1)
{
hash->vals[index] = res;
}
else
{
hash->vals[index] = sumfunObj(res, hash->vals[index]);
IFlush = false;
}
hash->keys[index] = myKey;
Iamdone = true;
}
else
{
hashFull = true;
hash->keys[index] = key;
expect = -1;
}
}
else
{
if (old != myKey)
{
hashFull = true;
expect = -1;
}
else
{
expect = old;
}
}
}
if (IFlush && Iamdone)
{
OUTPUTTYPE* myVal = &hash->vals[index];
int* key = &hash->keys[index];
// TODO: Workaround - get rid of if. Where do the extra flushes come from?
if (*key >= 0) hash->myBlockOut[*key] = sumfunObj(*myVal, hash->myBlockOut[*key]);
//hash->myBlockOut[myKey] = sumfunObj(*myVal, hash->myBlockOut[myKey]);
*key = -1;
}
}
}
#else
template <typename SUMFUNTYPE, typename OUTPUTTYPE>
static inline __device__
void AddToHash(OUTPUTTYPE res, int myKey, struct myHash<OUTPUTTYPE> *hash, SUMFUNTYPE sumfunObj, int hashSizelog2, bool Iwrite, bool unique)
{
if (unique)
{
if (Iwrite)
{
hash->myBlockOut[myKey] = sumfunObj(res, hash->myBlockOut[myKey]);
}
return;
}
unsigned int hashkey = histogramHashFunction(myKey);
volatile __shared__ int hashFull;
int index = (int)(hashkey >> (32 - hashSizelog2));
bool Iamdone = false;
bool IFlush = Iwrite;
// TODO: syncthreads()...
hashFull = -10;
while (hashFull != 0)
{
volatile int* lock = &hash->locks[index];
bool write = Iwrite;
#define TMP_LOCK_MAGIC 0xfffffffe
*lock = TMP_LOCK_MAGIC;
// Mark here hash full, and if any thread has problems finding
// free entry in hash, then that thread sets hashFull to nonzero
if (threadIdx.x == 0) hashFull = 0;
// Do atomic-part
while (1)
{
if (!Iamdone && write) *lock = threadIdx.x;
if (*lock == TMP_LOCK_MAGIC)
break;
if (*lock == threadIdx.x) // We won!
{
int key = hash->keys[index];
if (key == -1)
{
hash->keys[index] = myKey;
hash->vals[index] = res;
Iamdone = true;
}
else if (key == myKey)
{
hash->vals[index] = sumfunObj(res, hash->vals[index]);
Iamdone = true;
IFlush = false;
}
else
{
hashFull = 1;
}
// Do arbitrary atomic op:
write = false;
*lock = TMP_LOCK_MAGIC;
}
}
if (IFlush)
{
OUTPUTTYPE* myVal = &hash->vals[index];
int* key = &hash->keys[index];
// TODO: Workaround - get rid of if. Where do the extra flushes come from?
if (*key >= 0) hash->myBlockOut[*key] = sumfunObj(*myVal, hash->myBlockOut[*key]);
*key = -1;
}
}
#undef TMP_LOCK_MAGIC
}
#endif
template <histogram_type histotype, int nMultires, bool reduce, bool checkStrategy, bool laststep, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
static inline __device__
void histo_largenbin_step(INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, OUTPUTTYPE zero,
INDEXT* myStart, INDEXT end, struct myHash<OUTPUTTYPE> *hash, OUTPUTTYPE* blockOut, int nOut, int stepNum, int stepsleft, int* nSameTot, bool* reduceOut, int hashSizelog2,
OUTPUTTYPE* rOuts, int* rKeys)
{
if (!laststep)
{
if (checkStrategy)
{
int myKeys[nMultires];
int nSame = 0;
OUTPUTTYPE res[nMultires];
xformObj(input, *myStart, &myKeys[0], &res[0], nMultires);
// TODO: Unroll? addtoHash is a big function.. Hmm but, unrolling would enable registers probably
bool Iwrite;
#define ADD_ONE_RESULT(RESIDX, NSAME, CHECK) \
do { if (RESIDX < nMultires) { \
Iwrite = reduceToUnique<histotype, CHECK> \
(&res[RESIDX % nMultires], myKeys[RESIDX % nMultires], NSAME, sumfunObj, rKeys, rOuts); \
if ((threadIdx.x) < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1; \
AddToHash(res[RESIDX % nMultires], myKeys[RESIDX % nMultires], hash, sumfunObj, hashSizelog2, Iwrite, true); \
} } while (0)
ADD_ONE_RESULT(0, &nSame, true);
ADD_ONE_RESULT(1, NULL, false);
ADD_ONE_RESULT(2, NULL, false);
ADD_ONE_RESULT(3, NULL, false);
#undef ADD_ONE_RESULT
//#pragma unroll
for (int resid = 4; resid < nMultires; resid++)
{
bool Iwrite = reduceToUnique<histotype, false>(&res[resid], myKeys[resid], NULL, sumfunObj, rKeys, rOuts);
if ((threadIdx.x) < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1;
AddToHash(res[resid], myKeys[resid], hash, sumfunObj, hashSizelog2, Iwrite, true);
}
*nSameTot += nSame;
checkStrategyFun(reduceOut, nSame, *nSameTot, stepNum, 0);
*myStart += LBLOCK_SIZE;
}
else
{
INDEXT startLim = *myStart + ((LBLOCK_SIZE << LARGE_NBIN_CHECK_INTERVAL_LOG2) - LBLOCK_SIZE);
for (; *myStart < startLim; *myStart += LBLOCK_SIZE)
{
int myKeys[nMultires];
OUTPUTTYPE res[nMultires];
xformObj(input, *myStart, &myKeys[0], &res[0], nMultires);
//#pragma unroll
bool Iwrite = true;
#define ADD_ONE_RESULT(RES) \
do { if (RES < nMultires) { \
if (reduce){ Iwrite = reduceToUnique<histotype, false>(&res[RES % nMultires], \
myKeys[RES % nMultires], NULL, sumfunObj, rKeys, rOuts); \
if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1;} \
AddToHash(res[RES % nMultires], myKeys[RES % nMultires], hash, \
sumfunObj, hashSizelog2, Iwrite, reduce); \
} } while (0)
ADD_ONE_RESULT(0);
ADD_ONE_RESULT(1);
ADD_ONE_RESULT(2);
ADD_ONE_RESULT(3);
#undef ADD_ONE_RESULT
for (int resid = 4; resid < nMultires; resid++)
{
bool Iwrite = true;
if (reduce){
Iwrite = reduceToUnique<histotype, false>(&res[resid], myKeys[resid], NULL, sumfunObj, rKeys, rOuts);
if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1;
}
AddToHash(res[resid], myKeys[resid], hash, sumfunObj, hashSizelog2, Iwrite, reduce);
}
}
}
}
else // These are the last steps then
{
for (int substep = 0; substep < stepsleft; substep++)
{
int myKeys[nMultires];
OUTPUTTYPE res[nMultires];
bool Iwrite = false;
if (*myStart < end)
{
Iwrite = true;
xformObj(input, *myStart, &myKeys[0], &res[0], nMultires);
}
else
{
#pragma unroll
for (int resid = 0; resid < nMultires; resid++)
{
res[resid] = zero;
myKeys[resid] = 0;
}
}
//#pragma unroll
{
bool Iwrite2 = Iwrite;
#define ADD_ONE_RESULT(RES) \
do { if (RES < nMultires) { \
if (reduce){ Iwrite2 = reduceToUnique<histotype, false> \
(&res[RES % nMultires], myKeys[RES % nMultires], NULL, sumfunObj, rKeys, rOuts); \
if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1; } \
AddToHash(res[RES % nMultires], myKeys[RES % nMultires], hash, sumfunObj, hashSizelog2, Iwrite2, reduce); \
} } while(0)
ADD_ONE_RESULT(0);
ADD_ONE_RESULT(1);
ADD_ONE_RESULT(2);
ADD_ONE_RESULT(3);
#undef ADD_ONE_RESULT
for (int resid = 4; resid < nMultires; resid++)
{
//bool Iwrite2 = true;
if (reduce){
Iwrite2 = reduceToUnique<histotype, false>(&res[resid], myKeys[resid], NULL, sumfunObj, rKeys, rOuts);
if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1;
}
AddToHash(res[resid], myKeys[resid], hash, sumfunObj, hashSizelog2, Iwrite2, reduce);
}
}
*myStart += LBLOCK_SIZE;
}
}
}
template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
__global__
void histo_kernel_largeNBins(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero,
OUTPUTTYPE* blockOut, int nOut,
int nSteps,
int hashSizelog2)
{
extern __shared__ int keys[];
#if USE_ATOMICS_HASH
OUTPUTTYPE* vals = (OUTPUTTYPE*)(&keys[1 << hashSizelog2]);
if (hashSizelog2 < LBLOCK_SIZE_LOG2)
vals = &keys[1 << LBLOCK_SIZE_LOG2];
#else
int* locks = &keys[1 << hashSizelog2];
if (hashSizelog2 < LBLOCK_SIZE_LOG2)
locks = &keys[1 << LBLOCK_SIZE_LOG2];
OUTPUTTYPE* vals = (OUTPUTTYPE*)(&locks[1 << hashSizelog2]);
#endif
/*int* rKeys = (int*)(&vals[1 << hashSizelog2]);
OUTPUTTYPE* rOuts = (OUTPUTTYPE*)(&rKeys[LBLOCK_SIZE]);*/
int* rKeys = &keys[0];
OUTPUTTYPE* rOuts = vals;
struct myHash<OUTPUTTYPE> hash;
hash.keys = keys;
#if !USE_ATOMICS_HASH
hash.locks = locks;
#endif
hash.vals = vals;
// Where do we put the results from our warp (block)?
hash.myBlockOut = &blockOut[nOut * blockIdx.x];
INDEXT myStart = start + (INDEXT)(((blockIdx.x * nSteps) << LBLOCK_SIZE_LOG2) + threadIdx.x);
// Assert that myStart is not out of bounds!
int nFullSteps = nSteps >> LARGE_NBIN_CHECK_INTERVAL_LOG2;
bool reduce = false;
InitHash(&hash, zero, hashSizelog2);
int nSameTot = 0;
for (int fstep = 0; fstep < nFullSteps; fstep++)
{
int stepNum = fstep << LARGE_NBIN_CHECK_INTERVAL_LOG2;
histo_largenbin_step<histotype, nMultires, true, true, false,INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum, 0, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys);
if (reduce) {
histo_largenbin_step<histotype, nMultires, true, false, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum + 1, 0, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys);
} else {
histo_largenbin_step<histotype, nMultires, false, false, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum + 1, 0, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys);
}
}
// Last steps
int nstepsleft = nSteps - (nFullSteps << LARGE_NBIN_CHECK_INTERVAL_LOG2);
if (nstepsleft > 0)
{
int stepNum = nFullSteps << LARGE_NBIN_CHECK_INTERVAL_LOG2;
if (reduce)
histo_largenbin_step<histotype, nMultires, true, false, true, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum, nstepsleft, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys);
else
histo_largenbin_step<histotype, nMultires, false, false, true, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum, nstepsleft, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys);
}
// Flush values still in hash
//FlushHash(&hash, sumfunObj, hashSizelog2);
}
#if USE_MEDIUM_PATH
//
template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
__global__
void histo_kernel_mediumNBins(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero,
OUTPUTTYPE* blockOut, int nOut,
int nSteps)
{
#if __CUDA_ARCH__ >= 120
OUTPUTTYPE* ourOut = &blockOut[nOut * (threadIdx.x % MED_THREAD_DEGEN) * blockIdx.x];
INDEXT myStart = start + (INDEXT)(((blockIdx.x * nSteps) << MEDIUM_BLOCK_SIZE_LOG2) + threadIdx.x);
bool reduce = false;
int nSameTot = 0;
for (int step = 0; step < nSteps - 1; step++)
{
bool check = false;
int myKey[nMultires];
OUTPUTTYPE myOut[nMultires];
xformObj(input, myStart, &myKey[0], &myOut[0],nMultires);
// TODO: magic constant
if ((step & 63) == 0)
check = true;
{
int nSame;
__shared__ int keys[MEDIUM_BLOCK_SIZE];
__shared__ OUTPUTTYPE rOut[MEDIUM_BLOCK_SIZE];
int warpIdx = threadIdx.x >> 5;
int* wkeys = &keys[warpIdx << 5];
OUTPUTTYPE* wOut = &rOut[warpIdx << 5];
bool Iwrite;
#define ADD_ONE_RESULT(RESID) \
do { if (RESID < nMultires) { \
if (reduce || check){ \
if (check) Iwrite = reduceToUnique<histotype, true> \
(&myOut[RESID % nMultires], myKey[RESID % nMultires], \
&nSame, sumfunObj, wkeys, wOut); \
else Iwrite = reduceToUnique<histotype, false> \
(&myOut[RESID % nMultires], myKey[RESID % nMultires], NULL, sumfunObj, \
wkeys, wOut); \
if (Iwrite) \
atomicAdd(&ourOut[myKey[RESID % nMultires]], myOut[RESID % nMultires]); \
if (check){ \
nSameTot += nSame; \
checkStrategyFun(&reduce, nSame, nSameTot, step, 0); \
check = false; \
} \
} else { \
if (histotype == histogram_atomic_inc) \
atomicAdd(&ourOut[myKey[RESID % nMultires]], 1); \
else if (histotype == histogram_atomic_add) \
atomicAdd(&ourOut[myKey[RESID % nMultires]], myOut[RESID % nMultires]); \
} } \
} while(0)
ADD_ONE_RESULT(0);
ADD_ONE_RESULT(1);
ADD_ONE_RESULT(2);
ADD_ONE_RESULT(3);
//#pragma unroll
for (int resid = 4; resid < nMultires; resid++)
{
ADD_ONE_RESULT(resid);
}
}
myStart += MEDIUM_BLOCK_SIZE;
}
if (myStart < end)
{
int myKey[nMultires];
OUTPUTTYPE myOut[nMultires];
xformObj(input, myStart, &myKey[0], &myOut[0],nMultires);
for (int resid = 0; resid < nMultires; resid++)
{
if (histotype == histogram_atomic_inc)
{
atomicAdd(&ourOut[myKey[resid]], 1);
}
else if (histotype == histogram_atomic_add)
{
atomicAdd(&ourOut[myKey[resid]], myOut[resid]);
}
}
}
#endif // __CUDA_ARCH__
}
#endif // USE_MEDIUM_PATH
static int determineHashSizeLog2(size_t outSize, int* nblocks, cudaDeviceProp* props)
{
// TODO: Magic hat-constant 500 reserved for inputs, how to compute?
int sharedTot = (props->sharedMemPerBlock - 500) /* / LBLOCK_WARPS*/;
//int sharedTot = 32000;
// How many blocks of 32 keys could we have?
//int nb32Max = sharedTot / (32 * outSize);
// But ideally we should run at least 4 active blocks per SM.
// How can we balance this? Well - with very low active-block counts (a)
// we perform badly, but after 4, adding more
// will help less and less, whereas adding more to the hash always helps!
#if USE_ATOMICS_HASH
outSize += sizeof(int);
#else
outSize += sizeof(int);
#endif
int naMax = sharedTot / (32 * outSize);
while (naMax > numActiveUpperLimit) naMax >>= 1;
int nb32 = sharedTot / (32 * outSize * naMax);
// Now we have "number of pieces", use it to compute some nice power-of-two hash-size
int hashSize = nb32 * 32;
unsigned int res = 0;
if (hashSize >= 1<<16) { hashSize >>= 16; res += 16; }
if (hashSize >= 1<< 8) { hashSize >>= 8; res += 8; }
if (hashSize >= 1<< 4) { hashSize >>= 4; res += 4; }
if (hashSize >= 1<< 2) { hashSize >>= 2; res += 2; }
if (hashSize >= 1<< 1) { res += 1; }
// Now res holds the log2 of hash size => n active blocks = sharedTot / (outSize << res);
*nblocks = (sharedTot / (outSize << res)) * props->multiProcessorCount;
if (*nblocks > props->multiProcessorCount * 8) *nblocks = props->multiProcessorCount * 8;
return res;
}
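// Worked example of the sizing above (assumptions: 48 KB of shared memory per block,
// 4-byte output bins; the exact numbers depend on the device):
//   sharedTot = 49152 - 500 = 48652, outSize = 4 + sizeof(int) = 8
//   naMax = 48652 / (32 * 8) = 190 -> halved to 95, 47, 23 (first value <= numActiveUpperLimit)
//   nb32 = 48652 / (32 * 8 * 23) = 8 -> hashSize = 8 * 32 = 256 -> returned res = 8
//   *nblocks = (48652 / (8 << 8)) * SMs = 23 * SMs, capped to 8 * SMs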
template <typename OUTPUTTYPE>
__global__
void initKernel(OUTPUTTYPE* tmpOut, OUTPUTTYPE zeroVal, int tmpOutSize, int steps)
{
int idx = blockIdx.x * blockDim.x * steps + threadIdx.x;
for (int step = 0; step < steps; step++)
{
if (idx < tmpOutSize)
tmpOut[idx] = zeroVal;
idx += blockDim.x;
}
}
template <histogram_type histotype, typename OUTPUTTYPE>
static int getLargeBinTmpbufsize(int nOut, cudaDeviceProp* props, int cuda_arch)
{
int nblocks;
int hashSizelog2 = determineHashSizeLog2(sizeof(OUTPUTTYPE), &nblocks, props);
int arrLen = nblocks;
#if USE_MEDIUM_PATH
if (cuda_arch >= 120 && (histotype == histogram_atomic_inc || histotype == histogram_atomic_add))
arrLen *= MED_THREAD_DEGEN;
#endif
return (arrLen + 1) * nOut * sizeof(OUTPUTTYPE);
}
template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
static
void callHistogramKernelLargeNBins(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
cudaDeviceProp* props, int cuda_arch, cudaStream_t stream,
int* getTmpBufSize,
void* tmpBuffer,
bool outInDev)
{
int nblocks;
int hashSizelog2 = determineHashSizeLog2(sizeof(OUTPUTTYPE), &nblocks, props);
INDEXT size = end - start;
// Check if there is something to do actually...
if (end <= start)
{
if (getTmpBufSize) *getTmpBufSize = 0;
return;
}
dim3 block = LBLOCK_SIZE;
dim3 grid = nblocks;
int arrLen = nblocks;
#if USE_MEDIUM_PATH
if (cuda_arch >= 120 && (histotype == histogram_atomic_inc || histotype == histogram_atomic_add))
arrLen *= MED_THREAD_DEGEN;
#endif
INDEXT nSteps = size / (INDEXT)( LBLOCK_SIZE * nblocks);
OUTPUTTYPE* tmpOut;
//int n = nblocks;
if (getTmpBufSize) {
*getTmpBufSize = (arrLen + 1) * nOut * sizeof(OUTPUTTYPE);
return;
}
if (tmpBuffer){
tmpOut = (OUTPUTTYPE*)tmpBuffer;
}
else {
size_t allocSize = (arrLen + 1) * nOut * sizeof(OUTPUTTYPE);
cudaMalloc((void**)&tmpOut, allocSize);
}
//printf("Using hash-based histogram: hashsize = %d, nblocksToT = %d\n", (1 << hashSizelog2), nblocks);
#if USE_ATOMICS_HASH
int extSharedNeeded = (1 << hashSizelog2) * (sizeof(OUTPUTTYPE) + sizeof(int));
#else
int extSharedNeeded = (1 << hashSizelog2) * (sizeof(OUTPUTTYPE) + sizeof(int) * 2);
#endif
// The shared memory here is needed for the reduction code (ie. reduce to unique)
// TODO: new hash-code could probably reuse the memory reserved for the hash-table,
// it would just need to reinit the keys to -1 after use - think about it.
if (cuda_arch >= 200 && histotype == histogram_atomic_inc)
{
if (hashSizelog2 < LBLOCK_SIZE_LOG2)
extSharedNeeded += (sizeof(int) << (LBLOCK_SIZE_LOG2 - hashSizelog2));
}
else
{
if (hashSizelog2 < LBLOCK_SIZE_LOG2)
extSharedNeeded += ((sizeof(OUTPUTTYPE) + sizeof(int)) << (LBLOCK_SIZE_LOG2 - hashSizelog2));
}
//printf("binsets = %d, steps = %d\n", (1 << nKeysetslog2), nsteps);
{
#define IBLOCK_SIZE_LOG2 7
#define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2)
int initPaddedSize =
((arrLen * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1));
const dim3 initblock = IBLOCK_SIZE;
dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 );
int nsteps = 1;
while (initgrid.x > (1 << 14))
{
initgrid.x >>= 1;
nsteps <<= 1;
if (nsteps * initgrid.x * IBLOCK_SIZE < arrLen * nOut)
initgrid.x++;
}
initKernel<<<initgrid,initblock,0,stream>>>(tmpOut, zero, arrLen * nOut, nsteps);
}
//int medExtShared = nOut;
//const int shLimit = 0;
//const int shLimit = 0;//16000 / 2;
// Codepath below is a lot faster for random bins, a tad faster for real use-case
// and a lot slower for degenerate key-distributions
#if USE_MEDIUM_PATH
if (cuda_arch >= 120 && (histotype == histogram_atomic_inc || histotype == histogram_atomic_add))
{
const dim3 block = MEDIUM_BLOCK_SIZE;
dim3 grid = nblocks;
INDEXT nSteps = size / (INDEXT)( MEDIUM_BLOCK_SIZE * nblocks);
INDEXT nFullSteps = 1;
if (nSteps <= 0)
{
nFullSteps = 0;
nblocks = (size >> MEDIUM_BLOCK_SIZE_LOG2);
if ((nblocks << MEDIUM_BLOCK_SIZE_LOG2) < size) nblocks++;
}
if (nSteps > MAX_NLHSTEPS)
{
nFullSteps = size / ( MEDIUM_BLOCK_SIZE * nblocks * MAX_NLHSTEPS);
nSteps = MAX_NLHSTEPS;
}
for (INDEXT step = 0; step < nFullSteps; step++)
{
histo_kernel_mediumNBins<histotype, nMultires><<<grid, block, 0, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps);
start += (MEDIUM_BLOCK_SIZE * (INDEXT)nblocks * nSteps);
}
size = end - start;
nSteps = size / (INDEXT)( MEDIUM_BLOCK_SIZE * nblocks);
if (nSteps > 0)
{
histo_kernel_mediumNBins<histotype, nMultires><<<grid, block, 0, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps);
start += (MEDIUM_BLOCK_SIZE * (INDEXT)nblocks * nSteps);
size = end - start;
}
if (size > 0)
{
int ntblocks = size / ( MEDIUM_BLOCK_SIZE );
if (ntblocks * MEDIUM_BLOCK_SIZE < size) ntblocks++;
grid.x = ntblocks;
histo_kernel_mediumNBins<histotype, nMultires><<<grid, block, 0, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, 1);
}
}
else
#endif // USE_MEDIUM_PATH
{
INDEXT nFullSteps = 1;
if (nSteps <= 0)
{
nFullSteps = 0;
nblocks = (size >> LBLOCK_SIZE_LOG2);
if ((nblocks << LBLOCK_SIZE_LOG2) < size) nblocks++;
}
if (nSteps > MAX_NLHSTEPS)
{
nFullSteps = size / ( LBLOCK_SIZE * (INDEXT)nblocks * MAX_NLHSTEPS);
nSteps = MAX_NLHSTEPS;
}
for (int step = 0; step < nFullSteps; step++)
{
histo_kernel_largeNBins<histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps, hashSizelog2);
start += (LBLOCK_SIZE * (INDEXT)nblocks * nSteps);
}
size = end - start;
nSteps = size / ( LBLOCK_SIZE * (INDEXT)nblocks);
if (nSteps > 0)
{
histo_kernel_largeNBins<histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps, hashSizelog2);
start += (LBLOCK_SIZE * (INDEXT)nblocks * nSteps);
size = end - start;
}
if (size > 0)
{
int ntblocks = size / ( LBLOCK_SIZE );
if (ntblocks * LBLOCK_SIZE < size) ntblocks++;
grid.x = ntblocks;
histo_kernel_largeNBins<histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, 1, hashSizelog2);
}
}
#if H_ERROR_CHECKS
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
printf("Cudaerror = %s\n", cudaGetErrorString( error ));
#endif
// OK - so now tmpOut contains our gold - we just need to dig it out now
enum cudaMemcpyKind fromOut = outInDev ? cudaMemcpyDeviceToDevice : cudaMemcpyHostToDevice;
enum cudaMemcpyKind toOut = outInDev ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost;
if (stream != 0)
cudaMemcpyAsync(&tmpOut[arrLen * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut, stream);
else
cudaMemcpy(&tmpOut[arrLen * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut);
grid.x = nOut;
//grid.x = nOut >> LBLOCK_SIZE_LOG2;
//if ((grid.x << LBLOCK_SIZE_LOG2) < nOut) grid.x++;
block.x = GATHER_BLOCK_SIZE;
gatherKernel<<<grid, block, 0, stream>>>(sumfunObj, tmpOut, nOut, arrLen /** LBLOCK_WARPS*/, zero);
// TODO: Async copy here also???
if (outInDev && stream != 0)
cudaMemcpyAsync(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut, stream);
else
cudaMemcpy(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut);
// CPU-code path for debugging here:
/* {
int resIdx;
int i;
OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(nblocks * nOut * sizeof(OUTPUTTYPE));
//parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE));
cudaMemcpy(h_tmp, tmpOut, nblocks*nOut*sizeof(OUTPUTTYPE), cudaMemcpyDeviceToHost);
for (resIdx = 0; resIdx < nOut; resIdx++)
{
OUTPUTTYPE res = out[resIdx];
for (i = 0; i < nblocks; i++)
{
res = sumfunObj(res, h_tmp[i * nOut + resIdx]);
}
out[resIdx] = sumfunObj(res, out[resIdx]);
}
free(h_tmp);
}
*/
if (!tmpBuffer)
cudaFree(tmpOut);
}
static int determineNKeySetsLog2(size_t size_out, int nOut, cudaDeviceProp* props)
{
// 32 threads per block, one block shares one binset
// Go for 2x occupancy = 64 active threads per block
// Hence if we have NBinSets, then we need tot_size x nOut x NBinSets x 2 bytes of shared
// On sm_20 we have 48 000 bytes and on sm_1x 16 000
// Hence nbinsets = SharedMem / (2 * tot_size * nOut)
// For example sm_20, 16 int bins:
// nbinsets = 48000 / (2 * 4 * 16) = 48000 / 128 = 375...
// More than enough, but is it enough active threadblocks??
int nBytesShared = 16000;
size_t sizetot = size_out + sizeof(int);
int nBinSets = nBytesShared / (sizetot * 2 * nOut);
// NOTE: Disabling for now - advantages seem nonexistent
// if (nBinSets >= 32) return 5;
// if (nBinSets >= 16) return 4;
// if (nBinSets >= 8) return 3;
// if (nBinSets >= 4) return 2;
// if (nBinSets >= 2) return 1;
if (nBinSets >= 1) return 0;
return -1;
}
#if __CUDA_ARCH__ >= 200
template <int nMultires>
static inline __device__
bool checkForReduction (int* myKeys, int* rkeys)
{
// Idea - if there is a large number of degenerate entries then we don't need to check them all for degeneracy
// TODO: Implement the wonderful idea
//return ((threadIdx.x >> 5) & 3) < 3;
#if 1
bool myKeyDegenerate;
//TAKE_WARP_MUTEX(0);
rkeys[threadIdx.x & 31] = myKeys[0];
    // Check the next lane's key (the second neighbour comparison below is disabled)
myKeyDegenerate =
(myKeys[0] == (rkeys[(threadIdx.x + 1) & 31]))
/*||
(myKeys[0] == (rkeys[(threadIdx.x + 8) & 31]))*/;
//GIVE_WARP_MUTEX(0);
unsigned int degenMask = __ballot(myKeyDegenerate);
// Estimate number of degenerate keys - if all are degenerate, the estimate is accurate
int nDegen = __popc(degenMask);
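    // degenMask has one bit set for every lane whose key equals the next lane's key, so
    // __popc gives a cheap per-warp estimate of how many entries are degenerate this round.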
if (nDegen > HISTOGRAM_DEGEN_LIMIT)
return true;
else
return false;
#endif
}
#endif
template <histogram_type histotype, int nBinSetslog2, int nMultires, bool laststeps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
static inline __device__
void histogramKernel_stepImpl(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT end,
OUTPUTTYPE zero,
int nOut, INDEXT startidx,
OUTPUTTYPE* bins, int* locks,
OUTPUTTYPE* rvals, int* rkeys,
int* doReduce, bool checkReduce,
int* warpmutex)
{
int myKeys[nMultires];
OUTPUTTYPE vals[nMultires];
bool doWrite = true;
if (laststeps){
if (startidx < end)
{
xformObj(input, startidx, &myKeys[0], &vals[0], nMultires);
}
else
{
doWrite = false;
#pragma unroll
for (int r = 0; r < nMultires; r++){
vals[r] = zero;
myKeys[r] = -1;
}
}
}
else
{
xformObj(input, startidx, &myKeys[0], &vals[0], nMultires);
}
// See keyIndex-reasoning above
int binSet = (threadIdx.x & ((1 << nBinSetslog2) - 1));
#if __CUDA_ARCH__ >= 200
/* if (laststeps){
*doReduce = false;
}
else*/
{
if (checkReduce){
*doReduce = checkForReduction<nMultires>(myKeys, rkeys);
if (histotype == histogram_generic || histotype == histogram_atomic_add){
__shared__ int tmp;
tmp = 0;
__syncthreads();
if (*doReduce && ((threadIdx.x & 31) == 0)) atomicAdd(&tmp, 1);
__syncthreads();
if (tmp > HBLOCK_SIZE / 2)
*doReduce = true;
else
*doReduce = false;
}
//if (laststeps) *doReduce = false;
/* __syncthreads();
bool tmpred = checkForReduction<nMultires>(myKeys, rkeys);
if ((threadIdx.x & 31) == 0) atomicExch(doReduce, (int)tmpred);
__syncthreads();*/
}
}
#endif
// TODO: Unroll this later - nvcc (at least older versions) can't unroll atomics (?)
// TODO: How to avoid bank-conflicts? Any way to avoid?
#if __CUDA_ARCH__ >= 200
#define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \
int keyIndex = doWrite == false ? 0 : (myKeys[(RESID % nMultires)] << nBinSetslog2) + binSet; \
if (*doReduce){\
if (histotype == histogram_generic || histotype == histogram_atomic_add){\
bool Iwrite;\
TAKE_WARP_MUTEX(0);\
Iwrite = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\
if (Iwrite && doWrite) bins[keyIndex] = sumfunObj(bins[keyIndex], vals[(RESID % nMultires)]);\
/*if (histotype == histogram_generic) myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);\
else wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);*/\
GIVE_WARP_MUTEX(0);\
} else { \
bool Iwrite = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\
wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex); \
}\
} else {\
if (histotype == histogram_generic)\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
else if (histotype == histogram_atomic_add)\
wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
else if (histotype == histogram_atomic_inc)\
wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
else{\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
}\
} } } while (0)
#else
#define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \
int keyIndex = doWrite == false ? 0 : (myKeys[(RESID % nMultires)] << nBinSetslog2) + binSet; \
if (histotype == histogram_generic)\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
else if (histotype == histogram_atomic_add)\
wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
else if (histotype == histogram_atomic_inc)\
wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
else{\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\
}\
} } while (0)
#endif
ONE_HS_STEP(0);
ONE_HS_STEP(1);
ONE_HS_STEP(2);
ONE_HS_STEP(3);
//#pragma unroll
for (int resid = 4; resid < nMultires; resid++){
ONE_HS_STEP(resid);
}
#undef ONE_HS_STEP
}
template <int nBinSetslog2, histogram_type histotype, int nMultires, bool lastSteps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
__global__
void histogramKernel_sharedbins_new(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero,
OUTPUTTYPE* blockOut, int nOut,
int outStride,
int nSteps)
{
extern __shared__ int cudahistogram_binstmp[];
OUTPUTTYPE* bins = (OUTPUTTYPE*)&(*cudahistogram_binstmp);
int* locks = (int*)&bins[(nOut << nBinSetslog2)];
int* rkeys = NULL;
OUTPUTTYPE* rvals = NULL;
//__shared__
int warpmutex;
//INIT_WARP_MUTEX2(warpmutex);
#if __CUDA_ARCH__ >= 200
int warpId = threadIdx.x >> 5;
if (histotype == histogram_generic)
rkeys = &locks[(nOut << nBinSetslog2)];
else
rkeys = locks;
rvals = (OUTPUTTYPE*)&rkeys[32];
if (histotype == histogram_atomic_inc){
rkeys = &rkeys[warpId << 5];
//rvals = &rvals[warpId << 5];
}
#endif
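    // Dynamic shared memory layout: bins[nOut << nBinSetslog2] first, followed (for
    // histogram_generic) by an equally sized lock array, and then a small region of
    // reduction keys/values used by the warp-level reduceToUnique path (per-warp key
    // slots in the histogram_atomic_inc case).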
const int nBinSets = 1 << nBinSetslog2;
// Reset all bins to zero...
for (int j = 0; j < ((nOut << nBinSetslog2) >> HBLOCK_SIZE_LOG2) + 1; j++)
{
int bin = (j << HBLOCK_SIZE_LOG2) + threadIdx.x;
if (bin < (nOut << nBinSetslog2)){
bins[bin] = zero;
}
}
#if HBLOCK_SIZE > 32
__syncthreads();
#endif
int outidx = blockIdx.x;
INDEXT startidx = (INDEXT)((outidx * nSteps) * HBLOCK_SIZE + start + threadIdx.x);
/*__shared__*/ int doReduce; // local var - TODO: Is this safe??
doReduce = 0;
#define MED_UNROLL_LOG2 2
#define MED_UNROLL (1 << MED_UNROLL_LOG2)
int step;
for (step = 0; step < (nSteps >> MED_UNROLL_LOG2); step++)
{
//#pragma unroll
//for (int substep = 0; substep < MED_UNROLL; substep++){
histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, true, &warpmutex);
startidx += HBLOCK_SIZE;
histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex);
startidx += HBLOCK_SIZE;
histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex);
startidx += HBLOCK_SIZE;
histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex);
startidx += HBLOCK_SIZE;
//}
}
step = (nSteps >> MED_UNROLL_LOG2) << MED_UNROLL_LOG2;
for (; step < nSteps ; step++)
{
histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, lastSteps, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, (step & 7) == 0, &warpmutex);
startidx += HBLOCK_SIZE;
}
#undef MED_UNROLL
#undef MED_UNROLL_LOG2
#if HBLOCK_SIZE > 32
__syncthreads();
#endif
// Finally put together the bins
for (int j = 0; j < (nOut >> HBLOCK_SIZE_LOG2) + 1; j++) {
int key = (j << HBLOCK_SIZE_LOG2) + threadIdx.x;
if (key < nOut)
{
OUTPUTTYPE res = blockOut[key * outStride + outidx];
//int tmpBin = bin;
#pragma unroll
for (int k = 0; k < nBinSets; k++)
{
//tmpBin += nOut;
res = sumfunObj(res, bins[(key << nBinSetslog2) + k]);
}
//printf("tid:%02d, write out bin: %02d, \n", threadIdx.x, bin);
blockOut[key * outStride + outidx] = res;
}
}
}
template <histogram_type histotype, typename OUTPUTTYPE>
static int getMediumHistoTmpbufSize(int nOut, cudaDeviceProp* props)
{
int nblocks = props->multiProcessorCount * 8;
// NOTE: The other half is used by multireduce...
return 2 * nblocks * nOut * sizeof(OUTPUTTYPE);
}
template <histogram_type histotype, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
static
void callHistogramKernelImpl(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
cudaDeviceProp* props,
cudaStream_t stream,
size_t* getTmpBufSize,
void* tmpBuffer,
bool outInDev,
int cuda_arch)
{
INDEXT size = end - start;
// Check if there is something to do actually...
if (end <= start)
{
if (getTmpBufSize) *getTmpBufSize = 0;
return;
}
int nblocks = props->multiProcessorCount * 8;
// Assert that our grid is not too large!
//MY_ASSERT(n < 65536 && "Sorry - currently we can't do such a big problems with histogram-kernel...");
// One entry for each output for each thread-block:
//OUTPUTTYPE* tmpOut = (OUTPUTTYPE*)parallel_alloc(MemType_DEV, n * nOut * sizeof(OUTPUTTYPE));
OUTPUTTYPE* tmpOut;
if (getTmpBufSize)
{
// NOTE: The other half is used by multireduce...
*getTmpBufSize = 2 * nblocks * nOut * sizeof(OUTPUTTYPE);
return;
}
int nsteps = size / ( nblocks * HBLOCK_SIZE );
if (nsteps * nblocks * HBLOCK_SIZE < size) nsteps++;
if (nsteps > MAX_NHSTEPS)
nsteps = MAX_NHSTEPS;
if (tmpBuffer)
{
char* tmpptr = (char*)tmpBuffer;
tmpOut = (OUTPUTTYPE*)tmpBuffer;
tmpBuffer = (void*)&tmpptr[nblocks * nOut * sizeof(OUTPUTTYPE)];
}
else
{
cudaMalloc((void**)&tmpOut, nblocks * nOut * sizeof(OUTPUTTYPE));
}
/* For block size other that power of two:
const dim3 grid = size / BLOCK_SIZE +
( size % BLOCK_SIZE == 0 ? 0 : 1 );
*/
//MY_ASSERT(size > 0);
//cudaMemsetAsync(tmpOut, 0xFF, n * nOut * sizeof(OUTPUTTYPE), CURRENT_STREAM() );
//cudaMemset(tmpOut, 0xFF, n * nOut * sizeof(OUTPUTTYPE) );
{
#define IBLOCK_SIZE_LOG2 7
#define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2)
int initPaddedSize =
((nblocks * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1));
const dim3 initblock = IBLOCK_SIZE;
dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 );
int nsteps = 1;
while (initgrid.x > (1 << 14))
{
initgrid.x >>= 1;
nsteps <<= 1;
if (nsteps * initgrid.x * IBLOCK_SIZE < nblocks * nOut)
initgrid.x++;
}
initKernel<<<initgrid,initblock,0,stream>>>(tmpOut, zero, nblocks * nOut, nsteps);
#undef IBLOCK_SIZE_LOG2
#undef IBLOCK_SIZE
}
int nKeysetslog2 = determineNKeySetsLog2(sizeof(OUTPUTTYPE), nOut, props);
if (nKeysetslog2 < 0) nKeysetslog2 = 0;
int extSharedNeeded = ((nOut << nKeysetslog2)) * (sizeof(OUTPUTTYPE)); // bins
if (histotype == histogram_generic || cuda_arch < 130)
extSharedNeeded += ((nOut << nKeysetslog2)) * (sizeof(int)); // locks
if (cuda_arch >= 200)
{
// Reduction stuff:
if (histotype == histogram_generic || histotype == histogram_atomic_add)
{
extSharedNeeded += ((sizeof(OUTPUTTYPE) + sizeof(int)) << 5); // reduction values
}
else
{
extSharedNeeded += (sizeof(int) << HBLOCK_SIZE_LOG2); // keys per warp of one thread
}
}
/*int extSharedNeeded = ((nOut << nKeysetslog2)) * (sizeof(OUTPUTTYPE) + sizeof(int)) + (sizeof(OUTPUTTYPE) * HBLOCK_SIZE);
if (nOut < HBLOCK_SIZE) extSharedNeeded += sizeof(int) * (HBLOCK_SIZE - nOut);
if (cuda_arch < 130)
extSharedNeeded += ((nOut << nKeysetslog2)) * (sizeof(int));*/
//printf("binsets = %d, steps = %d\n", (1 << nKeysetslog2), nsteps);
int nOrigBlocks = nblocks;
INDEXT myStart = start;
while(myStart < end)
{
bool lastStep = false;
if (myStart + nsteps * nblocks * HBLOCK_SIZE > end)
{
size = end - myStart;
nsteps = (size) / (nblocks * HBLOCK_SIZE);
if (nsteps < 1)
{
lastStep = true;
nsteps = 1;
nblocks = size / HBLOCK_SIZE;
if (nblocks * HBLOCK_SIZE < size)
nblocks++;
}
}
dim3 grid = nblocks;
dim3 block = HBLOCK_SIZE;
switch (nKeysetslog2)
{
case 0:
if (lastStep)
histogramKernel_sharedbins_new<0, histotype, nMultires, true><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps);
else
histogramKernel_sharedbins_new<0, histotype, nMultires, false><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps);
break;
/* case 1:
histogramKernel_sharedbins_new<1, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps);
break;
case 2:
histogramKernel_sharedbins_new<2, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps);
break;
case 3:
histogramKernel_sharedbins_new<3, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps);
break;
case 4:
histogramKernel_sharedbins_new<4, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps);
break;
case 5:
histogramKernel_sharedbins_new<5, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps);
break;*/
case -1:
// TODO: Error?
//assert(0); // "Sorry - not implemented yet"
break;
}
myStart += nsteps * nblocks * HBLOCK_SIZE;
}
#if H_ERROR_CHECKS
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
printf("Cudaerror = %s\n", cudaGetErrorString( error ));
#endif
// OK - so now tmpOut contains our gold - we just need to dig it out now
callMultiReduce(nOrigBlocks, nOut, out, tmpOut, sumfunObj, zero, stream, tmpBuffer, outInDev);
// Below same as host-code
#if 0
{
int resIdx;
int i;
OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(n * nOut * sizeof(OUTPUTTYPE));
//parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE));
cudaMemcpy(h_tmp, tmpOut, n*nOut*sizeof(OUTPUTTYPE), cudaMemcpyDeviceToHost);
for (resIdx = 0; resIdx < nOut; resIdx++)
{
OUTPUTTYPE res = out[resIdx];
for (i = 0; i < n; i++)
{
res = sumfunObj(res, h_tmp[i + resIdx * n]);
}
out[resIdx] = res;
}
free(h_tmp);
}
#endif
//parallel_free(tmpOut, MemType_DEV);
if (!tmpBuffer)
cudaFree(tmpOut);
}
template <typename OUTTYPE>
static
bool binsFitIntoShared(int nOut, OUTTYPE zero, cudaDeviceProp* props, int cuda_arch)
{
    // Assume here we can only use 16 kB of shared memory in total per SM.
    // Also let's take a minimum of 2 active threads per functional unit in
    // order to be able to hide at least some latencies - for Fermi this means 32 * 2 = 64
    // active threads needed in total (Note: this is minimal and will hurt perf).
    // Also we run blocks of 32 threads and each block needs its own bin-set - therefore
    // we need in total 2 full bin-sets per SM plus 32 bins for the working part
    // of the algorithm.
    // Due to these considerations we infer that we can fit it nicely in, if
    // (4 binsets x Nbins/binset + 32) x sizeof(OUTTYPE) < 16 kB - let's take 16 kB here to have some room
    // for required parameters.
    // Example: 64 double bins: 8 bytes per double => (4 * 64 + 32) * 8 bytes = 288 * 8 bytes = 2304 bytes -> Easy
    // How many bins of doubles can we do within these limits?
    // (4 * x + 32) * 8 bytes = 16000 bytes <=> 4x = 2000 - 32 => x = 2000/4 - 32/4 = 500 - 8 = 492 bins.
    // TODO: A possibly faster version of this would be to share one set of bins over as many warps as possible:
    // for example, if we were to use 512 threads = 16 warps, then this would be fine for hiding probably all major latencies
    // and we could get away with just one bin-set per SM:
    // (x + 512) * 8 bytes = 16000 bytes <=> x = 2000 - 512 = 1488 bins! With better latency-hiding
    // On the other hand this requires atomic operations on the shared memory, which could be somewhat slower for
    // arbitrary types, but all in all, this would seem to provide a better route. At least worth investigating...
int shlimit = props->sharedMemPerBlock - 300;
int limit = shlimit;
// TODO: Pessimistic limit
int need = (sizeof(zero) + sizeof(int)) * nOut;
if (cuda_arch >= 200)
need += HBLOCK_SIZE * sizeof(int) + 32 * sizeof(zero);
if (need <= limit)
return true;
return false;
}
template <bool subHisto, histogram_type histotype, int nBinSetslog2, int nMultires, bool laststeps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
static inline __device__
void histogramKernel_stepImplMulti(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT end,
OUTPUTTYPE zero,
int subsize, INDEXT startidx,
OUTPUTTYPE* bins, int* locks,
OUTPUTTYPE* rvals, int* rkeys,
int* doReduce, bool checkReduce,
int* warpmutex, int binOffset)
{
int myKeys[nMultires];
OUTPUTTYPE vals[nMultires];
bool doWrite = true;
if (laststeps){
if (startidx < end)
{
xformObj(input, startidx, &myKeys[0], &vals[0], nMultires);
}
else
{
doWrite = false;
#pragma unroll
for (int r = 0; r < nMultires; r++){
vals[r] = zero;
myKeys[r] = -1;
}
}
}
else
{
xformObj(input, startidx, &myKeys[0], &vals[0], nMultires);
}
#if __CUDA_ARCH__ >= 200
/* if (laststeps){
*doReduce = false;
}
else*/
{
if (checkReduce){
*doReduce = checkForReduction<nMultires>(myKeys, rkeys);
if (histotype == histogram_generic || histotype == histogram_atomic_add){
__shared__ int tmp;
tmp = 0;
__syncthreads();
if (*doReduce && ((threadIdx.x & 31) == 0)) atomicAdd(&tmp, 1);
__syncthreads();
if (tmp > HMBLOCK_SIZE / 2)
*doReduce = true;
else
*doReduce = false;
}
//if (laststeps) *doReduce = false;
/* __syncthreads();
bool tmpred = checkForReduction<nMultires>(myKeys, rkeys);
if ((threadIdx.x & 31) == 0) atomicExch(doReduce, (int)tmpred);
__syncthreads();*/
}
}
#endif
// TODO: Unroll this later - nvcc (at least older versions) can't unroll atomics (?)
// TODO: How to avoid bank-conflicts? Any way to avoid?
#if __CUDA_ARCH__ >= 200
#define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \
int keyIndex = (myKeys[(RESID % nMultires)] - binOffset); \
bool Iwrite = keyIndex >= 0 && keyIndex < subsize && doWrite;\
if (!Iwrite) keyIndex = 0; \
if (*doReduce){\
if (histotype == histogram_generic || histotype == histogram_atomic_add){\
TAKE_WARP_MUTEX(0);\
bool Iwrite2 = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\
if (Iwrite && Iwrite2) \
bins[keyIndex] = sumfunObj(bins[keyIndex], vals[(RESID % nMultires)]);\
/*if (histotype == histogram_generic) myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);\
else wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);*/\
GIVE_WARP_MUTEX(0);\
} else { \
bool Iwrite2 = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\
wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && Iwrite2, warpmutex); \
}\
} else {\
if (!Iwrite) keyIndex = 0;\
if (histotype == histogram_generic)\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
else if (histotype == histogram_atomic_add)\
wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
else if (histotype == histogram_atomic_inc)\
wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
else{\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
}\
} } } while (0)
#else
#define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \
int keyIndex = (myKeys[(RESID % nMultires)] - binOffset); \
bool Iwrite = keyIndex >= 0 && keyIndex < subsize && doWrite;\
if (!Iwrite) keyIndex = 0;\
if (histotype == histogram_generic)\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
else if (histotype == histogram_atomic_add)\
wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
else if (histotype == histogram_atomic_inc)\
wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
else{\
myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\
}\
} } while (0)
#endif
ONE_HS_STEP(0);
ONE_HS_STEP(1);
ONE_HS_STEP(2);
ONE_HS_STEP(3);
//#pragma unroll
for (int resid = 4; resid < nMultires; resid++){
ONE_HS_STEP(resid);
}
#undef ONE_HS_STEP
}
template <histogram_type histotype, int nMultires, bool lastSteps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
__global__
void histogramKernel_multipass(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero,
OUTPUTTYPE* blockOut, int nOut,
int outStride,
int nSteps,
int subsize)
{
extern __shared__ int cudahistogram_binstmp[];
OUTPUTTYPE* bins = (OUTPUTTYPE*)&(*cudahistogram_binstmp);
int* locks = (int*)&bins[subsize];
int* rkeys = NULL;
OUTPUTTYPE* rvals = NULL;
//__shared__
int warpmutex;
//INIT_WARP_MUTEX2(warpmutex);
#if __CUDA_ARCH__ >= 200
int warpId = threadIdx.x >> 5;
if (histotype == histogram_generic)
rkeys = &locks[subsize];
else
rkeys = locks;
rvals = (OUTPUTTYPE*)&rkeys[32];
if (histotype == histogram_atomic_inc){
rkeys = &rkeys[warpId << 5];
//rvals = &rvals[warpId << 5];
}
#endif
// Reset all bins to zero...
for (int j = 0; j < (subsize >> HMBLOCK_SIZE_LOG2) + 1; j++)
{
int bin = (j << HMBLOCK_SIZE_LOG2) + threadIdx.x;
if (bin < subsize){
bins[bin] = zero;
}
}
#if HMBLOCK_SIZE > 32
__syncthreads();
#endif
int outidx = blockIdx.y;
int binOffset = blockIdx.x * subsize;
INDEXT startidx = (INDEXT)((outidx * nSteps) * HMBLOCK_SIZE + start + threadIdx.x);
int doReduce; // local var - TODO: Is this safe??
doReduce = 0;
#define MED_UNROLL_LOG2 2
#define MED_UNROLL (1 << MED_UNROLL_LOG2)
int step;
for (step = 0; step < (nSteps >> MED_UNROLL_LOG2); step++)
{
histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, true, &warpmutex, binOffset);
startidx += HMBLOCK_SIZE;
histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex, binOffset);
startidx += HMBLOCK_SIZE;
histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex, binOffset);
startidx += HMBLOCK_SIZE;
histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex, binOffset);
startidx += HMBLOCK_SIZE;
}
step = (nSteps >> MED_UNROLL_LOG2) << MED_UNROLL_LOG2;
for (; step < nSteps ; step++)
{
histogramKernel_stepImplMulti<true, histotype, 0, nMultires, lastSteps, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE>
(input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, (step & 7) == 0, &warpmutex, binOffset);
startidx += HMBLOCK_SIZE;
}
#undef MED_UNROLL
#undef MED_UNROLL_LOG2
#if HMBLOCK_SIZE > 32
__syncthreads();
#endif
// Finally put together the bins
for (int j = 0; j < (subsize >> HMBLOCK_SIZE_LOG2) + 1; j++) {
int key = (j << HMBLOCK_SIZE_LOG2) + threadIdx.x;
if (key < subsize)
{
OUTPUTTYPE res = blockOut[(key + binOffset) * outStride + outidx];
//int tmpBin = bin;
res = sumfunObj(res, bins[key]);
//printf("tid:%02d, write out bin: %02d, \n", threadIdx.x, bin);
blockOut[(key + binOffset) * outStride + outidx] = res;
}
}
}
static int determineSubHistoSize(int nOut, size_t outsize, histogram_type histotype, int cuda_arch, cudaDeviceProp* props)
{
int shlimit = props->sharedMemPerBlock - 300;
int neededPerKey = outsize;
if (histotype == histogram_generic || cuda_arch < 130)
neededPerKey += (sizeof(int)); // locks
int neededConst = 0;
if (cuda_arch >= 200)
{
// Reduction stuff:
if (histotype == histogram_generic || histotype == histogram_atomic_add)
{
neededConst += (outsize + sizeof(int)) << 5; // reduction values
}
else
{
neededConst += (sizeof(int) << HMBLOCK_SIZE_LOG2); // keys per warp of one thread
}
}
int result = (shlimit - neededConst) / (2*neededPerKey);
int res = 0;
if (result >= 1<<16) { result >>= 16; res += 16; }
if (result >= 1<< 8) { result >>= 8; res += 8; }
if (result >= 1<< 4) { result >>= 4; res += 4; }
if (result >= 1<< 2) { result >>= 2; res += 2; }
if (result >= 1<< 1) { res += 1; }
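    // res is now floor(log2(result)), i.e. the returned sub-histogram size is the largest
    // power of two that fits the per-block shared-memory budget (e.g. result == 300 -> 256).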
return (1 << res);
}
template <histogram_type histotype, typename OUTPUTTYPE>
static int getMultipassBufSize(int nOut, cudaDeviceProp* props, int cuda_arch)
{
int subsize = determineSubHistoSize(nOut, sizeof(OUTPUTTYPE), histotype, cuda_arch, props);
int nDegenBlocks = nOut / subsize;
if (subsize * nDegenBlocks < nOut) nDegenBlocks++;
int nblocks = props->multiProcessorCount;
if (nDegenBlocks < 8)
nblocks = props->multiProcessorCount * 8 / nDegenBlocks;
//int nblocks = props->multiProcessorCount * 8;
// NOTE: The other half is used by multireduce...
//printf("getMultipassBufSize(%d) = %d\n", nOut, 2 * nblocks * nOut * sizeof(OUTPUTTYPE));
return 2 * nblocks * nOut * sizeof(OUTPUTTYPE);
}
template <histogram_type histotype, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
static
void callHistogramKernelMultiPass(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
cudaDeviceProp* props,
cudaStream_t stream,
void* tmpBuffer,
bool outInDev,
int cuda_arch)
{
INDEXT size = end - start;
if (end <= start)
return;
//int debugs = 0;
int subsize = determineSubHistoSize(nOut, sizeof(OUTPUTTYPE), histotype, cuda_arch, props);
int nDegenBlocks = nOut / subsize;
if (subsize * nDegenBlocks < nOut) nDegenBlocks++;
int nblocks = props->multiProcessorCount;
if (nDegenBlocks < 8)
nblocks = props->multiProcessorCount * 8 / nDegenBlocks;
OUTPUTTYPE* tmpOut;
int nsteps = size / ( nblocks * HMBLOCK_SIZE );
if (nsteps * nblocks * HMBLOCK_SIZE < size) nsteps++;
if (nsteps > MAX_MULTISTEPS)
nsteps = MAX_MULTISTEPS;
//printf(" <debugstep = %d> ", debugs++);
bool userBuffer = false;
if (tmpBuffer)
{
char* tmpptr = (char*)tmpBuffer;
tmpOut = (OUTPUTTYPE*)tmpBuffer;
tmpBuffer = (void*)&tmpptr[nblocks * nOut * sizeof(OUTPUTTYPE)];
userBuffer = true;
//printf("tmpBuffer = &tmpptr[%d]\n", nblocks * nOut * sizeof(OUTPUTTYPE));
}
else
{
cudaMalloc((void**)&tmpOut, 2 * nblocks * nOut * sizeof(OUTPUTTYPE));
//printf("tmpOut = malloc(%d)\n", 2 * nblocks * nOut * sizeof(OUTPUTTYPE));
//tmpBuffer = (void*)&tmpOut[nblocks * nOut * sizeof(OUTPUTTYPE)];
//printf("tmpBuffer = &tmpOut[%d]\n", nblocks * nOut * sizeof(OUTPUTTYPE));
}
#define IBLOCK_SIZE_LOG2 7
#define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2)
int initPaddedSize =
((nblocks * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1));
const dim3 initblock = IBLOCK_SIZE;
dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 );
int nsteps2 = 1;
while (initgrid.x > (1 << 14))
{
initgrid.x >>= 1;
nsteps2 <<= 1;
if (nsteps2 * initgrid.x * IBLOCK_SIZE < nblocks * nOut)
initgrid.x++;
}
initKernel<<<initgrid,initblock,0,stream>>>(tmpOut, zero, nblocks * nOut, nsteps2);
#undef IBLOCK_SIZE_LOG2
#undef IBLOCK_SIZE
int extSharedNeeded = subsize * (sizeof(OUTPUTTYPE)); // bins
if (histotype == histogram_generic || cuda_arch < 130)
extSharedNeeded += subsize * (sizeof(int)); // locks
if (cuda_arch >= 200)
{
// Reduction stuff:
if (histotype == histogram_generic || histotype == histogram_atomic_add)
{
extSharedNeeded += ((sizeof(OUTPUTTYPE) + sizeof(int)) << 5); // reduction values
}
else
{
extSharedNeeded += (sizeof(int) << HMBLOCK_SIZE_LOG2); // keys per warp of one thread
}
}
//printf(" <debugstep(init) = %d> ", debugs++);
/*int extSharedNeeded = ((nOut << nKeysetslog2)) * (sizeof(OUTPUTTYPE) + sizeof(int)) + (sizeof(OUTPUTTYPE) * HMBLOCK_SIZE);
if (nOut < HMBLOCK_SIZE) extSharedNeeded += sizeof(int) * (HMBLOCK_SIZE - nOut);
if (cuda_arch < 130)
extSharedNeeded += ((nOut << nKeysetslog2)) * (sizeof(int));*/
//printf("binsets = %d, steps = %d\n", (1 << nKeysetslog2), nsteps);
int nOrigBlocks = nblocks;
INDEXT myStart = start;
while(myStart < end)
{
bool lastStep = false;
if (myStart + nsteps * nblocks * HMBLOCK_SIZE > end)
{
size = end - myStart;
nsteps = (size) / (nblocks * HMBLOCK_SIZE);
if (nsteps < 1)
{
lastStep = true;
nsteps = 1;
nblocks = size / HMBLOCK_SIZE;
if (nblocks * HMBLOCK_SIZE < size)
nblocks++;
}
}
dim3 grid;
grid.y = nblocks;
grid.x = nDegenBlocks;
dim3 block = HMBLOCK_SIZE;
//printf(" <debugstep(main) = %d> ", debugs++);
if (lastStep)
histogramKernel_multipass<histotype, nMultires, true><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps, subsize);
else
histogramKernel_multipass<histotype, nMultires, false><<<grid, block, extSharedNeeded, stream>>>(
input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps, subsize);
myStart += nsteps * nblocks * HMBLOCK_SIZE;
}
#if H_ERROR_CHECKS
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
printf("Cudaerror = %s\n", cudaGetErrorString( error ));
#endif
// OK - so now tmpOut contains our gold - we just need to dig it out now
//printf(" <debugstep(out) = %d> ", debugs++);
//printf("callMultiReduce(%d, %d,...)\n", nOrigBlocks, nOut);
callMultiReduce(nOrigBlocks, nOut, out, tmpOut, sumfunObj, zero, stream, tmpBuffer, outInDev);
//printf(" <debugstep(multireduce) = %d> ", debugs++);
#if H_ERROR_CHECKS
error = cudaGetLastError();
if (error != cudaSuccess)
printf("Cudaerror(reduce) = %s\n", cudaGetErrorString( error ));
#endif
// Below same as host-code
#if 0
{
int resIdx;
int i;
OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(n * nOut * sizeof(OUTPUTTYPE));
//parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE));
cudaMemcpy(h_tmp, tmpOut, n*nOut*sizeof(OUTPUTTYPE), cudaMemcpyDeviceToHost);
for (resIdx = 0; resIdx < nOut; resIdx++)
{
OUTPUTTYPE res = out[resIdx];
for (i = 0; i < n; i++)
{
res = sumfunObj(res, h_tmp[i + resIdx * n]);
}
out[resIdx] = res;
}
free(h_tmp);
}
#endif
//parallel_free(tmpOut, MemType_DEV);
if (!userBuffer)
cudaFree(tmpOut);
}
template <bool lastSteps, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT>
static inline __device__
void histoKernel_smallBinStep(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT myStart, INDEXT end,
OUTPUTTYPE* mySHBins)
{
int myKeys[nMultires];
if (lastSteps)
{
if (myStart < end)
{
OUTPUTTYPE myOut[nMultires];
xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires);
#pragma unroll
for (int res = 0; res < nMultires; res++)
{
int index = (myKeys[res]) << SMALL_BLOCK_SIZE_LOG2;
mySHBins[index] = sumfunObj(mySHBins[index], myOut[res]);
}
}
}
else
{
OUTPUTTYPE myOut[nMultires];
xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires);
#pragma unroll
for (int res = 0; res < nMultires; res++)
{
int index = (myKeys[res]) << SMALL_BLOCK_SIZE_LOG2;
mySHBins[index] = sumfunObj(mySHBins[index], myOut[res]);
}
}
}
template <bool lastSteps, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
__global__
void histoKernel_smallBin(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero,
OUTPUTTYPE* blockOut, int nOut, int maxblocks,
int nSteps)
{
// Take care with extern - In order to have two instances of this template the
// type of the extern variables cannot change
// (ie. cannot use "extern __shared__ OUTPUTTYPE bins[]")
extern __shared__ int cudahistogram_allbinstmp[];
OUTPUTTYPE* allbins = (OUTPUTTYPE*)&(*cudahistogram_allbinstmp);
OUTPUTTYPE* mySHBins = &allbins[threadIdx.x];
OUTPUTTYPE* ourOut = &blockOut[nOut * blockIdx.x];
INDEXT myStart = start + (INDEXT)((blockIdx.x * nSteps) << SMALL_BLOCK_SIZE_LOG2) + (INDEXT)threadIdx.x;
for (int bin = 0; bin < nOut /*- nLocVars*/; bin++)
mySHBins[bin << SMALL_BLOCK_SIZE_LOG2] = zero;
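    // Bin layout: allbins[(bin << SMALL_BLOCK_SIZE_LOG2) + tid] - each thread keeps a private
    // copy of every bin, interleaved by thread index so neighbouring lanes touch neighbouring
    // words of shared memory.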
// Run loops - unroll 8 steps manually
int doNSteps = (nSteps) >> 3;
for (int step = 0; step < doNSteps; step++)
{
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart, end, mySHBins);
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + SMALL_BLOCK_SIZE, end, mySHBins);
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 2*SMALL_BLOCK_SIZE, end, mySHBins);
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 3*SMALL_BLOCK_SIZE, end, mySHBins);
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 4*SMALL_BLOCK_SIZE, end, mySHBins);
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 5*SMALL_BLOCK_SIZE, end, mySHBins);
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 6*SMALL_BLOCK_SIZE, end, mySHBins);
histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 7*SMALL_BLOCK_SIZE, end, mySHBins);
myStart += 8*SMALL_BLOCK_SIZE;
}
int nStepsLeft = (nSteps) - (doNSteps << 3);
for (int step = 0; step < nStepsLeft; step++)
{
histoKernel_smallBinStep<true, nMultires>(input, xformObj, sumfunObj, myStart, end, mySHBins);
myStart += SMALL_BLOCK_SIZE;
}
// In the end combine results:
#if SMALL_BLOCK_SIZE > 32
__syncthreads();
#endif
// Do first shared stuff:
int keyIndex = threadIdx.x;
while (keyIndex < nOut)
{
OUTPUTTYPE* binResults = &allbins[keyIndex << SMALL_BLOCK_SIZE_LOG2];
OUTPUTTYPE result = ourOut[keyIndex];
for (int tidx = 0; tidx < SMALL_BLOCK_SIZE; tidx++){
result = sumfunObj(result, *binResults++);
}
ourOut[keyIndex] = result;
keyIndex += SMALL_BLOCK_SIZE;
}
}
static inline __device__
int resultToInt(int resultin){ return resultin; }
static inline __device__
int resultToInt(long resultin){ return (int)resultin; }
static inline __device__
int resultToInt(long long resultin){ return (int)resultin; }
static inline __device__
int resultToInt(unsigned int resultin){ return (int)resultin; }
static inline __device__
int resultToInt(unsigned long resultin){ return (int)resultin; }
static inline __device__
int resultToInt(unsigned long long resultin){ return (int)resultin; }
template<typename OUTPUTTYPE>
static inline __device__
int resultToInt(OUTPUTTYPE resultin){ return 0; }
static inline __device__
void intToResult(int resultin, int& resultOut){ resultOut = resultin; }
static inline __device__
void intToResult(int resultin, long& resultOut){ resultOut = (long)resultin; }
static inline __device__
void intToResult(int resultin, unsigned int& resultOut){ resultOut = (unsigned )resultin; }
static inline __device__
void intToResult(int resultin, long long& resultOut){ resultOut = (long long)resultin; }
static inline __device__
void intToResult(int resultin, unsigned long& resultOut){ resultOut = (unsigned long)resultin; }
static inline __device__
void intToResult(int resultin, unsigned long long& resultOut){ resultOut = (unsigned long long)resultin; }
template<typename OUTPUTTYPE>
static inline __device__
void intToResult(int resultin, OUTPUTTYPE& resultout){ ; }
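// The resultToInt/intToResult overloads above let the byte-binned atomic-inc path flush its
// 8-bit counters into integer-typed result bins; the templated fallbacks are deliberate
// no-ops for output types that are not plain integers (not expected on that path).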
template <bool lastSteps, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
static inline __device__
void histoKernel_smallBinByteOneStep(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT myStart, INDEXT end,
volatile unsigned char* mySHBins,
OUTPUTTYPE zero
)
{
if (lastSteps)
{
if (myStart < end)
{
OUTPUTTYPE myOut[nMultires];
int myKeys[nMultires];
xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires);
#pragma unroll
for (int res = 0; res < nMultires; res++)
{
// index = tid * 4 + (key / 4) * blockSize * 4 + (key % 4) - mySHBins points to allbins[4 x tid]
// Complex indexing cost: 2x bit-shift + bitwise and + addition = 4 ops...
int index = (((myKeys[res]) >> 2) << (SMALL_BLOCK_SIZE_LOG2 + 2)) + (myKeys[res] & 0x3);
mySHBins[index]++;
}
}
}
else /*if (myStart < end)*/
{
OUTPUTTYPE myOut[nMultires];
int myKeys[nMultires];
xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires);
#pragma unroll
for (int res = 0; res < nMultires; res++)
{
// index = tid * 4 + (key / 4) * blockSize * 4 + (key % 4) - mySHBins points to allbins[4 x tid]
// Complex indexing cost: 2x bit-shift + bitwise and + addition = 4 ops...
int key = myKeys[res];
int index = ((key >> 2) << (SMALL_BLOCK_SIZE_LOG2 + 2)) + (key & 0x3);
mySHBins[index]++;
}
}
}
template <histogram_type histotype, bool lastSteps, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
__global__
void histoKernel_smallBinByte(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero,
OUTPUTTYPE* blockOut, int nOut, int maxblocks,
int nSteps)
{
// Ok - idea is as follows: When we have blocksize number of threads, thread tid's nth-bin is at:
// index = tid * 4 + (bin / 4) * blocksize * 4 + (bin % 4)
// Example:
    // With 32 threads bins #7, #8 and #9 will be at (7/4=1, 7%4=3, 8/4=2, 8%4=0, 9/4=2, 9%4=1):
    // Bin #7 Bin #8 Bin #9 ... Bin #63
    // tid | index index index ... index
    // ============== ======== ======== ========
    // 0 131 256 257 ... 1923
    // 1 135 260 261 ... 1927
    // 2 139 264 265 ... 1931
// ...
// 31 255 380 381 ... 2047
// Therefore there are blocksize x nOut number of 1-byte bins
// Outputs are gathered from time to time to 32-bit bins
//
// Example2:
// With 32 threads 7 bins
// Bin #0 Bin #1 Bin #2 Bin #3 Bin #4 Bin #5 Bin #6
// tid | index index index index index index index
// ============== ======== ======== ======== ======== ======== ========
// 0 0 1 2 3 128 129 130
// 1 4 5 6 7 132 133 134
// 2 8 9 10 11 136 137 138
// ...
// 30 120 121 122 123 248 249 250
// 31 124 125 126 127 252 253 254
//
// Example3:
// index = tid * 4 + (bin / 4) * blocksize * 4 + (bin % 4)
// With 32 threads 3 bins
// Bin #0 Bin #1 Bin #2
// tid | index index index
// ============== ======== ========
// 0 0 1 2
// 1 4 5 6
// 2 8 9 10
// ...
// 30 120 121 122
// 31 124 125 126
extern __shared__ unsigned char allbins2[];
volatile unsigned char* mySHBins = &allbins2[threadIdx.x << 2];
int padNOut = nOut + (((nOut & 0x3) != 0) ? (4 - (nOut & 0x3)) : 0);
OUTPUTTYPE* ourOut = &blockOut[nOut * blockIdx.x];
#if __CUDA_ARCH__ >= 200
OUTPUTTYPE* resultbins = ourOut;
#else
OUTPUTTYPE* resultbins = (OUTPUTTYPE*)(&allbins2[padNOut << SMALL_BLOCK_SIZE_LOG2]);
#endif
INDEXT myStart = start + (INDEXT)(((blockIdx.x * nSteps) << SMALL_BLOCK_SIZE_LOG2) + threadIdx.x);
// Run loops
//int nFullLoops = nSteps >> 7;
// Clear bins
{
int* tmpSHBins = &((int*)allbins2)[threadIdx.x];
        // There are nOut x BLOCK_SIZE byte-sized bins, so nOut x BLOCK_SIZE/4 int-sized ones
for (int bin = 0; bin < (padNOut >> 2) /*- nLocVars*/; bin++)
tmpSHBins[bin << (SMALL_BLOCK_SIZE_LOG2)] = 0;
// for (int tmpbin = (bin << 2); tmpbin < padNOut; tmpbin++)
// mySHBins[tmpbin] = 0;
#if __CUDA_ARCH__ < 200
int binid = threadIdx.x;
while(binid < nOut)
{
resultbins[binid] = zero;
binid += SMALL_BLOCK_SIZE;
}
#endif
}
#if SMALL_BLOCK_SIZE > 32
__syncthreads();
#endif
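    // One-byte bins overflow after 255 increments and a single step can add up to nMultires
    // of them to the same bin, so results are flushed to the wide bins at least every
    // 255/nMultires steps (capped at 63 so one pass fits the 64-step manual unroll below).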
const int looplim = (255 / nMultires) < 63 ? (255 / nMultires) : 63;
for (int stepsRem = nSteps; stepsRem > 0; stepsRem -= looplim)
{
if (stepsRem > looplim)
{
#define MANUAL_UNROLL 1
#if MANUAL_UNROLL
// Unroll manually
// ("unexcpected control flow" construct with #pragma unroll)
#define DO_STEP(NUM) do { if ((NUM) < looplim) { \
histoKernel_smallBinByteOneStep<lastSteps, nMultires>( \
input, xformObj, sumfunObj, myStart /*+ (NUM) * SMALL_BLOCK_SIZE*/, end,\
mySHBins, zero); myStart += SMALL_BLOCK_SIZE; \
} } while (0)
#define DO_16_STEPS(N0) do { \
DO_STEP(N0 + 0); DO_STEP(N0 + 1); DO_STEP(N0 + 2); DO_STEP(N0 + 3); \
DO_STEP(N0 + 4); DO_STEP(N0 + 5); DO_STEP(N0 + 6); DO_STEP(N0 + 7); \
DO_STEP(N0 + 8); DO_STEP(N0 + 9); DO_STEP(N0 + 10); DO_STEP(N0 + 11); \
DO_STEP(N0 + 12); DO_STEP(N0 + 13); DO_STEP(N0 + 14); DO_STEP(N0 + 15); \
} while (0)
DO_16_STEPS(0);
DO_16_STEPS(16);
DO_16_STEPS(32);
DO_16_STEPS(48);
#undef DO_16_STEPS
#undef DO_STEP
//myStart += looplim * SMALL_BLOCK_SIZE;
#else
for (int stepNum = 0; stepNum < looplim; stepNum++){
histoKernel_smallBinByteOneStep<lastSteps, nMultires>(
input,
xformObj,
sumfunObj,
myStart + stepNum * SMALL_BLOCK_SIZE, end,
mySHBins, zero);
}
myStart += looplim * SMALL_BLOCK_SIZE;
#endif // MANUAL_UNROLL
#undef MANUAL_UNROLL
}
else
{
for (int stepNum = 0; stepNum < stepsRem; stepNum++){
histoKernel_smallBinByteOneStep<lastSteps, nMultires>(
input,
xformObj,
sumfunObj,
myStart + stepNum * SMALL_BLOCK_SIZE, end,
mySHBins, zero);
}
myStart += looplim * SMALL_BLOCK_SIZE;
}
// Ok passes done - need to flush results together
{
# if SMALL_BLOCK_SIZE > 32
__syncthreads();
# endif
int binid = threadIdx.x;
while(binid < nOut)
{
// Start from own tid in order to avoid bank-conflicts:
// index = tid * 4 + 4 * (bin / 4) * blocksize + (bin % 4)
int index = (threadIdx.x << 2) + ((binid >> 2) << (SMALL_BLOCK_SIZE_LOG2 + 2)) + (binid & 0x3);
//int res = (int)allbins2[index];
int res = resultToInt(resultbins[binid]);
int ilimit = SMALL_BLOCK_SIZE - threadIdx.x;
#pragma unroll
for (int i=0; i < SMALL_BLOCK_SIZE; i++)
{
if (i == ilimit)
index -= (SMALL_BLOCK_SIZE << 2);
res += allbins2[index];
//allbins2[index] = 0;
index += 4;
}
intToResult(res, resultbins[binid]);
binid += SMALL_BLOCK_SIZE;
}
# if SMALL_BLOCK_SIZE > 32
__syncthreads();
# endif
// zero the bins
{
int* tmpSHBins = &((int*)allbins2)[threadIdx.x];
            // There are nOut x BLOCK_SIZE byte-sized bins, so nOut x BLOCK_SIZE/4 int-sized ones
for (int bin = 0; bin < (padNOut >> 2) /*- nLocVars*/; bin++)
tmpSHBins[bin << (SMALL_BLOCK_SIZE_LOG2)] = 0;
}
# if SMALL_BLOCK_SIZE > 32
__syncthreads();
# endif
}
}
// In the end combine results:
#if __CUDA_ARCH__ < 200
#if SMALL_BLOCK_SIZE > 32
__syncthreads();
#endif
int keyIndex = threadIdx.x;
while (keyIndex < nOut)
{
OUTPUTTYPE result = ourOut[keyIndex];
//result = result + resultbins[keyIndex];
result = sumfunObj(result, *(OUTPUTTYPE*)(&resultbins[keyIndex]));
ourOut[keyIndex] = result;
keyIndex += SMALL_BLOCK_SIZE;
}
#endif
}
template <histogram_type histotype, typename OUTPUTTYPE>
static int getSmallBinBufSize(int nOut, cudaDeviceProp* props)
{
int maxblocks = props->multiProcessorCount * 3;
maxblocks *= 2;
if (nOut < 200) maxblocks *= 4;
maxblocks *= 4;
return (maxblocks + 1) * nOut * sizeof(OUTPUTTYPE);
}
template <histogram_type histotype, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
static
void callSmallBinHisto(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero,
OUTPUTTYPE* out, int nOut,
cudaDeviceProp* props,
int cuda_arch,
cudaStream_t stream,
int* getTmpBufSize,
void* tmpBuffer,
bool outInDev)
{
INDEXT size = end - start;
if (end <= start)
{
if (getTmpBufSize) *getTmpBufSize = 0;
return;
}
int maxblocks = props->multiProcessorCount * 3;
if (size > 2*1024*1024 || getTmpBufSize){
maxblocks *= 2;
// High occupancy requires lots of blocks
if (nOut < 200) maxblocks *= 4;
}
// TODO: Magic constants..
// With low bin-counts and large problems it seems beneficial to use
// more blocks...
if (nOut <= 128 || size > 2*4096*4096 || getTmpBufSize)
maxblocks *= 4;
//printf("maxblocks = %d\n", maxblocks);
OUTPUTTYPE* tmpOut;
if (getTmpBufSize) {
*getTmpBufSize = (maxblocks + 1) * nOut * sizeof(OUTPUTTYPE);
return;
}
if (tmpBuffer)
tmpOut = (OUTPUTTYPE*)tmpBuffer;
else
cudaMalloc((void**)&tmpOut, (maxblocks + 1) * nOut * sizeof(OUTPUTTYPE));
#if H_ERROR_CHECKS
/*assert(getSmallBinBufSize<histotype, OUTPUTTYPE>(nOut, props) >=
(maxblocks + 1) * nOut * sizeof(OUTPUTTYPE));*/
#endif
// cudaMemset(tmpOut, 0, sizeof(OUTPUTTYPE) * nOut * (maxblocks+1));
{
#define IBLOCK_SIZE_LOG2 7
#define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2)
int initPaddedSize =
((maxblocks * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1));
const dim3 initblock = IBLOCK_SIZE;
dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 );
int nsteps = 1;
while (initgrid.x > (1 << 14))
{
initgrid.x >>= 1;
nsteps <<= 1;
if (nsteps * initgrid.x * IBLOCK_SIZE < maxblocks * nOut)
initgrid.x++;
}
initKernel<<<initgrid, initblock, 0, stream>>>(tmpOut, zero, maxblocks * nOut, nsteps);
#undef IBLOCK_SIZE_LOG2
#undef IBLOCK_SIZE
}
int sharedNeeded;
if (histotype == histogram_atomic_inc)
{
int padNOut = nOut + (((nOut & 0x3) != 0) ? (4 - (nOut & 0x3)) : 0);
sharedNeeded = (padNOut << SMALL_BLOCK_SIZE_LOG2);
if (cuda_arch < 200)
sharedNeeded += (nOut << 2);
}
else
{
int typesize = sizeof(OUTPUTTYPE);
sharedNeeded = (nOut * typesize) << SMALL_BLOCK_SIZE_LOG2;
//printf("Small-bin, generic, Shared needed = %d\n", sharedNeeded);
}
// Determine number of local variables
// SMALL_LOCALLIMIT is total local size available for one block:
int nSteps = size / (maxblocks << SMALL_BLOCK_SIZE_LOG2);
if (nSteps * maxblocks * SMALL_BLOCK_SIZE < size) nSteps++;
if (nSteps > MAX_SMALL_STEPS) nSteps = MAX_SMALL_STEPS;
int nFullSteps = size / (nSteps * maxblocks * SMALL_BLOCK_SIZE);
dim3 grid = maxblocks;
dim3 block = SMALL_BLOCK_SIZE;
for (int i = 0; i < nFullSteps; i++)
{
if (histotype == histogram_atomic_inc)
histoKernel_smallBinByte<histotype, false, nMultires><<<grid, block, sharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps);
else
histoKernel_smallBin<false, nMultires><<<grid, block, sharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps);
start += nSteps * maxblocks * SMALL_BLOCK_SIZE;
#if H_ERROR_CHECKS
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
printf("Cudaerror = %s\n", cudaGetErrorString( error ));
#endif
}
size = end - start;
if (size > 0)
{
// Do what steps we still can do without checks
nSteps = size / (maxblocks << SMALL_BLOCK_SIZE_LOG2);
if (nSteps > 0)
{
if (histotype == histogram_atomic_inc)
histoKernel_smallBinByte<histotype, false, nMultires><<<grid, block, sharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps);
else
histoKernel_smallBin<false, nMultires><<<grid, block, sharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps);
start += nSteps * maxblocks * SMALL_BLOCK_SIZE;
}
}
size = end - start;
if (size > 0)
{
// Last step here:
int nblocks = size >> SMALL_BLOCK_SIZE_LOG2;
if (nblocks >= maxblocks) nblocks = maxblocks;
else if ((nblocks << SMALL_BLOCK_SIZE_LOG2) < size) nblocks++;
nSteps = size / (nblocks << SMALL_BLOCK_SIZE_LOG2);
if (nSteps * nblocks * SMALL_BLOCK_SIZE < size)
{
nSteps++;
nblocks = size / (nSteps << SMALL_BLOCK_SIZE_LOG2);
if (((nSteps * nblocks) << SMALL_BLOCK_SIZE_LOG2) < size) nblocks++;
}
grid.x = nblocks;
if (histotype == histogram_atomic_inc)
histoKernel_smallBinByte<histotype, true, nMultires><<<grid, block, sharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps);
else
histoKernel_smallBin<true, nMultires><<<grid, block, sharedNeeded, stream>>>(
input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps);
}
#if H_ERROR_CHECKS
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
printf("Cudaerror = %s\n", cudaGetErrorString( error ));
#endif
// Finally put together the result:
enum cudaMemcpyKind fromOut = outInDev ? cudaMemcpyDeviceToDevice : cudaMemcpyHostToDevice;
enum cudaMemcpyKind toOut = outInDev ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost;
if (stream != 0)
cudaMemcpyAsync(&tmpOut[maxblocks * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut, stream);
else
cudaMemcpy(&tmpOut[maxblocks * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut);
    // Arrange the gather so that one block handles one bin
grid.x = nOut;
//grid.x = nOut >> SMALL_BLOCK_SIZE_LOG2;
//if ((grid.x << SMALL_BLOCK_SIZE_LOG2) < nOut) grid.x++;
block.x = GATHER_BLOCK_SIZE;
gatherKernel<<<grid, block, 0, stream>>>(sumfunObj, tmpOut, nOut, maxblocks, zero);
// TODO: Use async copy for the results as well?
if (outInDev && stream != 0)
cudaMemcpyAsync(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut, stream);
else
cudaMemcpy(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut);
#if 0
{
int resIdx;
int i;
OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(maxblocks * nOut * sizeof(OUTPUTTYPE));
//parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE));
cudaMemcpy(h_tmp, tmpOut, maxblocks*nOut*sizeof(OUTPUTTYPE), cudaMemcpyDeviceToHost);
for (resIdx = 0; resIdx < nOut; resIdx++)
{
OUTPUTTYPE res = out[resIdx];
for (i = 0; i < maxblocks; i++)
{
res = sumfunObj(res, h_tmp[i * nOut + resIdx]);
}
out[resIdx] = sumfunObj(res, out[resIdx]);
}
free(h_tmp);
}
#endif
if (!tmpBuffer)
cudaFree(tmpOut);
}
template <histogram_type histotype, typename OUTPUTTYPE>
static inline
bool smallBinLimit(int nOut, OUTPUTTYPE zero, cudaDeviceProp* props, int cuda_arch)
{
int shlimit = props->sharedMemPerBlock - 300;
int typeSize = sizeof(OUTPUTTYPE);
if (histotype == histogram_atomic_inc)
if ((((4 * nOut) << 5) + (cuda_arch < 200 ? nOut * 16 : 0)) < shlimit)
return true;
if (((4 * nOut * typeSize) << 5) < shlimit)
return true;
return false;
}
__global__
void detectCudaArchKernel(int* res)
{
int result;
#if __CUDA_ARCH__ >= 210
result = 210;
#elif __CUDA_ARCH__ >= 200
result = 200;
#elif __CUDA_ARCH__ >= 130
result = 130;
#elif __CUDA_ARCH__ >= 120
result = 120;
#elif __CUDA_ARCH__ >= 110
result = 110;
#else
result = 100;
#endif
if (threadIdx.x == 0)
*res = result;
}
static
int DetectCudaArch(void)
{
    // The only way to know from host code which device architecture our kernels have been
    // generated against is to run a kernel that actually checks it.. :)
dim3 grid = 1;
//dim3 block = 32;
// TODO: Allow static storage so that we can ask just once for the arch???
    // NOTE: This function implies synchronization between CPU and GPU - so use static here...
static int result = 0;
//int result = 0;
if (result == 0)
{
void* tmpBuf;
cudaMalloc(&tmpBuf, sizeof(int));
detectCudaArchKernel<<<grid, grid>>>((int*)tmpBuf);
cudaMemcpy(&result, tmpBuf, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(tmpBuf);
//printf("Detected CUDA_ARCH = %d\n", result);
}
return result;
}
static bool runMultiPass(int nOut, cudaDeviceProp* props, int cuda_arch, size_t outsize, histogram_type histotype)
{
int subsize = determineSubHistoSize(nOut, outsize, histotype, cuda_arch, props);
if (cuda_arch < 120){
if (subsize <= 0 || nOut > 2 * subsize)
return false;
return true;
}
else
{
if (subsize <= 0 || nOut > 16 * subsize)
return false;
return true;
}
}
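// Main entry point: picks an implementation based on the bin count and the available shared
// memory - the byte-binned small-bin path, per-block shared bins, multipass sub-histograms,
// or the hash-based large-bin path as the fallback.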
template <histogram_type histotype, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
cudaError_t
callHistogramKernel(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT start, INDEXT end,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
bool outInDev,
cudaStream_t stream, void* tmpBuffer,
bool allowMultiPass)
{
int devId;
cudaDeviceProp props;
cudaError_t cudaErr = cudaGetDevice( &devId );
if (cudaErr != 0) return cudaErr;
//assert(!cudaErr);
cudaErr = cudaGetDeviceProperties( &props, devId );
if (cudaErr != 0) return cudaErr;
int cuda_arch = DetectCudaArch();
enum cudaFuncCache old;
cudaThreadGetCacheConfig(&old);
cudaThreadSetCacheConfig(cudaFuncCachePreferShared);
if (nOut <= 0) return cudaSuccess;
    // 100 MiB printf-limit should be enough...
// cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 1024 * 1024 * 100);
if (smallBinLimit<histotype>(nOut, zero, &props, cuda_arch))
{
callSmallBinHisto<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, cuda_arch, stream, NULL, tmpBuffer, outInDev);
}
else if (binsFitIntoShared(nOut, zero, &props, cuda_arch))
{
callHistogramKernelImpl<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, stream, NULL, tmpBuffer, outInDev, cuda_arch);
}
else if (allowMultiPass && runMultiPass(nOut, &props, cuda_arch, sizeof(OUTPUTTYPE), histotype))
{
callHistogramKernelMultiPass<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, stream, tmpBuffer, outInDev, cuda_arch);
}
else
{
callHistogramKernelLargeNBins<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, cuda_arch, stream, NULL, tmpBuffer, outInDev);
}
cudaThreadSetCacheConfig(old);
return cudaSuccess;
}
template <typename nDimIndexFun, int nDim, typename USERINPUTTYPE, typename INDEXT, typename OUTPUTTYPE>
class wrapHistoInput
{
public:
nDimIndexFun userIndexFun;
INDEXT starts[nDim];
//int ends[nDim];
INDEXT sizes[nDim];
__host__ __device__
void operator() (USERINPUTTYPE input, INDEXT i, int* result_index, OUTPUTTYPE* results, int nresults) const {
int coords[nDim];
int tmpi = i;
#pragma unroll
for (int d=0; d < nDim; d++)
{
// Example of how this logic works - imagine a cube of (10,100,1000), and take index 123 456
// newI = 123 456 / 10 = 12 345, offset = 123 456 - 123 450 = 6 (this is our first coordinate!),
// newI = 12 345 / 100 = 123, offset = 12 345 - 12 300 = 45 (this is our second coordinate!),
// newI = 123 / 1000 = 0, offset = 123 - 0 = 123 (this is our last coordinate!)
// Result = [123, 45, 6]
INDEXT newI = tmpi / sizes[d];
INDEXT offset = tmpi - newI * sizes[d];
coords[d] = starts[d] + offset;
tmpi = newI;
}
// Now just call wrapped functor with right coordinate values
userIndexFun(input, coords, result_index, results, nresults);
}
};
template <histogram_type histotype, int nMultires, int nDim,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
cudaError_t
callHistogramKernelNDim(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT* starts, INDEXT* ends,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
bool outInDev,
cudaStream_t stream, void* tmpBuffer,
bool allowMultiPass)
{
wrapHistoInput<TRANSFORMFUNTYPE, nDim, INPUTTYPE, INDEXT, OUTPUTTYPE> wrapInput;
INDEXT start = 0;
INDEXT size = 1;
for (int d = 0; d < nDim; d++)
{
wrapInput.starts[d] = starts[d];
wrapInput.sizes[d] = ends[d] - starts[d];
// Example: starts = [3, 10, 23], sizes = [10, 100, 1000]
// start = 3 * 1 = 3, size = 10
// start = 3 + 10 * 10 = 103, size = 10*100 = 1000
// start = 103 + 1000*23 = 23 103, size = 1000*1000 = 1 000 000
start += starts[d] * size;
size *= wrapInput.sizes[d];
if (ends[d] <= starts[d]) return cudaSuccess;
}
wrapInput.userIndexFun = xformObj;
INDEXT end = start + size;
return callHistogramKernel<histotype, nMultires>
(input, wrapInput, sumfunObj, start, end, zero, out, nOut, outInDev, stream, tmpBuffer, allowMultiPass);
}
template <histogram_type histotype, int nMultires,
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE,
typename OUTPUTTYPE, typename INDEXT>
cudaError_t
callHistogramKernel2Dim(
INPUTTYPE input,
TRANSFORMFUNTYPE xformObj,
SUMFUNTYPE sumfunObj,
INDEXT x0, INDEXT x1,
INDEXT y0, INDEXT y1,
OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut,
bool outInDev,
cudaStream_t stream, void* tmpBuffer,
bool allowMultiPass)
{
INDEXT starts[2] = { x0, y0 };
INDEXT ends[2] = { x1, y1 };
return callHistogramKernelNDim<histotype, nMultires, 2>
(input, xformObj, sumfunObj, starts, ends, zero, out, nOut, outInDev, stream, tmpBuffer, allowMultiPass);
}
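// Default transform functor: treats the input pointer as a list of bin indices and emits a
// weight of 1 for each of them, i.e. a plain counting histogram.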
struct histogram_defaultXform
{
__host__ __device__
void operator() (int* input, int i, int* result_index, int* results, int nresults) const {
//int idata = input[i];
#pragma unroll
for (int resIndex = 0; resIndex < nresults; resIndex++)
{
*result_index++ = *input++;
*results++ = 1;
}
}
};
template <typename OUTPUTTYPE>
struct histogram_defaultSum
{
__host__ __device__
OUTPUTTYPE operator() (OUTPUTTYPE i1, OUTPUTTYPE i2) const {
return i1 + i2;
}
};
template <typename INPUTTYPE, typename OUTPUTTYPE>
struct histogram_dummyXform
{
__host__ __device__
void operator() (INPUTTYPE* input, int i, int* result_index, OUTPUTTYPE* results, int nresults) const {
//int idata = input[i];
int index = i;
(void)input;
#pragma unroll
for (int resIndex = 0; resIndex < nresults; resIndex++)
{
*result_index++ = index++;
*results++ = 1;//*input++;
}
}
};
template <typename OUTPUTTYPE>
struct histogram_dummySum
{
__host__ __device__
OUTPUTTYPE operator() (OUTPUTTYPE i1, OUTPUTTYPE i2) const {
return i1;
}
};
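// Returns the size in bytes of the temporary buffer that callHistogramKernel needs for a
// histogram with nOut bins of type OUTPUTTYPE on the current device. The branches mirror
// the dispatch in callHistogramKernel; returns 0 when nOut <= 0 and -1 on a CUDA error.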
template <histogram_type histotype, typename OUTPUTTYPE>
int getHistogramBufSize(OUTPUTTYPE zero, int nOut)
{
int result = 0;
int devId;
cudaDeviceProp props;
cudaError_t cudaErr = cudaGetDevice( &devId );
if (cudaErr != 0) return -1;
//assert(!cudaErr);
cudaErr = cudaGetDeviceProperties( &props, devId );
if (cudaErr != 0) return -1;
int cuda_arch = DetectCudaArch();
if (nOut <= 0) return 0;
if (smallBinLimit<histotype>(nOut, zero, &props, cuda_arch))
{
result = getSmallBinBufSize<histotype, OUTPUTTYPE>(nOut, &props);
}
else if (binsFitIntoShared(nOut, zero, &props, cuda_arch))
{
result = getMediumHistoTmpbufSize<histotype, OUTPUTTYPE>(nOut, &props);
}
else if (runMultiPass(nOut, &props, cuda_arch, sizeof(OUTPUTTYPE), histotype))
{
result = getMultipassBufSize<histotype, OUTPUTTYPE>(nOut, &props, cuda_arch);
}
else
{
result = getLargeBinTmpbufsize<histotype, OUTPUTTYPE>(nOut, &props, cuda_arch);
}
return result;
}
// undef everything
#undef H_ERROR_CHECKS
#undef HBLOCK_SIZE_LOG2
#undef HBLOCK_SIZE
#undef HMBLOCK_SIZE_LOG2
#undef HMBLOCK_SIZE
#undef LBLOCK_SIZE_LOG2
#undef LBLOCK_SIZE
#undef GATHER_BLOCK_SIZE_LOG2
#undef GATHER_BLOCK_SIZE
#undef LBLOCK_WARPS
#undef RBLOCK_SIZE
#undef RMAXSTEPS
#undef NHSTEPSPERKEY
#undef MAX_NHSTEPS
#undef MAX_MULTISTEPS
#undef MAX_NLHSTEPS
#undef STRATEGY_CHECK_INTERVAL_LOG2
#undef STRATEGY_CHECK_INTERVAL
#undef HASH_COLLISION_STEPS
#undef USE_JENKINS_HASH
#undef LARGE_NBIN_CHECK_INTERVAL_LOG2
#undef LARGE_NBIN_CHECK_INTERVAL
#undef SMALL_BLOCK_SIZE_LOG2
#undef SMALL_BLOCK_SIZE
#undef MAX_SMALL_STEPS
#undef USE_ATOMICS_HASH
#undef USE_BALLOT_HISTOGRAM
#undef TAKE_WARP_MUTEX
#undef GIVE_WARP_MUTEX
#undef FREE_MUTEX_ID
#if USE_MEDIUM_PATH
#undef MEDIUM_BLOCK_SIZE_LOG2
#undef MEDIUM_BLOCK_SIZE
#endif
#undef USE_MEDIUM_PATH
|
5df89ce1d57b889c3fa4d2569e6ab57fa19d00d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "friction_update.cuh"
__global__ void friction_update
(
SimulationParameters sim_params,
SolverParameters solver_params,
real dt,
AssembledSolution d_assem_sol
)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < sim_params.cells + 2)
{
if (d_assem_sol.h_BC[x] > solver_params.tol_dry && abs(d_assem_sol.q_BC[x]) > solver_params.tol_dry)
{
real u = d_assem_sol.q_BC[x] / d_assem_sol.h_BC[x];
real Cf = solver_params.g * pow(sim_params.manning, C(2.0)) / pow(d_assem_sol.h_BC[x], C(1.0) / C(3.0));
real Sf = -Cf * abs(u) * u;
real D = 1 + 2 * dt * Cf * abs(u) / d_assem_sol.h_BC[x];
// Update
d_assem_sol.q_BC[x] += dt * Sf / D;
}
}
} | 5df89ce1d57b889c3fa4d2569e6ab57fa19d00d2.cu | #include "friction_update.cuh"
__global__ void friction_update
(
SimulationParameters sim_params,
SolverParameters solver_params,
real dt,
AssembledSolution d_assem_sol
)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < sim_params.cells + 2)
{
if (d_assem_sol.h_BC[x] > solver_params.tol_dry && abs(d_assem_sol.q_BC[x]) > solver_params.tol_dry)
{
real u = d_assem_sol.q_BC[x] / d_assem_sol.h_BC[x];
real Cf = solver_params.g * pow(sim_params.manning, C(2.0)) / pow(d_assem_sol.h_BC[x], C(1.0) / C(3.0));
real Sf = -Cf * abs(u) * u;
real D = 1 + 2 * dt * Cf * abs(u) / d_assem_sol.h_BC[x];
// Update
d_assem_sol.q_BC[x] += dt * Sf / D;
}
}
} |
0f2c98de145d3a05b6794c441d9819c8ae3eb501.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include "helper_math.h"
#include "FastDeviceMinMax.h"
#include "Logger.h"
#include "CUDAAssert.h"
#include <cstdio>
#define ENABLE_PROFILING 1 // set to 0 when using GPGPU-Sim, 1 when profiling cuda on actual HW
__device__ float4* BVHTreeNodes;
__device__ float4* TriangleWoopCoordinates;
__device__ int* MappingFromTriangleAddressToIndex;
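// Slab test of a ray against an axis-aligned box, using the precomputed inverse direction
// (InvDir) and origin * InvDir (Ood). Returns true when the slab interval clipped to
// [TMin, TMax] is non-empty; OutIntersectionDist receives the entry distance (slabMin).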
__device__ inline bool RayBoxIntersection(float3 Low, float3 High, float3 InvDir, float3 Ood, float TMin, float TMax, float& OutIntersectionDist)
{
const float3 lo = Low * InvDir - Ood;
const float3 hi = High * InvDir - Ood;
const float slabMin = tMinFermi(lo.x, hi.x, lo.y, hi.y, lo.z, hi.z, TMin);
const float slabMax = tMaxFermi(lo.x, hi.x, lo.y, hi.y, lo.z, hi.z, TMax);
OutIntersectionDist = slabMin;
return slabMin <= slabMax;
}
__global__ void rtTraceBVH2Plain(
Ray* rayBuffer,
Hit* rayResultBuffer,
int rayCount,
int* finishedRayCount
)
{
const int EntrypointSentinel = 0x76543210;
const int STACK_SIZE = 32;
const float ooeps = exp2f(-80.0f); // Avoid div by zero, returns 1/2^80, an extremely small number
int traversalStack[STACK_SIZE];
int rayidx = blockIdx.x * blockDim.x + threadIdx.x;
float3 idir; // 1 / ray direction
float3 ood;
float2 triangleuv;
if (rayidx >= rayCount)
return;
float3 RayOrigin = make_float3(rayBuffer[rayidx].origin_tmin);
float3 RayDirection = make_float3(rayBuffer[rayidx].dir_tmax);
float tmin = rayBuffer[rayidx].origin_tmin.w;
float hitT = rayBuffer[rayidx].dir_tmax.w;
	// ooeps is a very small number, used instead of the raydir xyz component when that component is near zero
idir.x = 1.0f / (fabsf(RayDirection.x) > ooeps ? RayDirection.x : copysignf(ooeps, RayDirection.x)); // inverse ray direction
idir.y = 1.0f / (fabsf(RayDirection.y) > ooeps ? RayDirection.y : copysignf(ooeps, RayDirection.y)); // inverse ray direction
idir.z = 1.0f / (fabsf(RayDirection.z) > ooeps ? RayDirection.z : copysignf(ooeps, RayDirection.z)); // inverse ray direction
ood = RayOrigin * idir;
// Setup traversal + initialisation
traversalStack[0] = EntrypointSentinel; // Bottom-most entry. 0x76543210 (1985229328 in decimal)
int* stackPtr = &traversalStack[0]; // point stackPtr to bottom of traversal stack = EntryPointSentinel
int nodeAddr = 0; // Start from the root.
int hitAddr = -1; // No triangle intersected so far.
int leafAddr = 0;
const float4* localBVHTreeNodes = BVHTreeNodes;
const float4* localTriangleWoopCoordinates = TriangleWoopCoordinates;
// Traversal loop.
while (nodeAddr != EntrypointSentinel)
{
leafAddr = 0;
while (nodeAddr != EntrypointSentinel && nodeAddr >= 0)
{
const float4 n0xy = __ldg(localBVHTreeNodes + nodeAddr + 0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
const float4 n1xy = __ldg(localBVHTreeNodes + nodeAddr + 1); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
const float4 n01z = __ldg(localBVHTreeNodes + nodeAddr + 2); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 tmp = BVHTreeNodes[nodeAddr + 3]; // child_index0, child_index1
int2 cnodes = *(int2*)&tmp;
const float3 c0lo = make_float3(n0xy.x, n0xy.z, n01z.x);
const float3 c0hi = make_float3(n0xy.y, n0xy.w, n01z.y);
const float3 c1lo = make_float3(n1xy.x, n1xy.z, n01z.z);
const float3 c1hi = make_float3(n1xy.y, n1xy.w, n01z.w);
float c0dist, c1dist;
bool traverseChild0 = RayBoxIntersection(c0lo, c0hi, idir, ood, tmin, hitT, c0dist);
bool traverseChild1 = RayBoxIntersection(c1lo, c1hi, idir, ood, tmin, hitT, c1dist);
bool swp = c1dist < c0dist;
if (!traverseChild0 && !traverseChild1)
{
nodeAddr = *stackPtr;
stackPtr--;
}
else
{
nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y;
if (traverseChild0 && traverseChild1)
{
if (swp)
swap(nodeAddr, cnodes.y);
stackPtr++;
*stackPtr = cnodes.y;
}
}
if (nodeAddr < 0 && leafAddr >= 0)
{
leafAddr = nodeAddr;
nodeAddr = *stackPtr;
stackPtr--;
}
if (!__any_sync(__activemask(), leafAddr >= 0))
break;
}
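        // Process postponed leaf nodes. A leaf stores a run of Woop-transformed triangles,
        // three float4 rows per triangle, terminated by a sentinel row with v00.x == 0x80000000;
        // ~leafAddr recovers the address of the first triangle row.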
while (leafAddr < 0)
{
for (int triAddr = ~leafAddr;; triAddr += 3)
{
float4 v00 = __ldg(localTriangleWoopCoordinates + triAddr + 0);
float4 v11 = __ldg(localTriangleWoopCoordinates + triAddr + 1);
float4 v22 = __ldg(localTriangleWoopCoordinates + triAddr + 2);
if (__float_as_int(v00.x) == 0x80000000)
break;
float Oz = v00.w - RayOrigin.x * v00.x - RayOrigin.y * v00.y - RayOrigin.z * v00.z;
float invDz = 1.0f / (RayDirection.x*v00.x + RayDirection.y*v00.y + RayDirection.z*v00.z);
float t = Oz * invDz;
if (t > tmin && t < hitT)
{
float Ox = v11.w + RayOrigin.x * v11.x + RayOrigin.y * v11.y + RayOrigin.z * v11.z;
float Dx = RayDirection.x * v11.x + RayDirection.y * v11.y + RayDirection.z * v11.z;
float u = Ox + t * Dx;
if (u >= 0.0f && u <= 1.0f)
{
float Oy = v22.w + RayOrigin.x * v22.x + RayOrigin.y * v22.y + RayOrigin.z * v22.z;
float Dy = RayDirection.x * v22.x + RayDirection.y * v22.y + RayDirection.z * v22.z;
float v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
triangleuv.x = u;
triangleuv.y = v;
hitT = t;
hitAddr = triAddr;
}
}
}
}
leafAddr = nodeAddr;
if (nodeAddr < 0)
{
nodeAddr = *stackPtr;
stackPtr--;
}
}
}
rayResultBuffer[rayidx].t_triId_u_v = make_float4(
hitT,
int_as_float(hitAddr),
triangleuv.x,
triangleuv.y
);
}
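// Publishes the BVH node, triangle and index buffers to the __device__ pointers declared above.
// Only the pointer values are copied into the device symbols, not the buffers themselves.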
__host__ void rtBindBVH2Data(
const float4* InBVHTreeNodes,
const float4* InTriangleWoopCoordinates,
const int* InMappingFromTriangleAddressToIndex)
{
cudaCheck(hipMemcpyToSymbol(MappingFromTriangleAddressToIndex, &InMappingFromTriangleAddressToIndex, 1 * sizeof(InMappingFromTriangleAddressToIndex)));
cudaCheck(hipMemcpyToSymbol(TriangleWoopCoordinates, &InTriangleWoopCoordinates, 1 * sizeof(InTriangleWoopCoordinates)));
cudaCheck(hipMemcpyToSymbol(BVHTreeNodes, &InBVHTreeNodes, 1 * sizeof(InBVHTreeNodes)));
}
__host__ void rtTraceBVH2(
Ray* rayBuffer,
Hit* rayResultBuffer,
int rayCount
)
{
#if ENABLE_PROFILING==1
float elapsedTime;
hipEvent_t startEvent, stopEvent;
cudaCheck(hipEventCreate(&startEvent));
cudaCheck(hipEventCreate(&stopEvent));
#endif
int* cudaFinishedRayCount;
cudaCheck(hipMalloc(&cudaFinishedRayCount, sizeof(int)));
hipMemset(cudaFinishedRayCount, 0, sizeof(int));
dim3 blockDim(128, 1);
dim3 gridDim(idivCeil(rayCount, blockDim.x), 1);
#if ENABLE_PROFILING==1
hipProfilerStart();
cudaCheck(hipEventRecord(startEvent, 0));
#endif
Log("start Aila tracing\n");
hipLaunchKernelGGL(( rtTraceBVH2Plain) , dim3(gridDim), dim3(blockDim) , 0, 0,
rayBuffer,
rayResultBuffer,
rayCount,
cudaFinishedRayCount
);
#if ENABLE_PROFILING==1
cudaCheck(hipEventRecord(stopEvent, 0));
cudaCheck(hipEventSynchronize(stopEvent));
cudaCheck(hipEventElapsedTime(&elapsedTime, startEvent, stopEvent));
Log("%.3fMS, %.2lfMRays/s (rtTraceBVH2 No Dynamic Fetch)", elapsedTime, (double)rayCount / 1000000.0f / (elapsedTime / 1000.0f));
hipProfilerStop();
#endif
hipFree(cudaFinishedRayCount);
}
| 0f2c98de145d3a05b6794c441d9819c8ae3eb501.cu | #include <cuda_profiler_api.h>
#include "helper_math.h"
#include "FastDeviceMinMax.h"
#include "Logger.h"
#include "CUDAAssert.h"
#include <cstdio>
#define ENABLE_PROFILING 1 // set to 0 when using GPGPU-Sim, 1 when profiling cuda on actual HW
__device__ float4* BVHTreeNodes;
__device__ float4* TriangleWoopCoordinates;
__device__ int* MappingFromTriangleAddressToIndex;
__device__ inline bool RayBoxIntersection(float3 Low, float3 High, float3 InvDir, float3 Ood, float TMin, float TMax, float& OutIntersectionDist)
{
const float3 lo = Low * InvDir - Ood;
const float3 hi = High * InvDir - Ood;
const float slabMin = tMinFermi(lo.x, hi.x, lo.y, hi.y, lo.z, hi.z, TMin);
const float slabMax = tMaxFermi(lo.x, hi.x, lo.y, hi.y, lo.z, hi.z, TMax);
OutIntersectionDist = slabMin;
return slabMin <= slabMax;
}
__global__ void rtTraceBVH2Plain(
Ray* rayBuffer,
Hit* rayResultBuffer,
int rayCount,
int* finishedRayCount
)
{
const int EntrypointSentinel = 0x76543210;
const int STACK_SIZE = 32;
const float ooeps = exp2f(-80.0f); // Avoid div by zero, returns 1/2^80, an extremely small number
int traversalStack[STACK_SIZE];
int rayidx = blockIdx.x * blockDim.x + threadIdx.x;
float3 idir; // 1 / ray direction
float3 ood;
float2 triangleuv;
if (rayidx >= rayCount)
return;
float3 RayOrigin = make_float3(rayBuffer[rayidx].origin_tmin);
float3 RayDirection = make_float3(rayBuffer[rayidx].dir_tmax);
float tmin = rayBuffer[rayidx].origin_tmin.w;
float hitT = rayBuffer[rayidx].dir_tmax.w;
	// ooeps is a very small number, used instead of the raydir xyz component when that component is near zero
idir.x = 1.0f / (fabsf(RayDirection.x) > ooeps ? RayDirection.x : copysignf(ooeps, RayDirection.x)); // inverse ray direction
idir.y = 1.0f / (fabsf(RayDirection.y) > ooeps ? RayDirection.y : copysignf(ooeps, RayDirection.y)); // inverse ray direction
idir.z = 1.0f / (fabsf(RayDirection.z) > ooeps ? RayDirection.z : copysignf(ooeps, RayDirection.z)); // inverse ray direction
ood = RayOrigin * idir;
// Setup traversal + initialisation
traversalStack[0] = EntrypointSentinel; // Bottom-most entry. 0x76543210 (1985229328 in decimal)
int* stackPtr = &traversalStack[0]; // point stackPtr to bottom of traversal stack = EntryPointSentinel
int nodeAddr = 0; // Start from the root.
int hitAddr = -1; // No triangle intersected so far.
int leafAddr = 0;
const float4* localBVHTreeNodes = BVHTreeNodes;
const float4* localTriangleWoopCoordinates = TriangleWoopCoordinates;
// Traversal loop.
while (nodeAddr != EntrypointSentinel)
{
leafAddr = 0;
while (nodeAddr != EntrypointSentinel && nodeAddr >= 0)
{
const float4 n0xy = __ldg(localBVHTreeNodes + nodeAddr + 0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
const float4 n1xy = __ldg(localBVHTreeNodes + nodeAddr + 1); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
const float4 n01z = __ldg(localBVHTreeNodes + nodeAddr + 2); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 tmp = BVHTreeNodes[nodeAddr + 3]; // child_index0, child_index1
int2 cnodes = *(int2*)&tmp;
const float3 c0lo = make_float3(n0xy.x, n0xy.z, n01z.x);
const float3 c0hi = make_float3(n0xy.y, n0xy.w, n01z.y);
const float3 c1lo = make_float3(n1xy.x, n1xy.z, n01z.z);
const float3 c1hi = make_float3(n1xy.y, n1xy.w, n01z.w);
float c0dist, c1dist;
bool traverseChild0 = RayBoxIntersection(c0lo, c0hi, idir, ood, tmin, hitT, c0dist);
bool traverseChild1 = RayBoxIntersection(c1lo, c1hi, idir, ood, tmin, hitT, c1dist);
bool swp = c1dist < c0dist;
if (!traverseChild0 && !traverseChild1)
{
nodeAddr = *stackPtr;
stackPtr--;
}
else
{
nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y;
if (traverseChild0 && traverseChild1)
{
if (swp)
swap(nodeAddr, cnodes.y);
stackPtr++;
*stackPtr = cnodes.y;
}
}
if (nodeAddr < 0 && leafAddr >= 0)
{
leafAddr = nodeAddr;
nodeAddr = *stackPtr;
stackPtr--;
}
if (!__any_sync(__activemask(), leafAddr >= 0))
break;
}
while (leafAddr < 0)
{
for (int triAddr = ~leafAddr;; triAddr += 3)
{
float4 v00 = __ldg(localTriangleWoopCoordinates + triAddr + 0);
float4 v11 = __ldg(localTriangleWoopCoordinates + triAddr + 1);
float4 v22 = __ldg(localTriangleWoopCoordinates + triAddr + 2);
if (__float_as_int(v00.x) == 0x80000000)
break;
float Oz = v00.w - RayOrigin.x * v00.x - RayOrigin.y * v00.y - RayOrigin.z * v00.z;
float invDz = 1.0f / (RayDirection.x*v00.x + RayDirection.y*v00.y + RayDirection.z*v00.z);
float t = Oz * invDz;
if (t > tmin && t < hitT)
{
float Ox = v11.w + RayOrigin.x * v11.x + RayOrigin.y * v11.y + RayOrigin.z * v11.z;
float Dx = RayDirection.x * v11.x + RayDirection.y * v11.y + RayDirection.z * v11.z;
float u = Ox + t * Dx;
if (u >= 0.0f && u <= 1.0f)
{
float Oy = v22.w + RayOrigin.x * v22.x + RayOrigin.y * v22.y + RayOrigin.z * v22.z;
float Dy = RayDirection.x * v22.x + RayDirection.y * v22.y + RayDirection.z * v22.z;
float v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
triangleuv.x = u;
triangleuv.y = v;
hitT = t;
hitAddr = triAddr;
}
}
}
}
leafAddr = nodeAddr;
if (nodeAddr < 0)
{
nodeAddr = *stackPtr;
stackPtr--;
}
}
}
rayResultBuffer[rayidx].t_triId_u_v = make_float4(
hitT,
int_as_float(hitAddr),
triangleuv.x,
triangleuv.y
);
}
__host__ void rtBindBVH2Data(
const float4* InBVHTreeNodes,
const float4* InTriangleWoopCoordinates,
const int* InMappingFromTriangleAddressToIndex)
{
cudaCheck(cudaMemcpyToSymbol(MappingFromTriangleAddressToIndex, &InMappingFromTriangleAddressToIndex, 1 * sizeof(InMappingFromTriangleAddressToIndex)));
cudaCheck(cudaMemcpyToSymbol(TriangleWoopCoordinates, &InTriangleWoopCoordinates, 1 * sizeof(InTriangleWoopCoordinates)));
cudaCheck(cudaMemcpyToSymbol(BVHTreeNodes, &InBVHTreeNodes, 1 * sizeof(InBVHTreeNodes)));
}
__host__ void rtTraceBVH2(
Ray* rayBuffer,
Hit* rayResultBuffer,
int rayCount
)
{
#if ENABLE_PROFILING==1
float elapsedTime;
cudaEvent_t startEvent, stopEvent;
cudaCheck(cudaEventCreate(&startEvent));
cudaCheck(cudaEventCreate(&stopEvent));
#endif
int* cudaFinishedRayCount;
cudaCheck(cudaMalloc(&cudaFinishedRayCount, sizeof(int)));
cudaMemset(cudaFinishedRayCount, 0, sizeof(int));
dim3 blockDim(128, 1);
dim3 gridDim(idivCeil(rayCount, blockDim.x), 1);
#if ENABLE_PROFILING==1
cudaProfilerStart();
cudaCheck(cudaEventRecord(startEvent, 0));
#endif
Log("start Aila tracing\n");
rtTraceBVH2Plain <<< gridDim, blockDim >>> (
rayBuffer,
rayResultBuffer,
rayCount,
cudaFinishedRayCount
);
#if ENABLE_PROFILING==1
cudaCheck(cudaEventRecord(stopEvent, 0));
cudaCheck(cudaEventSynchronize(stopEvent));
cudaCheck(cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent));
Log("%.3fMS, %.2lfMRays/s (rtTraceBVH2 No Dynamic Fetch)", elapsedTime, (double)rayCount / 1000000.0f / (elapsedTime / 1000.0f));
cudaProfilerStop();
#endif
cudaFree(cudaFinishedRayCount);
}
|
3d1dea42345ad71b8d4ac9aa2305e36e8aa39511.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include<stdlib.h>
#include <errno.h>
#include "util.cu.h"
#include "key-p2p.h"
#include "key_api.h"
#ifndef CUDA_SAFE_CALL
#define CUDA_SAFE_CALL(err) __checkCudaErrors (err, __FILE__, __LINE__)
#endif
#include<algorithm>
#include<pthread.h>
#include"original_gpu.cu"
//using namespace std;
#ifdef TRACE
#define PRINT_TRACE(...) fprintf(stderr, __VA_ARGS__);
#else
#define PRINT_TRACE(...)
#endif
#ifdef TIMING_STATS
#define PRINT_TIMES(...) fprintf(stderr, __VA_ARGS__);
#else
#define PRINT_TIMES(...)
#endif
inline void __checkCudaErrors(hipError_t err, const char *file, const int line)
{
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int) err, hipGetErrorString(err));
exit(-1);
}
}
#define START_KEY 10000
#define START_PUT 1000000
#define MAX_CHUNK_NUM 8 //64
pthread_t rwLoopTasksIDs[2][MAX_CHUNK_NUM];
volatile int done = 0;
//pthread_mutex_t rwLoopTasksLocks[RW_HOST_WORKERS];
//pthread_cond_t rwLoopTasksConds[RW_HOST_WORKERS];
//pthread_t rwLoopTasksIDs[RW_HOST_WORKERS];
//TaskData rwLoopTasksData[RW_HOST_WORKERS];
typedef struct _pipe{
int id;
int num_thread;
int i;
int chunk_cnt;
int request_num;
unsigned int request_size;
int *request_list;
char *ptr;
int call;
}Pipe;
Pipe task_data[2][MAX_CHUNK_NUM];
unsigned long transfer_size[2][MAX_CHUNK_NUM]={0};
volatile int request_status[2][MAX_CHUNK_NUM]={0};
pthread_spinlock_t request_lock[2][MAX_CHUNK_NUM];
pthread_mutex_t cond_lock[2][MAX_CHUNK_NUM];
pthread_cond_t cond[2][MAX_CHUNK_NUM];
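/* Worker thread body. Threads with id == 0 stream KEY_GET requests for their stripe of the
 * request list (through key_op into a host bounce buffer, or through key_p2p_op with a byte
 * offset when call != 0); threads with id == 1 copy the corresponding chunk out of the user
 * buffer and KEY_PUT it back under keys offset by START_PUT. Each thread starts at
 * i * chunk_cnt and walks the list with a stride of chunk_cnt * num_thread. */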
void* pipe_task(void* param){
Pipe* s=(Pipe*)param;
int request_start[MAX_CHUNK_NUM]={0};
int id=s->id;
int chunk_cnt=s->chunk_cnt;
int request_num=s->request_num;
int* request_list=s->request_list;
unsigned int request_size=s->request_size;
int num_thread=s->num_thread;
int call=s->call;
char *buffer=s->ptr;
unsigned long chunk_size=(unsigned long)request_size*chunk_cnt;
int next_key=chunk_cnt*num_thread;
int i,j,start;
int ret;
char *temp_buffer;
// printf("222222222222222222222");
if(posix_memalign((void**)&temp_buffer,KEY_PAGE_SIZE,request_size)){
printf("can not allocate io payload buffer!\n");
return NULL;
}
i=s->i; //num_thread;
// for(i=0;i<num_thread;i++){
request_start[i]=i*chunk_cnt;
// }
// printf("thread %d start!\n",id);
while(!done){
// break;
int cnt=0,complete_cnt=0;
// for(i=0;i<num_thread;i++){
if(request_start[i]>=request_num){
complete_cnt++; break;
}
int status;
/*
pthread_spin_lock(&request_lock[id][i]);
status=request_status[id][i];
pthread_spin_unlock(&request_lock[id][i]);
if((id==0 & status!=0) || (id==1 && status!=1) ){
pthread_mutex_lock(&cond_lock[id][i]);
pthread_cond_signal(&cond[id][i]);
pthread_mutex_unlock(&cond_lock[id][i]);
}
*/
status=0; /////////////////////////////////
// printf("thread_start!\n");
if(id==0 && status==0){
cnt++;
int end=min(request_start[i]+chunk_cnt,request_num);
start=0;
for(j=request_start[i];j<end;j++){
if(call==0){//no p2p
ret=key_op(KEY_GET,request_list[j],temp_buffer,request_size);
// memcpy(buffer+(i*chunk_size)+(start*request_size),temp_buffer,request_size);
// ret=key_op(KEY_GET,request_list[j],buffer+(i*chunk_size)+(start*request_size),request_size);
}
else{
ret=key_p2p_op(KEY_GET,request_list[j],(i*chunk_size)+(start*request_size),request_size);
}
if(ret<=0) printf("key_op error!\n");
start++;
}
transfer_size[id][i]+=(unsigned long)(end-request_start[i])*request_size;
request_start[i]+=next_key;
/*
pthread_spin_lock(&request_lock[id][i]);
request_status[id][i]=1;
pthread_spin_unlock(&request_lock[id][i]);
*/
}
else if(id==1 && status==1){
cnt++;
int end=min(request_start[i]+chunk_cnt,request_num);
start=0;
for(j=request_start[i];j<end;j++){
memcpy(temp_buffer, buffer+(i*chunk_size)+(start*request_size),request_size);
ret=key_op(KEY_PUT,START_PUT+request_list[j],temp_buffer,request_size);
//ret=key_op(KEY_PUT, START_PUT+request_list[j], buffer+(i*chunk_size)+(start*request_size),request_size);
if(ret<0) printf("key_op error!\n");
start++;
}
transfer_size[id][i]+=(unsigned long)(end-request_start[i])*request_size;
request_start[i]+=next_key;
/*
pthread_spin_lock(&request_lock[id][i]);
request_status[id][i]=0;
pthread_spin_unlock(&request_lock[id][i]);
*/
}
// }
// if(complete_cnt>=num_thread) break;
// if(cnt==0) usleep(100);
}
if(id==1){
// for(i=0;i<num_thread;i++){
/*
pthread_spin_lock(&request_lock[id][i]);
request_status[id][i]=2;
pthread_spin_unlock(&request_lock[id][i]);
*/
// }
}
free(temp_buffer);
return NULL;
}
int main(int argc, char** argv)
{
int device = 0;
int request_num=1024, request_mode=2, num_thread=1, handle=1,call=0;
unsigned int request_size=1024*1024;
int gpu_thread=64,gpu_block=256;
int gpu_memory_size=(192*1024*1024); ///p2p mem :220MB
int batch=32;
int ret=0;
while(1){
int c;
c=getopt(argc,argv,"s:n:m:p:i:b:t:c:q:h");
if(c==-1) break;
switch(c){
case 's':
request_size=strtol(optarg,NULL,0);
break;
case 'n':
request_num=strtol(optarg,NULL,0);
break;
case 'm':
request_mode=strtol(optarg,NULL,0);
break;
case 'p':
num_thread=strtol(optarg,NULL,0);
break;
case 'i':
handle=strtol(optarg,NULL,0);
break;
case 'b':
gpu_block=strtol(optarg,NULL,0);
break;
case 't':
gpu_thread=strtol(optarg,NULL,0);
break;
case 'c':
call=strtol(optarg,NULL,0);
break;
case 'q':
batch=strtol(optarg,NULL,0);
break;
case 'h':
printf("syntax: %s -s <request_size> -n <num_request> -m <mode:get-0,put-1,get_and_put-2,program-3> -p <num_polling_thread> -i <process:one-0,batch_async-1,batch_sync-2> -q <batch_size> -b <gpu block> -t < gpu theads in block> -c <call:0-thread,1-warp,2-block> -h\n",argv[0]);
exit(1);
break;
default:
printf("ERROR: invalid option\n");
exit(1);
break;
}
}
char *user_get,*user_put;
user_get=(char*)malloc(sizeof(char)*gpu_memory_size);
user_put=(char*)malloc(sizeof(char)*gpu_memory_size);
// printf("1111111111111\n");
///////////////////////////////////////////////////////
///////key_value open
int key_ret=key_open(F_NAME);
// printf("1111111111111\n");
//////////////////////////////////////////////////////
//////request list make
int *request_list;
int i,j;
request_list=(int*)malloc(sizeof(int)*request_num);
for(i=0;i<request_num;i++){
request_list[i]=i+START_KEY;
}
random_shuffle(request_list,request_list+request_num);
// ASSERTRT(hipMalloc((void**)&cuda_request,sizeof(int)*request_num));
// ASSERTRT(hipMemcpy(cuda_request,request_list,sizeof(int)*request_num,hipMemcpyHostToDevice));
/////////////////////////////////////////////////////
///////push data to key_value_SSD or GPU_MEMORY
// char *temp_data=(char*)malloc(sizeof(char)*gpu_memory_size);
// memset(temp_data,0,gpu_memory_size);
int temp_cnt=0;
// printf("1111111111111\n");
////////ready for START_KEY
int request_start[MAX_CHUNK_NUM]={0};
int stage[MAX_CHUNK_NUM]={0};
int chunk_cnt=(gpu_memory_size/num_thread)/request_size;
int next_key=chunk_cnt*num_thread;
unsigned long chunk_size=(unsigned long)chunk_cnt*request_size;
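	// Partitioning: each worker owns chunk_cnt requests worth of buffer (chunk_size bytes)
	// and advances through the request list with a stride of next_key = chunk_cnt * num_thread.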
unsigned long out_size;
unsigned int out_request_size;
int n=sqrt(gpu_thread);
dim3 nthreads(n,n);
int cuda_block=gpu_block/num_thread;
double time_before,time_after;
if(request_mode==3){ out_size=chunk_size/3; out_request_size=request_size/3;} //image
else{ out_size=chunk_size; out_request_size=request_size;}
printf("MAX_THROUGHPUT | size : %d KB, num : %d, mode : %d, user_thread : %d, block : %d, thread : %d, p2p_mode : %d\n",
request_size/1024, request_num,request_mode, num_thread, gpu_block,gpu_thread,call);
printf("MAX_THROUGHPUT | user_thread : %d, chunk_size : %lf MB, chunk_request_cnt : %d, next_key %d\n",num_thread, (double)chunk_size/(1024*1024),chunk_cnt,next_key);
for(i=0;i<num_thread;i++){
request_start[i]=i*chunk_cnt;
}
pthread_attr_t attr;
pthread_attr_init( &attr );
pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
// return 0;
//goto FREE_END;
// return 0;
// return 0;
// pthread_spin_init(&(request_lock[0]),0);
// pthread_spin_init(&(request_lock[1]),0);
// memset(request_status[0],0,sizeof(int)*MAX_CHUNK_NUM):
// memset(request_status[1],0,sizeof(int)*MAX_CHUNK_NUM):
for(i=0;i<2;i++){
for(j=0;j<num_thread;j++){
task_data[i][j].id=i; //for DISK read threads
task_data[i][j].i=j;
task_data[i][j].num_thread=num_thread;
task_data[i][j].chunk_cnt=chunk_cnt;
task_data[i][j].request_num=request_num;
if(i==0){
task_data[i][j].request_size=request_size;
if(call==0) task_data[i][j].ptr=user_get;
else task_data[i][j].ptr=NULL;
}
else{
task_data[i][j].request_size=out_request_size;
task_data[i][j].ptr=user_put;
}
task_data[i][j].request_list=request_list;
task_data[i][j].call=call;
}
}
time_before= _timestamp();
for(i=0;i<num_thread;i++){
if(request_mode==0){
//printf("11111111111");
pthread_create( (pthread_t*)&(rwLoopTasksIDs[0][i]), &attr, pipe_task, (Pipe*)&(task_data[0][i]) );
}
else if(request_mode==1)
pthread_create( (pthread_t*)&(rwLoopTasksIDs[1][i]), &attr, pipe_task, (Pipe*)&(task_data[1][i]) );
else{
// printf("thread 2!\n");
pthread_create( (pthread_t*)&(rwLoopTasksIDs[0][i]), &attr, pipe_task, (Pipe*)&(task_data[0][i]) );
pthread_create( (pthread_t*)&(rwLoopTasksIDs[1][i]), &attr, pipe_task, (Pipe*)&(task_data[1][i]) );
}
}
pthread_attr_destroy(&attr);
// done=1;
for(i=0;i<num_thread;i++){
if(request_mode!=1) pthread_join(rwLoopTasksIDs[0][i],NULL);
if(request_mode!=0) pthread_join(rwLoopTasksIDs[1][i],NULL);
}
time_after = _timestamp();
double total_time = elapsed_time(time_before,time_after);
double total_size=0;
for(i=0;i<num_thread;i++){
total_size+=transfer_size[0][i]+transfer_size[1][i];
}
//printf("Transfer time: %lf ms\tCount: %d\tSize: %lf MB\n", totalTime / 1e3,totalCount,(double)totalSize/(1<<20));////1e6); //ms....
//printf( "\tBandwidth: %lfGB/s\n\n", ((double)totalSize / (1 << 30)) / (totalTime / 1e6));//1e9));
printf("MAX_THROUGHPUT | Total time: %lf s\tSize: %lf MB\tBandwidth %lf GB/s \n\n\n", total_time,(total_size/(1<<20)),(total_size/(1<<30))/total_time );///9));
//Check for errors and failed asserts in asynchronous kernel launch.
//////////Success check
/*
if(request_mode==0){
char temp_value;
int diff=0,iter=0;
ASSERTRT(hipMemcpy(temp_data,cuda_memory,gpu_memory_size,hipMemcpyDeviceToHost));
for(i=0;i<gpu_memory_size;i+=request_size){
temp_value=temp_data[i];
for(j=i;j<i+request_size;j++){
if(temp_value!=temp_data[j] && diff<10){ printf("[diff] %c != %c\n",temp_data[j],temp_value); diff++;}
if(iter<10 && j-i<10) printf("%c",temp_data[j]);
}
if(iter<10) printf("\n");
iter++;
}
printf("total diff : %d\n",diff);
}
else if(request_mode==1){ //write Check
memset(buffer[0],0,buffer_size);
int iter=0,diff=0;
for(i=START_KEY;i<START_KEY+request_num;i++){
key_op(GET,i,buffer[0],request_size);
for(j=0;j<request_size;j++){
if(buffer[0][j]!=buffer[0][0] && diff<10){ printf("[diff] %c != %c\n",buffer[0][j],buffer[0][0]); diff++;}
if(iter<10 && j<10) printf("%c",buffer[0][j]);
}
if(iter<10) printf("\n");
iter++;
}
printf("total diff : %d\n",diff);
}
*/
//PRINT_DEBUG;
///////////////////////////////////////////
/////All Freeeeee
// fprintf(stderr, "\n");
free(user_get);
free(user_put);
free(request_list);
// free(temp_data);
key_close(F_NAME);
hipDeviceReset();
return 0;
}
| 3d1dea42345ad71b8d4ac9aa2305e36e8aa39511.cu | #include <stdio.h>
#include<stdlib.h>
#include <errno.h>
#include "util.cu.h"
#include "key-p2p.h"
#include "key_api.h"
#ifndef CUDA_SAFE_CALL
#define CUDA_SAFE_CALL(err) __checkCudaErrors (err, __FILE__, __LINE__)
#endif
#include<algorithm>
#include<pthread.h>
#include"original_gpu.cu"
//using namespace std;
#ifdef TRACE
#define PRINT_TRACE(...) fprintf(stderr, __VA_ARGS__);
#else
#define PRINT_TRACE(...)
#endif
#ifdef TIMING_STATS
#define PRINT_TIMES(...) fprintf(stderr, __VA_ARGS__);
#else
#define PRINT_TIMES(...)
#endif
inline void __checkCudaErrors(cudaError err, const char *file, const int line)
{
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int) err, cudaGetErrorString(err));
exit(-1);
}
}
#define START_KEY 10000
#define START_PUT 1000000
#define MAX_CHUNK_NUM 8 //64
pthread_t rwLoopTasksIDs[2][MAX_CHUNK_NUM];
volatile int done = 0;
//pthread_mutex_t rwLoopTasksLocks[RW_HOST_WORKERS];
//pthread_cond_t rwLoopTasksConds[RW_HOST_WORKERS];
//pthread_t rwLoopTasksIDs[RW_HOST_WORKERS];
//TaskData rwLoopTasksData[RW_HOST_WORKERS];
typedef struct _pipe{
int id;
int num_thread;
int i;
int chunk_cnt;
int request_num;
unsigned int request_size;
int *request_list;
char *ptr;
int call;
}Pipe;
Pipe task_data[2][MAX_CHUNK_NUM];
unsigned long transfer_size[2][MAX_CHUNK_NUM]={0};
volatile int request_status[2][MAX_CHUNK_NUM]={0};
pthread_spinlock_t request_lock[2][MAX_CHUNK_NUM];
pthread_mutex_t cond_lock[2][MAX_CHUNK_NUM];
pthread_cond_t cond[2][MAX_CHUNK_NUM];
void* pipe_task(void* param){
Pipe* s=(Pipe*)param;
int request_start[MAX_CHUNK_NUM]={0};
int id=s->id;
int chunk_cnt=s->chunk_cnt;
int request_num=s->request_num;
int* request_list=s->request_list;
unsigned int request_size=s->request_size;
int num_thread=s->num_thread;
int call=s->call;
char *buffer=s->ptr;
unsigned long chunk_size=(unsigned long)request_size*chunk_cnt;
int next_key=chunk_cnt*num_thread;
int i,j,start;
int ret;
char *temp_buffer;
// printf("222222222222222222222");
if(posix_memalign((void**)&temp_buffer,KEY_PAGE_SIZE,request_size)){
printf("can not allocate io payload buffer!\n");
return NULL;
}
i=s->i; //num_thread;
// for(i=0;i<num_thread;i++){
request_start[i]=i*chunk_cnt;
// }
// printf("thread %d start!\n",id);
while(!done){
// break;
int cnt=0,complete_cnt=0;
// for(i=0;i<num_thread;i++){
if(request_start[i]>=request_num){
complete_cnt++; break;
}
int status;
/*
pthread_spin_lock(&request_lock[id][i]);
status=request_status[id][i];
pthread_spin_unlock(&request_lock[id][i]);
if((id==0 & status!=0) || (id==1 && status!=1) ){
pthread_mutex_lock(&cond_lock[id][i]);
pthread_cond_signal(&cond[id][i]);
pthread_mutex_unlock(&cond_lock[id][i]);
}
*/
status=0; /////////////////////////////////
// printf("thread_start!\n");
if(id==0 && status==0){
cnt++;
int end=min(request_start[i]+chunk_cnt,request_num);
start=0;
for(j=request_start[i];j<end;j++){
if(call==0){//no p2p
ret=key_op(KEY_GET,request_list[j],temp_buffer,request_size);
// memcpy(buffer+(i*chunk_size)+(start*request_size),temp_buffer,request_size);
// ret=key_op(KEY_GET,request_list[j],buffer+(i*chunk_size)+(start*request_size),request_size);
}
else{
ret=key_p2p_op(KEY_GET,request_list[j],(i*chunk_size)+(start*request_size),request_size);
}
if(ret<=0) printf("key_op error!\n");
start++;
}
transfer_size[id][i]+=(unsigned long)(end-request_start[i])*request_size;
request_start[i]+=next_key;
/*
pthread_spin_lock(&request_lock[id][i]);
request_status[id][i]=1;
pthread_spin_unlock(&request_lock[id][i]);
*/
}
else if(id==1 && status==1){
cnt++;
int end=min(request_start[i]+chunk_cnt,request_num);
start=0;
for(j=request_start[i];j<end;j++){
memcpy(temp_buffer, buffer+(i*chunk_size)+(start*request_size),request_size);
ret=key_op(KEY_PUT,START_PUT+request_list[j],temp_buffer,request_size);
//ret=key_op(KEY_PUT, START_PUT+request_list[j], buffer+(i*chunk_size)+(start*request_size),request_size);
if(ret<0) printf("key_op error!\n");
start++;
}
transfer_size[id][i]+=(unsigned long)(end-request_start[i])*request_size;
request_start[i]+=next_key;
/*
pthread_spin_lock(&request_lock[id][i]);
request_status[id][i]=0;
pthread_spin_unlock(&request_lock[id][i]);
*/
}
// }
// if(complete_cnt>=num_thread) break;
// if(cnt==0) usleep(100);
}
if(id==1){
// for(i=0;i<num_thread;i++){
/*
pthread_spin_lock(&request_lock[id][i]);
request_status[id][i]=2;
pthread_spin_unlock(&request_lock[id][i]);
*/
// }
}
free(temp_buffer);
return NULL;
}
int main(int argc, char** argv)
{
int device = 0;
int request_num=1024, request_mode=2, num_thread=1, handle=1,call=0;
unsigned int request_size=1024*1024;
int gpu_thread=64,gpu_block=256;
int gpu_memory_size=(192*1024*1024); ///p2p mem :220MB
int batch=32;
int ret=0;
while(1){
int c;
c=getopt(argc,argv,"s:n:m:p:i:b:t:c:q:h");
if(c==-1) break;
switch(c){
case 's':
request_size=strtol(optarg,NULL,0);
break;
case 'n':
request_num=strtol(optarg,NULL,0);
break;
case 'm':
request_mode=strtol(optarg,NULL,0);
break;
case 'p':
num_thread=strtol(optarg,NULL,0);
break;
case 'i':
handle=strtol(optarg,NULL,0);
break;
case 'b':
gpu_block=strtol(optarg,NULL,0);
break;
case 't':
gpu_thread=strtol(optarg,NULL,0);
break;
case 'c':
call=strtol(optarg,NULL,0);
break;
case 'q':
batch=strtol(optarg,NULL,0);
break;
case 'h':
printf("syntax: %s -s <request_size> -n <num_request> -m <mode:get-0,put-1,get_and_put-2,program-3> -p <num_polling_thread> -i <process:one-0,batch_async-1,batch_sync-2> -q <batch_size> -b <gpu block> -t < gpu theads in block> -c <call:0-thread,1-warp,2-block> -h\n",argv[0]);
exit(1);
break;
default:
printf("ERROR: invalid option\n");
exit(1);
break;
}
}
char *user_get,*user_put;
user_get=(char*)malloc(sizeof(char)*gpu_memory_size);
user_put=(char*)malloc(sizeof(char)*gpu_memory_size);
// printf("1111111111111\n");
///////////////////////////////////////////////////////
///////key_value open
int key_ret=key_open(F_NAME);
// printf("1111111111111\n");
//////////////////////////////////////////////////////
//////request list make
int *request_list;
int i,j;
request_list=(int*)malloc(sizeof(int)*request_num);
for(i=0;i<request_num;i++){
request_list[i]=i+START_KEY;
}
random_shuffle(request_list,request_list+request_num);
// ASSERTRT(cudaMalloc((void**)&cuda_request,sizeof(int)*request_num));
// ASSERTRT(cudaMemcpy(cuda_request,request_list,sizeof(int)*request_num,cudaMemcpyHostToDevice));
/////////////////////////////////////////////////////
///////push data to key_value_SSD or GPU_MEMORY
// char *temp_data=(char*)malloc(sizeof(char)*gpu_memory_size);
// memset(temp_data,0,gpu_memory_size);
int temp_cnt=0;
// printf("1111111111111\n");
////////ready for START_KEY
int request_start[MAX_CHUNK_NUM]={0};
int stage[MAX_CHUNK_NUM]={0};
int chunk_cnt=(gpu_memory_size/num_thread)/request_size;
int next_key=chunk_cnt*num_thread;
unsigned long chunk_size=(unsigned long)chunk_cnt*request_size;
unsigned long out_size;
unsigned int out_request_size;
int n=sqrt(gpu_thread);
dim3 nthreads(n,n);
int cuda_block=gpu_block/num_thread;
double time_before,time_after;
if(request_mode==3){ out_size=chunk_size/3; out_request_size=request_size/3;} //image
else{ out_size=chunk_size; out_request_size=request_size;}
printf("MAX_THROUGHPUT | size : %d KB, num : %d, mode : %d, user_thread : %d, block : %d, thread : %d, p2p_mode : %d\n",
request_size/1024, request_num,request_mode, num_thread, gpu_block,gpu_thread,call);
printf("MAX_THROUGHPUT | user_thread : %d, chunk_size : %lf MB, chunk_request_cnt : %d, next_key %d\n",num_thread, (double)chunk_size/(1024*1024),chunk_cnt,next_key);
for(i=0;i<num_thread;i++){
request_start[i]=i*chunk_cnt;
}
pthread_attr_t attr;
pthread_attr_init( &attr );
pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
// return 0;
//goto FREE_END;
// return 0;
// return 0;
// pthread_spin_init(&(request_lock[0]),0);
// pthread_spin_init(&(request_lock[1]),0);
// memset(request_status[0],0,sizeof(int)*MAX_CHUNK_NUM):
// memset(request_status[1],0,sizeof(int)*MAX_CHUNK_NUM):
for(i=0;i<2;i++){
for(j=0;j<num_thread;j++){
task_data[i][j].id=i; //for DISK read threads
task_data[i][j].i=j;
task_data[i][j].num_thread=num_thread;
task_data[i][j].chunk_cnt=chunk_cnt;
task_data[i][j].request_num=request_num;
if(i==0){
task_data[i][j].request_size=request_size;
if(call==0) task_data[i][j].ptr=user_get;
else task_data[i][j].ptr=NULL;
}
else{
task_data[i][j].request_size=out_request_size;
task_data[i][j].ptr=user_put;
}
task_data[i][j].request_list=request_list;
task_data[i][j].call=call;
}
}
time_before= _timestamp();
for(i=0;i<num_thread;i++){
if(request_mode==0){
//printf("11111111111");
pthread_create( (pthread_t*)&(rwLoopTasksIDs[0][i]), &attr, pipe_task, (Pipe*)&(task_data[0][i]) );
}
else if(request_mode==1)
pthread_create( (pthread_t*)&(rwLoopTasksIDs[1][i]), &attr, pipe_task, (Pipe*)&(task_data[1][i]) );
else{
// printf("thread 2!\n");
pthread_create( (pthread_t*)&(rwLoopTasksIDs[0][i]), &attr, pipe_task, (Pipe*)&(task_data[0][i]) );
pthread_create( (pthread_t*)&(rwLoopTasksIDs[1][i]), &attr, pipe_task, (Pipe*)&(task_data[1][i]) );
}
}
pthread_attr_destroy(&attr);
// done=1;
for(i=0;i<num_thread;i++){
if(request_mode!=1) pthread_join(rwLoopTasksIDs[0][i],NULL);
if(request_mode!=0) pthread_join(rwLoopTasksIDs[1][i],NULL);
}
time_after = _timestamp();
double total_time = elapsed_time(time_before,time_after);
double total_size=0;
for(i=0;i<num_thread;i++){
total_size+=transfer_size[0][i]+transfer_size[1][i];
}
//printf("Transfer time: %lf ms\tCount: %d\tSize: %lf MB\n", totalTime / 1e3,totalCount,(double)totalSize/(1<<20));////1e6); //ms....
//printf( "\tBandwidth: %lfGB/s\n\n", ((double)totalSize / (1 << 30)) / (totalTime / 1e6));//1e9));
printf("MAX_THROUGHPUT | Total time: %lf s\tSize: %lf MB\tBandwidth %lf GB/s \n\n\n", total_time,(total_size/(1<<20)),(total_size/(1<<30))/total_time );///9));
//Check for errors and failed asserts in asynchronous kernel launch.
//////////Success check
/*
if(request_mode==0){
char temp_value;
int diff=0,iter=0;
ASSERTRT(cudaMemcpy(temp_data,cuda_memory,gpu_memory_size,cudaMemcpyDeviceToHost));
for(i=0;i<gpu_memory_size;i+=request_size){
temp_value=temp_data[i];
for(j=i;j<i+request_size;j++){
if(temp_value!=temp_data[j] && diff<10){ printf("[diff] %c != %c\n",temp_data[j],temp_value); diff++;}
if(iter<10 && j-i<10) printf("%c",temp_data[j]);
}
if(iter<10) printf("\n");
iter++;
}
printf("total diff : %d\n",diff);
}
else if(request_mode==1){ //write Check
memset(buffer[0],0,buffer_size);
int iter=0,diff=0;
for(i=START_KEY;i<START_KEY+request_num;i++){
key_op(GET,i,buffer[0],request_size);
for(j=0;j<request_size;j++){
if(buffer[0][j]!=buffer[0][0] && diff<10){ printf("[diff] %c != %c\n",buffer[0][j],buffer[0][0]); diff++;}
if(iter<10 && j<10) printf("%c",buffer[0][j]);
}
if(iter<10) printf("\n");
iter++;
}
printf("total diff : %d\n",diff);
}
*/
//PRINT_DEBUG;
///////////////////////////////////////////
/////All Freeeeee
// fprintf(stderr, "\n");
free(user_get);
free(user_put);
free(request_list);
// free(temp_data);
key_close(F_NAME);
cudaDeviceReset();
return 0;
}
|
323cf513220db9855e7a34fabd18458c66872340.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// Do not time memory allocation
int* dev_odata;
hipMalloc((void**)&dev_odata, n * sizeof(int));
int* dev_idata;
hipMalloc((void**)&dev_idata, n * sizeof(int));
hipMemcpy(dev_idata, idata, sizeof(int) * n, hipMemcpyHostToDevice);
thrust::device_ptr<int> dv_out(dev_odata);
thrust::device_ptr<int> dv_in(dev_idata);
// Time everything else
timer().startGpuTimer();
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
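            //   e.g. idata = [3, 1, 7, 0, 4]  ->  odata = [0, 3, 4, 11, 11]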
thrust::exclusive_scan(dv_in, dv_in + n, dv_out);
timer().endGpuTimer();
// Get the return value off of the device and free memory.
hipMemcpy(odata, dev_odata, sizeof(int) * n, hipMemcpyDeviceToHost);
hipFree(dev_odata);
hipFree(dev_idata);
}
}
}
| 323cf513220db9855e7a34fabd18458c66872340.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// Do not time memory allocation
int* dev_odata;
cudaMalloc((void**)&dev_odata, n * sizeof(int));
int* dev_idata;
cudaMalloc((void**)&dev_idata, n * sizeof(int));
cudaMemcpy(dev_idata, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
thrust::device_ptr<int> dv_out(dev_odata);
thrust::device_ptr<int> dv_in(dev_idata);
// Time everything else
timer().startGpuTimer();
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::exclusive_scan(dv_in, dv_in + n, dv_out);
timer().endGpuTimer();
// Get the return value off of the device and free memory.
cudaMemcpy(odata, dev_odata, sizeof(int) * n, cudaMemcpyDeviceToHost);
cudaFree(dev_odata);
cudaFree(dev_idata);
}
}
}
|
cdc3e312f848be22532cb54cc6158cec37178507.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
/* Custom config file written by Python code*/
#include <config.h>
namespace
{
template <typename scalar_t>
__global__ void graph_forward_kernel(
const torch::PackedTensorAccessor32<scalar_t,DIMS,torch::RestrictPtrTraits> dyn_input,
const torch::PackedTensorAccessor32<scalar_t,DIMS,torch::RestrictPtrTraits> lat_input,
torch::PackedTensorAccessor32<scalar_t,DIMS,torch::RestrictPtrTraits> out,
const int lat_size) {
/*
[Note that the out variable is the output of this function,
but the content will be used later as the input of the network!]
This kernel implements the concatination of the dynamical and lateral input of the network
and it implements the graph connections based on the grid of PKs
(creation of the lateral input from the lateral output).
Every PK is connected with its surrounding neighbors.
There can be up to 8 neighbors (top left, top, top right, left, right,
bottom left, bottom, bottom right).
The corners of the grid have only 3 neighbors,
the nodes at the edge of the grid have only 5 neighbors.
:param dyn_input: Is the dynamical input of the current time step
        :param lat_input: Is actually the lateral output of the former time step,
        which now becomes the input of this kernel
:param out: Output of this function
CUDA specific information:
Number of blocks in the grid:
        gridDim.x - number of blocks in the x dimension of the grid
        gridDim.y - number of blocks in the y dimension of the grid
        Number of threads in a block:
        blockDim.x - number of threads in the x dimension of the block
        blockDim.y - number of threads in the y dimension of the block
        Block Index:
        blockIdx.x - block's index in x dimension
        blockIdx.y - block's index in y dimension
        Thread Index:
        threadIdx.x - thread's index in x dimension
        threadIdx.y - thread's index in y dimension
*/
/*
Calculating block index:
row no (blockIdx.y) * length of row (gridDim.x) + row position (blockIdx.x)
*/
const int batch_block_id = blockIdx.y * gridDim.x + blockIdx.x;
/*
Calculating thread index:
like block_id, see above
*/
const int pk_thread_id = threadIdx.y * blockDim.x + threadIdx.x;
/* Setting the dynamical input of the network */
for (int dyn = 0; dyn < DYN_SIZE; dyn++)
{
out[batch_block_id][pk_thread_id][dyn] = dyn_input[batch_block_id][pk_thread_id][dyn];
}
/* Variables to access the correct neighbors */
const int top = pk_thread_id - PK_COLS;
const int bottom = pk_thread_id + PK_COLS;
const bool y_gt_0 = threadIdx.y > 0;
const bool x_gt_0 = threadIdx.x > 0;
const bool y_lt_max = threadIdx.y < PK_ROWS - 1;
const bool x_lt_max = threadIdx.x < PK_COLS - 1;
/* Setting the lateral input of the network */
for (int lat = 0; lat < lat_size; lat++)
{
/* TOP GROUP */
if (y_gt_0)
{
/* TOP LEFT */
if (x_gt_0)
{
out[batch_block_id][pk_thread_id][DYN_SIZE + lat] = lat_input[batch_block_id][top-1][lat];
}
/* TOP CENTER */
out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size + lat] = lat_input[batch_block_id][top][lat];
/* TOP RIGHT */
if (x_lt_max)
{
out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 2 + lat] = lat_input[batch_block_id][top+1][lat];
}
}
/* LEFT */
if(x_gt_0)
{
out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 3 + lat] = lat_input[batch_block_id][pk_thread_id-1][lat];
}
/* RIGHT */
if(x_lt_max)
{
out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 4 + lat] = lat_input[batch_block_id][pk_thread_id+1][lat];
}
/* BOTTOM GROUP */
if (y_lt_max)
{
/* BOTTOM LEFT */
if (x_gt_0)
{
out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 5 + lat] = lat_input[batch_block_id][bottom-1][lat];
}
/* BOTTOM CENTER */
out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 6 + lat] = lat_input[batch_block_id][bottom][lat];
/* BOTTOM RIGHT */
if (x_lt_max)
{
out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 7 + lat] = lat_input[batch_block_id][bottom+1][lat];
}
}
/* end of for loop for lateral connections*/
}
/* end of forward pass*/
}
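/* Backward pass: scatters the incoming gradient d_out back onto the dynamical and lateral
   inputs. The lateral-input gradients are accumulated with += because each PK's lateral
   output is read by up to NEIGHBORS (8) neighbouring PKs in the forward pass. */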
template <typename scalar_t>
__global__ void graph_backward_kernel(
const torch::PackedTensorAccessor32<scalar_t,DIMS,torch::RestrictPtrTraits> d_out,
torch::PackedTensorAccessor32<scalar_t,DIMS,torch::RestrictPtrTraits> d_dyn_input,
torch::PackedTensorAccessor32<scalar_t,DIMS,torch::RestrictPtrTraits> d_lat_input,
const int lat_size)
{
/*
*
*
The same as the forward pass, only the other way round.
*
*
*/
/*
Calculating block index:
row no (blockIdx.y) * length of row (gridDim.x) + row position (blockIdx.x)
*/
const int batch_block_id = blockIdx.y * gridDim.x + blockIdx.x;
/*
Calculating thread index:
like block_id, see above
*/
const int pk_thread_id = threadIdx.y * blockDim.x + threadIdx.x;
for (int dyn = 0; dyn < DYN_SIZE; dyn++)
{
d_dyn_input[batch_block_id][pk_thread_id][dyn] = d_out[batch_block_id][pk_thread_id][dyn];
}
const int top = pk_thread_id - PK_COLS;
const int bottom = pk_thread_id + PK_COLS;
const bool y_gt_0 = threadIdx.y > 0;
const bool x_gt_0 = threadIdx.x > 0;
const bool y_lt_max = threadIdx.y < PK_ROWS - 1;
const bool x_lt_max = threadIdx.x < PK_COLS - 1;
for (int lat = 0; lat < lat_size; lat++)
{
/* TOP GROUP */
if (y_gt_0)
{
/* TOP LEFT */
if (x_gt_0)
{
d_lat_input[batch_block_id][top-1][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat];
}
/* TOP CENTER */
d_lat_input[batch_block_id][top][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size + lat];
/* TOP RIGHT */
if (x_lt_max)
{
d_lat_input[batch_block_id][top+1][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 2 + lat];
}
}
/* LEFT */
if(x_gt_0)
{
d_lat_input[batch_block_id][pk_thread_id-1][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 3 + lat];
}
/* RIGHT */
if(x_lt_max)
{
d_lat_input[batch_block_id][pk_thread_id+1][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 4 + lat];
}
/* BOTTOM GROUP */
if (y_lt_max)
{
/* BOTTOM LEFT */
if (x_gt_0)
{
d_lat_input[batch_block_id][bottom-1][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 5 + lat];
}
/* BOTTOM CENTER */
d_lat_input[batch_block_id][bottom][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 6 + lat];
/* BOTTOM RIGHT */
if (x_lt_max)
{
d_lat_input[batch_block_id][bottom+1][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 7 + lat];
}
}
/* end of for loop for lateral connections*/
}
/* end of backward pass */
}
/* end of namespace */
}
std::vector<torch::Tensor> graph_cuda_forward(
torch::Tensor dyn_input,
torch::Tensor lat_input) {
/* get the torch tensor options to specify the gpu usage later */
auto options = torch::TensorOptions().device(torch::kCUDA).requires_grad(true);
/* set the batch size dynamically by means of the input shape*/
const auto batch_size = dyn_input.size(0);
const auto amount_pks = dyn_input.size(1);
const int lat_size = lat_input.size(2);
/* allocate enough memory space for the output of the kernel function */
auto out = torch::zeros({batch_size, amount_pks,
DYN_SIZE + NEIGHBORS * lat_size}, options);
/* map the grid of PKs to the grid of threads per block*/
const dim3 threads(PK_COLS, PK_ROWS);
/* map batches to blocks*/
const dim3 blocks(batch_size);
/* call the forward kernel function */
AT_DISPATCH_FLOATING_TYPES(out.type(), "graph_forward_kernel", ([&] {
hipLaunchKernelGGL(( graph_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
dyn_input.packed_accessor32<scalar_t,DIMS,torch::RestrictPtrTraits>(),
lat_input.packed_accessor32<scalar_t,DIMS,torch::RestrictPtrTraits>(),
out.packed_accessor32<scalar_t,DIMS,torch::RestrictPtrTraits>(),
lat_size
);
}));
return {out};
}
std::vector<torch::Tensor> graph_cuda_backward(
torch::Tensor d_out)
{
/* set the batch size dynamically by means of the input shape*/
const auto batch_size = d_out.size(0);
const auto amount_pks = d_out.size(1);
const auto total = d_out.size(2);
const int lat_size = (total - DYN_SIZE)/NEIGHBORS;
/* get the torch tensor options to specify the gpu usage later */
auto options = torch::TensorOptions().device(torch::kCUDA).requires_grad(true);
/* allocate enough memory space for the output of the kernel function */
auto d_dyn_input = torch::zeros({batch_size, amount_pks, DYN_SIZE}, options);
auto d_lat_input = torch::zeros({batch_size, amount_pks, lat_size}, options);
/* map the grid of PKs to the grid of threads per block*/
const dim3 threads(PK_ROWS, PK_COLS);
/* map batches to blocks*/
const dim3 blocks(batch_size);
/* call the backward kernel function */
AT_DISPATCH_FLOATING_TYPES(d_dyn_input.type(), "graph_backward_kernel", ([&] {
hipLaunchKernelGGL(( graph_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
d_out.packed_accessor32<scalar_t,DIMS,torch::RestrictPtrTraits>(),
d_dyn_input.packed_accessor32<scalar_t,DIMS,torch::RestrictPtrTraits>(),
d_lat_input.packed_accessor32<scalar_t,DIMS,torch::RestrictPtrTraits>(),
lat_size);
}));
return {d_dyn_input, d_lat_input};
}
| cdc3e312f848be22532cb54cc6158cec37178507.cu | #include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
/* Custom config file written by Python code*/
#include <config.h>
namespace
{
template <typename scalar_t>
__global__ void graph_forward_kernel(
const torch::PackedTensorAccessor32<scalar_t,DIMS,torch::RestrictPtrTraits> dyn_input,
const torch::PackedTensorAccessor32<scalar_t,DIMS,torch::RestrictPtrTraits> lat_input,
torch::PackedTensorAccessor32<scalar_t,DIMS,torch::RestrictPtrTraits> out,
const int lat_size) {
/*
[Note that the out variable is the output of this function,
but the content will be used later as the input of the network!]
This kernel implements the concatination of the dynamical and lateral input of the network
and it implements the graph connections based on the grid of PKs
(creation of the lateral input from the lateral output).
Every PK is connected with its surrounding neighbors.
There can be up to 8 neighbors (top left, top, top right, left, right,
bottom left, bottom, bottom right).
The corners of the grid have only 3 neighbors,
the nodes at the edge of the grid have only 5 neighbors.
:param dyn_input: Is the dynamical input of the current time step
        :param lat_input: Is actually the lateral output of the former time step,
        which now becomes the input of this kernel
:param out: Output of this function
CUDA specific information:
Number of blocks in the grid:
gridDim.x — number of blocks in the x dimension of the grid
gridDim.y — number of blocks in the y dimension of the grid
Number of threads in a block:
        blockDim.x — number of threads in the x dimension of the block
        blockDim.y — number of threads in the y dimension of the block
Block Index:
blockIdx.x — block’s index in x dimension
blockIdx.y — block’s index in y dimension
Thread Index:
threadIdx.x — thread’s index in x dimension
threadIdx.y — thread’s index in y dimension
*/
/*
Calculating block index:
row no (blockIdx.y) * length of row (gridDim.x) + row position (blockIdx.x)
*/
const int batch_block_id = blockIdx.y * gridDim.x + blockIdx.x;
/*
Calculating thread index:
like block_id, see above
*/
const int pk_thread_id = threadIdx.y * blockDim.x + threadIdx.x;
/* Setting the dynamical input of the network */
for (int dyn = 0; dyn < DYN_SIZE; dyn++)
{
out[batch_block_id][pk_thread_id][dyn] = dyn_input[batch_block_id][pk_thread_id][dyn];
}
/* Variables to access the correct neighbors */
const int top = pk_thread_id - PK_COLS;
const int bottom = pk_thread_id + PK_COLS;
const bool y_gt_0 = threadIdx.y > 0;
const bool x_gt_0 = threadIdx.x > 0;
const bool y_lt_max = threadIdx.y < PK_ROWS - 1;
const bool x_lt_max = threadIdx.x < PK_COLS - 1;
/* Setting the lateral input of the network */
for (int lat = 0; lat < lat_size; lat++)
{
/* TOP GROUP */
if (y_gt_0)
{
/* TOP LEFT */
if (x_gt_0)
{
out[batch_block_id][pk_thread_id][DYN_SIZE + lat] = lat_input[batch_block_id][top-1][lat];
}
/* TOP CENTER */
out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size + lat] = lat_input[batch_block_id][top][lat];
/* TOP RIGHT */
if (x_lt_max)
{
out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 2 + lat] = lat_input[batch_block_id][top+1][lat];
}
}
/* LEFT */
if(x_gt_0)
{
out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 3 + lat] = lat_input[batch_block_id][pk_thread_id-1][lat];
}
/* RIGHT */
if(x_lt_max)
{
out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 4 + lat] = lat_input[batch_block_id][pk_thread_id+1][lat];
}
/* BOTTOM GROUP */
if (y_lt_max)
{
/* BOTTOM LEFT */
if (x_gt_0)
{
out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 5 + lat] = lat_input[batch_block_id][bottom-1][lat];
}
/* BOTTOM CENTER */
out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 6 + lat] = lat_input[batch_block_id][bottom][lat];
/* BOTTOM RIGHT */
if (x_lt_max)
{
out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 7 + lat] = lat_input[batch_block_id][bottom+1][lat];
}
}
/* end of for loop for lateral connections*/
}
/* end of forward pass*/
}
template <typename scalar_t>
__global__ void graph_backward_kernel(
const torch::PackedTensorAccessor32<scalar_t,DIMS,torch::RestrictPtrTraits> d_out,
torch::PackedTensorAccessor32<scalar_t,DIMS,torch::RestrictPtrTraits> d_dyn_input,
torch::PackedTensorAccessor32<scalar_t,DIMS,torch::RestrictPtrTraits> d_lat_input,
const int lat_size)
{
/*
*
*
The same as the forward pass, only the other way round.
*
*
*/
/*
Calculating block index:
row no (blockIdx.y) * length of row (gridDim.x) + row position (blockIdx.x)
*/
const int batch_block_id = blockIdx.y * gridDim.x + blockIdx.x;
/*
Calculating thread index:
like block_id, see above
*/
const int pk_thread_id = threadIdx.y * blockDim.x + threadIdx.x;
for (int dyn = 0; dyn < DYN_SIZE; dyn++)
{
d_dyn_input[batch_block_id][pk_thread_id][dyn] = d_out[batch_block_id][pk_thread_id][dyn];
}
const int top = pk_thread_id - PK_COLS;
const int bottom = pk_thread_id + PK_COLS;
const bool y_gt_0 = threadIdx.y > 0;
const bool x_gt_0 = threadIdx.x > 0;
const bool y_lt_max = threadIdx.y < PK_ROWS - 1;
const bool x_lt_max = threadIdx.x < PK_COLS - 1;
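/* Note: several threads scatter-accumulate into the same d_lat_input cell
   below (e.g. one PK's "top" target is its right neighbor's "top-left"
   target), and the '+=' updates are plain, non-atomic read-modify-writes. */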
for (int lat = 0; lat < lat_size; lat++)
{
/* TOP GROUP */
if (y_gt_0)
{
/* TOP LEFT */
if (x_gt_0)
{
d_lat_input[batch_block_id][top-1][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat];
}
/* TOP CENTER */
d_lat_input[batch_block_id][top][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size + lat];
/* TOP RIGHT */
if (x_lt_max)
{
d_lat_input[batch_block_id][top+1][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 2 + lat];
}
}
/* LEFT */
if(x_gt_0)
{
d_lat_input[batch_block_id][pk_thread_id-1][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 3 + lat];
}
/* RIGHT */
if(x_lt_max)
{
d_lat_input[batch_block_id][pk_thread_id+1][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 4 + lat];
}
/* BOTTOM GROUP */
if (y_lt_max)
{
/* BOTTOM LEFT */
if (x_gt_0)
{
d_lat_input[batch_block_id][bottom-1][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 5 + lat];
}
/* BOTTOM CENTER */
d_lat_input[batch_block_id][bottom][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 6 + lat];
/* BOTTOM RIGHT */
if (x_lt_max)
{
d_lat_input[batch_block_id][bottom+1][lat] += d_out[batch_block_id][pk_thread_id][DYN_SIZE + lat_size * 7 + lat];
}
}
/* end of for loop for lateral connections*/
}
/* end of backward pass */
}
/* end of namespace */
}
std::vector<torch::Tensor> graph_cuda_forward(
torch::Tensor dyn_input,
torch::Tensor lat_input) {
/* get the torch tensor options to specify the gpu usage later */
auto options = torch::TensorOptions().device(torch::kCUDA).requires_grad(true);
/* set the batch size dynamically by means of the input shape*/
const auto batch_size = dyn_input.size(0);
const auto amount_pks = dyn_input.size(1);
const int lat_size = lat_input.size(2);
/* allocate enough memory space for the output of the kernel function */
auto out = torch::zeros({batch_size, amount_pks,
DYN_SIZE + NEIGHBORS * lat_size}, options);
/* map the grid of PKs to the grid of threads per block*/
const dim3 threads(PK_COLS, PK_ROWS);
/* map batches to blocks*/
const dim3 blocks(batch_size);
/* call the forward kernel function */
AT_DISPATCH_FLOATING_TYPES(out.type(), "graph_forward_kernel", ([&] {
graph_forward_kernel<scalar_t><<<blocks, threads>>>(
dyn_input.packed_accessor32<scalar_t,DIMS,torch::RestrictPtrTraits>(),
lat_input.packed_accessor32<scalar_t,DIMS,torch::RestrictPtrTraits>(),
out.packed_accessor32<scalar_t,DIMS,torch::RestrictPtrTraits>(),
lat_size
);
}));
return {out};
}
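/*
Minimal host-side usage sketch (hypothetical shapes and variable names; it
assumes the tensors already live on the GPU and that amount_pks equals
PK_ROWS * PK_COLS):

  auto dyn = torch::rand({batch, PK_ROWS * PK_COLS, DYN_SIZE},
                         torch::device(torch::kCUDA));
  auto lat = torch::rand({batch, PK_ROWS * PK_COLS, lat_channels},
                         torch::device(torch::kCUDA));
  auto out = graph_cuda_forward(dyn, lat)[0];
*/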
std::vector<torch::Tensor> graph_cuda_backward(
torch::Tensor d_out)
{
/* set the batch size dynamically by means of the input shape*/
const auto batch_size = d_out.size(0);
const auto amount_pks = d_out.size(1);
const auto total = d_out.size(2);
const int lat_size = (total - DYN_SIZE)/NEIGHBORS;
/* get the torch tensor options to specify the gpu usage later */
auto options = torch::TensorOptions().device(torch::kCUDA).requires_grad(true);
/* allocate enough memory space for the output of the kernel function */
auto d_dyn_input = torch::zeros({batch_size, amount_pks, DYN_SIZE}, options);
auto d_lat_input = torch::zeros({batch_size, amount_pks, lat_size}, options);
/* map the grid of PKs to the grid of threads per block*/
const dim3 threads(PK_COLS, PK_ROWS); /* x = columns, y = rows, matching the forward pass and the kernel's indexing */
/* map batches to blocks*/
const dim3 blocks(batch_size);
/* call the backward kernel function */
AT_DISPATCH_FLOATING_TYPES(d_dyn_input.type(), "graph_backward_kernel", ([&] {
graph_backward_kernel<scalar_t><<<blocks, threads>>>(
d_out.packed_accessor32<scalar_t,DIMS,torch::RestrictPtrTraits>(),
d_dyn_input.packed_accessor32<scalar_t,DIMS,torch::RestrictPtrTraits>(),
d_lat_input.packed_accessor32<scalar_t,DIMS,torch::RestrictPtrTraits>(),
lat_size);
}));
return {d_dyn_input, d_lat_input};
}
|
56d15f14c805e4a26accbeeef2b9fee896085938.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHBlas.h"
#include "THHTensorCopy.h"
#include "THHTensorRandom.h"
#include "THHApply.cuh"
#include "THHReduce.cuh"
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC) \
struct Tensor##NAME##Op { \
__device__ __forceinline__ void operator()(float* out, float* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(float* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCudaTensor_##NAME(THCState* state, THCudaTensor* self_, THCudaTensor* src) { \
THAssert(THCudaTensor_checkGPU(state, 2, self_, src)); \
if (self_ == src) { \
if (!THCudaTensor_pointwiseApply1(state, self_, Tensor##NAME##Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCudaTensor_resizeAs(state, self_, src); \
\
if (!THCudaTensor_pointwiseApply2(state, self_, src, Tensor##NAME##Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(hipGetLastError()); \
}
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log, log)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, log1p)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(exp, exp)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(cos, cos)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(acos, acos)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(cosh, cosh)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sin, sin)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(asin, asin)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sinh, sinh)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(tan, tan)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(atan, atan)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(tanh, tanh)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sqrt, sqrt)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(ceil, ceil)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, floor)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(abs, fabs)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(round, roundf)
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
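/*
Each IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC) expansion above defines an
element-wise THCudaTensor_NAME(state, dst, src). Usage sketch:

  THCudaTensor_log(state, dst, src);  // dst[i] = log(src[i]); dst is resized to src
  THCudaTensor_abs(state, t, t);      // in place: t[i] = fabs(t[i])
*/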
struct TensorAddOp {
__device__ __forceinline__ void operator()(float* out, float* in) {
*out += *in;
}
__device__ __forceinline__ void operator()(float* out, float* in1, float* in2) {
*out = *in1 + *in2;
}
};
struct TensorCAddOp {
TensorCAddOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out += val * *in;
}
__device__ __forceinline__ void operator()(float* out, float* in1, float* in2) {
*out = *in1 + val * *in2;
}
float val;
};
void THCudaTensor_cadd(THCState *state, THCudaTensor *self_, THCudaTensor* src1, float value, THCudaTensor *src2)
{
THAssert(THCudaTensor_checkGPU(state, 3, self_, src1, src2));
THArgCheck(THCudaTensor_nElement(state, src1) ==
THCudaTensor_nElement(state, src2), 3, "sizes do not match");
if (self_ == src1) {
if (value == 1.0f) {
// self += src2
if (!THCudaTensor_pointwiseApply2(state, self_, src2, TensorAddOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self += value * src2
if (!THCudaTensor_pointwiseApply2(state, self_, src2, TensorCAddOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCudaTensor_resizeAs(state, self_, src1);
if (value == 1.0f) {
// self = src1 + src2
if (!THCudaTensor_pointwiseApply3(state, self_, src1, src2, TensorAddOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self = src1 + value * src2
if (!THCudaTensor_pointwiseApply3(state, self_, src1, src2, TensorCAddOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(hipGetLastError());
}
struct TensorMulOp {
__device__ __forceinline__ void operator()(float* out, float* in) {
*out *= *in;
}
__device__ __forceinline__ void operator()(float* out, float* in1, float* in2) {
*out = *in1 * *in2;
}
};
void THCudaTensor_cmul(THCState *state, THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2)
{
THAssert(THCudaTensor_checkGPU(state, 3, self_, src1, src2));
THArgCheck(THCudaTensor_nElement(state, src1) ==
THCudaTensor_nElement(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self *= src2
if (!THCudaTensor_pointwiseApply2(state, self_, src2, TensorMulOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src1);
// self = src1 * src2
if (!THCudaTensor_pointwiseApply3(state, self_, src1, src2, TensorMulOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
struct TensorMaxOp {
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = max(*out, *in);
}
__device__ __forceinline__ void operator()(float* out, float* in1, float* in2) {
*out = max(*in1, *in2);
}
};
void THCudaTensor_cmax(THCState *state, THCudaTensor *self, THCudaTensor *src1, THCudaTensor *src2)
{
THAssert(THCudaTensor_checkGPU(state, 3, self, src1, src2));
THArgCheck(THCudaTensor_nElement(state, src1) ==
THCudaTensor_nElement(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THCudaTensor_pointwiseApply2(state, self, src2, TensorMaxOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self, src1);
if (!THCudaTensor_pointwiseApply3(state, self, src1, src2, TensorMaxOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
struct TensorMinOp {
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = min(*out, *in);
}
__device__ __forceinline__ void operator()(float* out, float* in1, float* in2) {
*out = min(*in1, *in2);
}
};
void THCudaTensor_cmin(THCState *state, THCudaTensor *self, THCudaTensor *src1, THCudaTensor *src2)
{
THAssert(THCudaTensor_checkGPU(state, 3, self, src1, src2));
THArgCheck(THCudaTensor_nElement(state, src1) ==
THCudaTensor_nElement(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THCudaTensor_pointwiseApply2(state, self, src2, TensorMinOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self, src1);
if (!THCudaTensor_pointwiseApply3(state, self, src1, src2, TensorMinOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
struct TensorMaxValueOp {
TensorMaxValueOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out) {
*out = max(*out, val);
}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = max(*in, val);
}
float val;
};
void THCudaTensor_cmaxValue(THCState *state, THCudaTensor *self, THCudaTensor *src, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
if (self == src) {
if (!THCudaTensor_pointwiseApply1(state, self, TensorMaxValueOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self, src);
if (!THCudaTensor_pointwiseApply2(state, self, src, TensorMaxValueOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
struct TensorMinValueOp {
TensorMinValueOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out) {
*out = min(*out, val);
}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = min(*in, val);
}
float val;
};
void THCudaTensor_cminValue(THCState *state, THCudaTensor *self, THCudaTensor *src, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
if (self == src) {
if (!THCudaTensor_pointwiseApply1(state, self, TensorMinValueOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self, src);
if (!THCudaTensor_pointwiseApply2(state, self, src, TensorMinValueOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
| 56d15f14c805e4a26accbeeef2b9fee896085938.cu | #include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCBlas.h"
#include "THCTensorCopy.h"
#include "THCTensorRandom.h"
#include "THCApply.cuh"
#include "THCReduce.cuh"
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC) \
struct Tensor##NAME##Op { \
__device__ __forceinline__ void operator()(float* out, float* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(float* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCudaTensor_##NAME(THCState* state, THCudaTensor* self_, THCudaTensor* src) { \
THAssert(THCudaTensor_checkGPU(state, 2, self_, src)); \
if (self_ == src) { \
if (!THCudaTensor_pointwiseApply1(state, self_, Tensor##NAME##Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCudaTensor_resizeAs(state, self_, src); \
\
if (!THCudaTensor_pointwiseApply2(state, self_, src, Tensor##NAME##Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(cudaGetLastError()); \
}
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log, log)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, log1p)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(exp, exp)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(cos, cos)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(acos, acos)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(cosh, cosh)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sin, sin)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(asin, asin)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sinh, sinh)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(tan, tan)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(atan, atan)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(tanh, tanh)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(sqrt, sqrt)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(ceil, ceil)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, floor)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(abs, fabs)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(round, roundf)
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
struct TensorAddOp {
__device__ __forceinline__ void operator()(float* out, float* in) {
*out += *in;
}
__device__ __forceinline__ void operator()(float* out, float* in1, float* in2) {
*out = *in1 + *in2;
}
};
struct TensorCAddOp {
TensorCAddOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out += val * *in;
}
__device__ __forceinline__ void operator()(float* out, float* in1, float* in2) {
*out = *in1 + val * *in2;
}
float val;
};
void THCudaTensor_cadd(THCState *state, THCudaTensor *self_, THCudaTensor* src1, float value, THCudaTensor *src2)
{
THAssert(THCudaTensor_checkGPU(state, 3, self_, src1, src2));
THArgCheck(THCudaTensor_nElement(state, src1) ==
THCudaTensor_nElement(state, src2), 3, "sizes do not match");
if (self_ == src1) {
if (value == 1.0f) {
// self += src2
if (!THCudaTensor_pointwiseApply2(state, self_, src2, TensorAddOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self += value * src2
if (!THCudaTensor_pointwiseApply2(state, self_, src2, TensorCAddOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCudaTensor_resizeAs(state, self_, src1);
if (value == 1.0f) {
// self = src1 + src2
if (!THCudaTensor_pointwiseApply3(state, self_, src1, src2, TensorAddOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self = src1 + value * src2
if (!THCudaTensor_pointwiseApply3(state, self_, src1, src2, TensorCAddOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(cudaGetLastError());
}
struct TensorMulOp {
__device__ __forceinline__ void operator()(float* out, float* in) {
*out *= *in;
}
__device__ __forceinline__ void operator()(float* out, float* in1, float* in2) {
*out = *in1 * *in2;
}
};
void THCudaTensor_cmul(THCState *state, THCudaTensor *self_, THCudaTensor *src1, THCudaTensor *src2)
{
THAssert(THCudaTensor_checkGPU(state, 3, self_, src1, src2));
THArgCheck(THCudaTensor_nElement(state, src1) ==
THCudaTensor_nElement(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self *= src2
if (!THCudaTensor_pointwiseApply2(state, self_, src2, TensorMulOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src1);
// self = src1 * src2
if (!THCudaTensor_pointwiseApply3(state, self_, src1, src2, TensorMulOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
struct TensorMaxOp {
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = max(*out, *in);
}
__device__ __forceinline__ void operator()(float* out, float* in1, float* in2) {
*out = max(*in1, *in2);
}
};
void THCudaTensor_cmax(THCState *state, THCudaTensor *self, THCudaTensor *src1, THCudaTensor *src2)
{
THAssert(THCudaTensor_checkGPU(state, 3, self, src1, src2));
THArgCheck(THCudaTensor_nElement(state, src1) ==
THCudaTensor_nElement(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THCudaTensor_pointwiseApply2(state, self, src2, TensorMaxOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self, src1);
if (!THCudaTensor_pointwiseApply3(state, self, src1, src2, TensorMaxOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
struct TensorMinOp {
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = min(*out, *in);
}
__device__ __forceinline__ void operator()(float* out, float* in1, float* in2) {
*out = min(*in1, *in2);
}
};
void THCudaTensor_cmin(THCState *state, THCudaTensor *self, THCudaTensor *src1, THCudaTensor *src2)
{
THAssert(THCudaTensor_checkGPU(state, 3, self, src1, src2));
THArgCheck(THCudaTensor_nElement(state, src1) ==
THCudaTensor_nElement(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THCudaTensor_pointwiseApply2(state, self, src2, TensorMinOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self, src1);
if (!THCudaTensor_pointwiseApply3(state, self, src1, src2, TensorMinOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
struct TensorMaxValueOp {
TensorMaxValueOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out) {
*out = max(*out, val);
}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = max(*in, val);
}
float val;
};
void THCudaTensor_cmaxValue(THCState *state, THCudaTensor *self, THCudaTensor *src, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
if (self == src) {
if (!THCudaTensor_pointwiseApply1(state, self, TensorMaxValueOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self, src);
if (!THCudaTensor_pointwiseApply2(state, self, src, TensorMaxValueOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
struct TensorMinValueOp {
TensorMinValueOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out) {
*out = min(*out, val);
}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = min(*in, val);
}
float val;
};
void THCudaTensor_cminValue(THCState *state, THCudaTensor *self, THCudaTensor *src, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
if (self == src) {
if (!THCudaTensor_pointwiseApply1(state, self, TensorMinValueOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self, src);
if (!THCudaTensor_pointwiseApply2(state, self, src, TensorMinValueOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
|
f71f92e5cda67d13f18494ee14152e35dc0d75c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*==========================================================================
SHA1 KERNEL
* Copyright (c) 2008, NetSysLab at the University of British Columbia
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the University nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NetSysLab ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NetSysLab BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
DESCRIPTION
GPU (CUDA) version of the SHA-1 kernels in the storeGPU library.
==========================================================================*/
/*==========================================================================
INCLUDES
==========================================================================*/
#include <string.h>
#include <stdio.h>
#include "cust.h"
/*==========================================================================
DATA DECLARATIONS
==========================================================================*/
/*--------------------------------------------------------------------------
TYPE DEFINITIONS
--------------------------------------------------------------------------*/
typedef struct {
unsigned long total[2]; /*!< number of bytes processed */
unsigned long state[5]; /*!< intermediate digest state */
unsigned char buffer[64]; /*!< data block being processed */
} sha1_context;
/*--------------------------------------------------------------------------
FUNCTION PROTOTYPES
--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
CONSTANTS
--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
GLOBAL VARIABLES
--------------------------------------------------------------------------*/
__device__
static const unsigned char sha1_padding[64] =
{
0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/*--------------------------------------------------------------------------
MACROS
--------------------------------------------------------------------------*/
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE 1
#endif
/*
* 32-bit integer manipulation macros (big endian)
*/
#ifndef GET_UINT32_BE
#define GET_UINT32_BE(n,b,i) \
{ \
(n) = ( (unsigned long) (b)[(i) ] << 24 ) \
| ( (unsigned long) (b)[(i) + 1] << 16 ) \
| ( (unsigned long) (b)[(i) + 2] << 8 ) \
| ( (unsigned long) (b)[(i) + 3] ); \
}
#endif
#ifndef PUT_UINT32_BE
#define PUT_UINT32_BE(n,b,i) \
{ \
(b)[(i) ] = (unsigned char) ( (n) >> 24 ); \
(b)[(i) + 1] = (unsigned char) ( (n) >> 16 ); \
(b)[(i) + 2] = (unsigned char) ( (n) >> 8 ); \
(b)[(i) + 3] = (unsigned char) ( (n) ); \
}
#endif
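/*
Worked example: for bytes b = { 0x01, 0x02, 0x03, 0x04 } and i = 0,
GET_UINT32_BE(n, b, 0) yields n == 0x01020304, and PUT_UINT32_BE(0x01020304, b, 0)
writes the same four bytes back in big-endian order.
*/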
#ifdef FEATURE_SHARED_MEMORY
// current thread stride.
#undef SHARED_MEMORY_INDEX
#define SHARED_MEMORY_INDEX(index) (32 * (index) + (threadIdx.x & 0x1F))
#endif /* FEATURE_SHARED_MEMORY */
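/*
SHARED_MEMORY_INDEX interleaves the 16-word message schedule of the 32 lanes
of a warp: lane l stores word w at offset 32 * w + l, so e.g. lane 3 reading
word 2 touches element 67. Consecutive lanes therefore fall into consecutive
32-bit shared-memory banks, which avoids bank conflicts.
*/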
/*--------------------------------------------------------------------------
LOCAL FUNCTIONS
--------------------------------------------------------------------------*/
#ifndef FEATURE_SHARED_MEMORY
/*
* SHA-1 context setup
*/
/*===========================================================================
FUNCTION SHA1_GPU_STARTS
DESCRIPTION
SHA-1 context setup
DEPENDENCIES
None
RETURN VALUE
None
===========================================================================*/
__device__
void sha1_starts( sha1_context *ctx ) {
ctx->total[0] = 0;
ctx->total[1] = 0;
ctx->state[0] = 0x67452301;
ctx->state[1] = 0xEFCDAB89;
ctx->state[2] = 0x98BADCFE;
ctx->state[3] = 0x10325476;
ctx->state[4] = 0xC3D2E1F0;
}
/*===========================================================================
FUNCTION SHA1_GPU_PROCESS
DESCRIPTION
SHA1 process buffer
DEPENDENCIES
None
RETURN VALUE
None
===========================================================================*/
__device__
void sha1_process( sha1_context *ctx, unsigned char data[64] ) {
unsigned long temp, W[16], A, B, C, D, E;
GET_UINT32_BE( W[ 0], data, 0 );
GET_UINT32_BE( W[ 1], data, 4 );
GET_UINT32_BE( W[ 2], data, 8 );
GET_UINT32_BE( W[ 3], data, 12 );
GET_UINT32_BE( W[ 4], data, 16 );
GET_UINT32_BE( W[ 5], data, 20 );
GET_UINT32_BE( W[ 6], data, 24 );
GET_UINT32_BE( W[ 7], data, 28 );
GET_UINT32_BE( W[ 8], data, 32 );
GET_UINT32_BE( W[ 9], data, 36 );
GET_UINT32_BE( W[10], data, 40 );
GET_UINT32_BE( W[11], data, 44 );
GET_UINT32_BE( W[12], data, 48 );
GET_UINT32_BE( W[13], data, 52 );
GET_UINT32_BE( W[14], data, 56 );
GET_UINT32_BE( W[15], data, 60 );
#undef S
#define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n)))
#undef R
#define R(t) \
( \
temp = W[(t - 3) & 0x0F] ^ W[(t - 8) & 0x0F] ^ \
W[(t - 14) & 0x0F] ^ W[ t & 0x0F], \
( W[t & 0x0F] = S(temp,1) ) \
)
#undef P
#define P(a,b,c,d,e,x) \
{ \
e += S(a,5) + F(b,c,d) + K + x; b = S(b,30); \
}
A = ctx->state[0];
B = ctx->state[1];
C = ctx->state[2];
D = ctx->state[3];
E = ctx->state[4];
#define F(x,y,z) (z ^ (x & (y ^ z)))
#define K 0x5A827999
P( A, B, C, D, E, W[0] );
P( E, A, B, C, D, W[1] );
P( D, E, A, B, C, W[2] );
P( C, D, E, A, B, W[3] );
P( B, C, D, E, A, W[4] );
P( A, B, C, D, E, W[5] );
P( E, A, B, C, D, W[6] );
P( D, E, A, B, C, W[7] );
P( C, D, E, A, B, W[8] );
P( B, C, D, E, A, W[9] );
P( A, B, C, D, E, W[10] );
P( E, A, B, C, D, W[11] );
P( D, E, A, B, C, W[12] );
P( C, D, E, A, B, W[13] );
P( B, C, D, E, A, W[14] );
P( A, B, C, D, E, W[15] );
P( E, A, B, C, D, R(16) );
P( D, E, A, B, C, R(17) );
P( C, D, E, A, B, R(18) );
P( B, C, D, E, A, R(19) );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0x6ED9EBA1
P( A, B, C, D, E, R(20) );
P( E, A, B, C, D, R(21) );
P( D, E, A, B, C, R(22) );
P( C, D, E, A, B, R(23) );
P( B, C, D, E, A, R(24) );
P( A, B, C, D, E, R(25) );
P( E, A, B, C, D, R(26) );
P( D, E, A, B, C, R(27) );
P( C, D, E, A, B, R(28) );
P( B, C, D, E, A, R(29) );
P( A, B, C, D, E, R(30) );
P( E, A, B, C, D, R(31) );
P( D, E, A, B, C, R(32) );
P( C, D, E, A, B, R(33) );
P( B, C, D, E, A, R(34) );
P( A, B, C, D, E, R(35) );
P( E, A, B, C, D, R(36) );
P( D, E, A, B, C, R(37) );
P( C, D, E, A, B, R(38) );
P( B, C, D, E, A, R(39) );
#undef K
#undef F
#define F(x,y,z) ((x & y) | (z & (x | y)))
#define K 0x8F1BBCDC
P( A, B, C, D, E, R(40) );
P( E, A, B, C, D, R(41) );
P( D, E, A, B, C, R(42) );
P( C, D, E, A, B, R(43) );
P( B, C, D, E, A, R(44) );
P( A, B, C, D, E, R(45) );
P( E, A, B, C, D, R(46) );
P( D, E, A, B, C, R(47) );
P( C, D, E, A, B, R(48) );
P( B, C, D, E, A, R(49) );
P( A, B, C, D, E, R(50) );
P( E, A, B, C, D, R(51) );
P( D, E, A, B, C, R(52) );
P( C, D, E, A, B, R(53) );
P( B, C, D, E, A, R(54) );
P( A, B, C, D, E, R(55) );
P( E, A, B, C, D, R(56) );
P( D, E, A, B, C, R(57) );
P( C, D, E, A, B, R(58) );
P( B, C, D, E, A, R(59) );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0xCA62C1D6
P( A, B, C, D, E, R(60) );
P( E, A, B, C, D, R(61) );
P( D, E, A, B, C, R(62) );
P( C, D, E, A, B, R(63) );
P( B, C, D, E, A, R(64) );
P( A, B, C, D, E, R(65) );
P( E, A, B, C, D, R(66) );
P( D, E, A, B, C, R(67) );
P( C, D, E, A, B, R(68) );
P( B, C, D, E, A, R(69) );
P( A, B, C, D, E, R(70) );
P( E, A, B, C, D, R(71) );
P( D, E, A, B, C, R(72) );
P( C, D, E, A, B, R(73) );
P( B, C, D, E, A, R(74) );
P( A, B, C, D, E, R(75) );
P( E, A, B, C, D, R(76) );
P( D, E, A, B, C, R(77) );
P( C, D, E, A, B, R(78) );
P( B, C, D, E, A, R(79) );
#undef K
#undef F
ctx->state[0] += A;
ctx->state[1] += B;
ctx->state[2] += C;
ctx->state[3] += D;
ctx->state[4] += E;
}
/*===========================================================================
FUNCTION SHA1_GPU_UPDATE
DESCRIPTION
SHA1 update buffer
DEPENDENCIES
None
RETURN VALUE
None
===========================================================================*/
__device__
void sha1_update( sha1_context *ctx, unsigned char *input, int ilen ) {
int fill;
unsigned long left;
if( ilen <= 0 )
return;
left = ctx->total[0] & 0x3F;
fill = 64 - left;
ctx->total[0] += ilen;
ctx->total[0] &= 0xFFFFFFFF;
if ( ctx->total[0] < (unsigned long) ilen )
ctx->total[1]++;
if ( left && ilen >= fill ) {
/*memcpy( (void *) (ctx->buffer + left),
(void *) input, fill );*/
for (int i = 0; i < fill; i++) {
ctx->buffer[i+left] = input[i];
}
sha1_process( ctx, ctx->buffer );
input += fill;
ilen -= fill;
left = 0;
}
while ( ilen >= 64 ) {
sha1_process( ctx, input );
input += 64;
ilen -= 64;
}
if ( ilen > 0 ) {
/*memcpy( (void *) (ctx->buffer + left),
(void *) input, ilen );*/
for (int i = 0; i < ilen; i++) {
ctx->buffer[i+left] = input[i];
}
}
}
/*===========================================================================
FUNCTION SHA1_GPU_FINISH
DESCRIPTION
SHA1 final digest
DEPENDENCIES
None
RETURN VALUE
None
===========================================================================*/
__device__
void sha1_finish( sha1_context *ctx, unsigned char *output ) {
unsigned long last, padn;
unsigned long high, low;
unsigned char msglen[8];
high = ( ctx->total[0] >> 29 )
| ( ctx->total[1] << 3 );
low = ( ctx->total[0] << 3 );
PUT_UINT32_BE( high, msglen, 0 );
PUT_UINT32_BE( low, msglen, 4 );
last = ctx->total[0] & 0x3F;
padn = ( last < 56 ) ? ( 56 - last ) : ( 120 - last );
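/* Worked example: with 10 bytes left in the buffer, padn == 46, which fills
   the block up to byte 56 and leaves exactly 8 bytes for the length; with
   60 bytes, padn == 60, completing the current block and padding a second
   one up to byte 56. */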
sha1_update( ctx, (unsigned char *) sha1_padding, padn );
sha1_update( ctx, msglen, 8 );
PUT_UINT32_BE( ctx->state[0], output, 0 );
#ifndef FEATURE_REDUCED_HASH_SIZE
PUT_UINT32_BE( ctx->state[1], output, 4 );
PUT_UINT32_BE( ctx->state[2], output, 8 );
PUT_UINT32_BE( ctx->state[3], output, 12 );
PUT_UINT32_BE( ctx->state[4], output, 16 );
#endif
}
/*===========================================================================
FUNCTION SHA1_INTERNAL
DESCRIPTION
Does the real sha1 algorithm
DEPENDENCIES
None
RETURN VALUE
output is the hash result
===========================================================================*/
__device__
void sha1_internal( unsigned char *input, int ilen,
unsigned char *output ) {
sha1_context ctx;
sha1_starts( &ctx );
sha1_update( &ctx, input, ilen );
sha1_finish( &ctx, output );
memset( &ctx, 0, sizeof( sha1_context ) );
}
#endif
#ifdef FEATURE_SHARED_MEMORY
/*===========================================================================
FUNCTION SHA1_INTERNAL
DESCRIPTION
Does the real sha1 algorithm.
DEPENDENCIES
None
RETURN VALUE
output is the hash result
===========================================================================*/
__device__
unsigned long macroRFunction(int t, unsigned int *sharedMemory) {
return sharedMemory[SHARED_MEMORY_INDEX((t - 3) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX((t - 8) & 0x0F)] ^
sharedMemory[SHARED_MEMORY_INDEX((t - 14) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX( t & 0x0F)];
}
__device__
static void sha1_internal( unsigned int *input, unsigned int *sharedMemory,
unsigned int chunkSize, unsigned char *output ) {
/* Number of passes (512 bit blocks) we have to do */
int numberOfPasses = chunkSize / 64 + 1;
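/* Example: chunkSize == 128 gives numberOfPasses == 3, i.e. two full 64-byte
   data blocks plus one final block into which the loop below writes the 0x80
   terminator word and the encoded bit length of the chunk. */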
/* Used during the hashing process */
unsigned long temp, A, B, C, D ,E;
//unsigned long shared14, shared15;
/* Byte-level view of the shared memory, used by the big-endian pack/unpack macros */
unsigned char *data = (unsigned char *)sharedMemory;
/* Will hold the hash value through the
intermediate stages of SHA1 algorithm */
unsigned int state0 = 0x67452301;
unsigned int state1 = 0xEFCDAB89;
unsigned int state2 = 0x98BADCFE;
unsigned int state3 = 0x10325476;
unsigned int state4 = 0xC3D2E1F0;
/* int x0 = SHARED_MEMORY_INDEX(0);
int x1 = SHARED_MEMORY_INDEX(1);
int x2 = SHARED_MEMORY_INDEX(2);
int x3 = SHARED_MEMORY_INDEX(3);
int x4 = SHARED_MEMORY_INDEX(4);
int x5 = SHARED_MEMORY_INDEX(5);
int x6 = SHARED_MEMORY_INDEX(6);
int x7 = SHARED_MEMORY_INDEX(7);
int x8 = SHARED_MEMORY_INDEX(8);
int x9 = SHARED_MEMORY_INDEX(9);
int x10 = SHARED_MEMORY_INDEX(10);
int x11 = SHARED_MEMORY_INDEX(11);
int x12 = SHARED_MEMORY_INDEX(12);
int x13 = SHARED_MEMORY_INDEX(13);
int x14 = SHARED_MEMORY_INDEX(14);
int x15 = SHARED_MEMORY_INDEX(15);
*/
#undef GET_CACHED_INDEX
#define GET_CACHED_INDEX(index) SHARED_MEMORY_INDEX(index)//(x##index)
for( int index = 0 ; index < (numberOfPasses) ; index++ ) {
/* Move data to the thread's shared memory space */
sharedMemory[GET_CACHED_INDEX(0)] = input[0 + 16 * index];
sharedMemory[GET_CACHED_INDEX(1)] = input[1 + 16 * index];
sharedMemory[GET_CACHED_INDEX(2)] = input[2 + 16 * index];
sharedMemory[GET_CACHED_INDEX(3)] = input[3 + 16 * index];
sharedMemory[GET_CACHED_INDEX(4)] = input[4 + 16 * index];
sharedMemory[GET_CACHED_INDEX(5)] = input[5 + 16 * index];
sharedMemory[GET_CACHED_INDEX(6)] = input[6 + 16 * index];
sharedMemory[GET_CACHED_INDEX(7)] = input[7 + 16 * index];
sharedMemory[GET_CACHED_INDEX(8)] = input[8 + 16 * index];
sharedMemory[GET_CACHED_INDEX(9)] = input[9 + 16 * index];
sharedMemory[GET_CACHED_INDEX(10)] = input[10 + 16 * index];
sharedMemory[GET_CACHED_INDEX(11)] = input[11 + 16 * index];
sharedMemory[GET_CACHED_INDEX(12)] = input[12 + 16 * index];
/* Testing the code with and without this if statement shows that
it has no effect on performance. */
if(index == numberOfPasses -1 ) {
/* The final pass carries the 0x80 terminator word and the chunk size in
bits, i.e. the length encoding required by the SHA-1 specification. */
sharedMemory[GET_CACHED_INDEX(13)] = 0x00000080;
PUT_UINT32_BE( chunkSize >> 29,
data, GET_CACHED_INDEX(14) * 4 );
PUT_UINT32_BE( chunkSize << 3,
data, GET_CACHED_INDEX(15) * 4 );
}
else {
sharedMemory[GET_CACHED_INDEX(13)] = input[13 + 16 * index];
sharedMemory[GET_CACHED_INDEX(14)] = input[14 + 16 * index];
sharedMemory[GET_CACHED_INDEX(15)] = input[15 + 16 * index];
}
/* Convert each loaded word to the big-endian byte order SHA-1 operates on (a byte swap on little-endian hardware). */
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(0)],
data, GET_CACHED_INDEX(0) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(1)],
data, GET_CACHED_INDEX(1) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(2)],
data, GET_CACHED_INDEX(2) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(3)],
data, GET_CACHED_INDEX(3) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(4)],
data, GET_CACHED_INDEX(4) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(5)],
data, GET_CACHED_INDEX(5) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(6)],
data, GET_CACHED_INDEX(6) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(7)],
data, GET_CACHED_INDEX(7) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(8)],
data, GET_CACHED_INDEX(8) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(9)],
data, GET_CACHED_INDEX(9) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(10)],
data, GET_CACHED_INDEX(10) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(11)],
data, GET_CACHED_INDEX(11) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(12)],
data, GET_CACHED_INDEX(12) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(13)],
data, GET_CACHED_INDEX(13) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(14)],
data, GET_CACHED_INDEX(14) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(15)],
data, GET_CACHED_INDEX(15) * 4 );
#undef S
#define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n)))
#undef R
#define R(t) \
( \
temp = macroRFunction(t, sharedMemory) , \
( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \
)
/*
#define R(t) \
( \
temp = sharedMemory[SHARED_MEMORY_INDEX((t - 3) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX((t - 8) & 0x0F)] ^ \
sharedMemory[SHARED_MEMORY_INDEX((t - 14) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX( t & 0x0F)], \
( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \
)
*/
#undef P
#define P(a,b,c,d,e,x) \
{ \
e += S(a,5) + F(b,c,d) + K + x; b = S(b,30); \
}
A = state0;
B = state1;
C = state2;
D = state3;
E = state4;
#define F(x,y,z) (z ^ (x & (y ^ z)))
#define K 0x5A827999
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(0)] );
P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(1)] );
P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(2)] );
P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(3)] );
P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(4)] );
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(5)] );
P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(6)] );
P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(7)] );
P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(8)] );
P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(9)] );
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(10)] );
P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(11)] );
P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(12)] );
P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(13)] );
P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(14)] );
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(15)] );
P( E, A, B, C, D, R(16) );
P( D, E, A, B, C, R(17) );
P( C, D, E, A, B, R(18) );
P( B, C, D, E, A, R(19) );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0x6ED9EBA1
P( A, B, C, D, E, R(20) );
P( E, A, B, C, D, R(21) );
P( D, E, A, B, C, R(22) );
P( C, D, E, A, B, R(23) );
P( B, C, D, E, A, R(24) );
P( A, B, C, D, E, R(25) );
P( E, A, B, C, D, R(26) );
P( D, E, A, B, C, R(27) );
P( C, D, E, A, B, R(28) );
P( B, C, D, E, A, R(29) );
P( A, B, C, D, E, R(30) );
P( E, A, B, C, D, R(31) );
P( D, E, A, B, C, R(32) );
P( C, D, E, A, B, R(33) );
P( B, C, D, E, A, R(34) );
P( A, B, C, D, E, R(35) );
P( E, A, B, C, D, R(36) );
P( D, E, A, B, C, R(37) );
P( C, D, E, A, B, R(38) );
P( B, C, D, E, A, R(39) );
#undef K
#undef F
#define F(x,y,z) ((x & y) | (z & (x | y)))
#define K 0x8F1BBCDC
P( A, B, C, D, E, R(40) );
P( E, A, B, C, D, R(41) );
P( D, E, A, B, C, R(42) );
P( C, D, E, A, B, R(43) );
P( B, C, D, E, A, R(44) );
P( A, B, C, D, E, R(45) );
P( E, A, B, C, D, R(46) );
P( D, E, A, B, C, R(47) );
P( C, D, E, A, B, R(48) );
P( B, C, D, E, A, R(49) );
P( A, B, C, D, E, R(50) );
P( E, A, B, C, D, R(51) );
P( D, E, A, B, C, R(52) );
P( C, D, E, A, B, R(53) );
P( B, C, D, E, A, R(54) );
P( A, B, C, D, E, R(55) );
P( E, A, B, C, D, R(56) );
P( D, E, A, B, C, R(57) );
P( C, D, E, A, B, R(58) );
P( B, C, D, E, A, R(59) );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0xCA62C1D6
P( A, B, C, D, E, R(60) );
P( E, A, B, C, D, R(61) );
P( D, E, A, B, C, R(62) );
P( C, D, E, A, B, R(63) );
P( B, C, D, E, A, R(64) );
P( A, B, C, D, E, R(65) );
P( E, A, B, C, D, R(66) );
P( D, E, A, B, C, R(67) );
P( C, D, E, A, B, R(68) );
P( B, C, D, E, A, R(69) );
P( A, B, C, D, E, R(70) );
P( E, A, B, C, D, R(71) );
P( D, E, A, B, C, R(72) );
P( C, D, E, A, B, R(73) );
P( B, C, D, E, A, R(74) );
P( A, B, C, D, E, R(75) );
P( E, A, B, C, D, R(76) );
P( D, E, A, B, C, R(77) );
P( C, D, E, A, B, R(78) );
P( B, C, D, E, A, R(79) );
#undef K
#undef F
state0 += A;
state1 += B;
state2 += C;
state3 += D;
state4 += E;
}
/* Got the hash, store it in the output buffer. */
PUT_UINT32_BE( state0, output, 0 );
#ifndef FEATURE_REDUCED_HASH_SIZE
PUT_UINT32_BE( state1, output, 4 );
PUT_UINT32_BE( state2, output, 8 );
PUT_UINT32_BE( state3, output, 12 );
PUT_UINT32_BE( state4, output, 16 );
#endif
}
__device__
static void sha1_internal_overlap( unsigned int *input, unsigned int *sharedMemory,
unsigned int chunkSize, unsigned char *output ) {
/* Number of passes (512 bit blocks) we have to do */
int numberOfPasses = chunkSize / 64 + 1;
/* Used during the hashing process */
unsigned long temp, A, B, C, D ,E;
//unsigned long shared14, shared15;
/* Byte-level view of the shared memory, used by the big-endian pack/unpack macros */
unsigned char *data = (unsigned char *)sharedMemory;
// number of padding bytes.
int numPadBytes = 0;
int numPadInt = 0;
//int numPadRemain = 0;
/* Will hold the hash value through the
intermediate stages of SHA1 algorithm */
unsigned int state0 = 0x67452301;
unsigned int state1 = 0xEFCDAB89;
unsigned int state2 = 0x98BADCFE;
unsigned int state3 = 0x10325476;
unsigned int state4 = 0xC3D2E1F0;
int x0 = SHARED_MEMORY_INDEX(0);
int x1 = SHARED_MEMORY_INDEX(1);
int x2 = SHARED_MEMORY_INDEX(2);
int x3 = SHARED_MEMORY_INDEX(3);
int x4 = SHARED_MEMORY_INDEX(4);
int x5 = SHARED_MEMORY_INDEX(5);
int x6 = SHARED_MEMORY_INDEX(6);
int x7 = SHARED_MEMORY_INDEX(7);
int x8 = SHARED_MEMORY_INDEX(8);
int x9 = SHARED_MEMORY_INDEX(9);
int x10 = SHARED_MEMORY_INDEX(10);
int x11 = SHARED_MEMORY_INDEX(11);
int x12 = SHARED_MEMORY_INDEX(12);
int x13 = SHARED_MEMORY_INDEX(13);
int x14 = SHARED_MEMORY_INDEX(14);
int x15 = SHARED_MEMORY_INDEX(15);
#undef GET_CACHED_INDEX
#define GET_CACHED_INDEX(index) (x##index)
for( int index = 0 ; index < (numberOfPasses) ; index++ ) {
if(index == numberOfPasses -1 ){
numPadBytes = (64-12) - (chunkSize - (numberOfPasses-1)*64);
numPadInt = numPadBytes/sizeof(int);
/*numPadRemain = numPadBytes-numPadInt*sizeof(int);
printf("\nLast loop chunkSize = %d, numberOfPasses= %d and \nnumPadBytes = %d, numPadInt =%d, numPadRemain = %d\n",
chunkSize,numberOfPasses,numPadBytes,numPadInt,numPadRemain);*/
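/* Example: chunkSize == 100 gives numberOfPasses == 2; the final block holds
   100 - 64 == 36 data bytes (9 words), so numPadBytes == 52 - 36 == 16 and
   numPadInt == 4: words 13..10 are zeroed, the 0x80 terminator lands in
   word 9 right after the data, and words 14..15 receive the bit length. */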
int i=0;
for(i=0;i<numPadInt;i++){
sharedMemory[SHARED_MEMORY_INDEX(13-i)] = 0;
}
int j=0;
for(j=0;j<(16-3-numPadInt);j++){
//printf("j= %d\n",j);
sharedMemory[SHARED_MEMORY_INDEX(j)] = input[j + 16 * index];
}
/* The final pass is zero-padded and then carries the 0x80 terminator word
and the chunk size in bits, as required by the SHA-1 specification. */
sharedMemory[SHARED_MEMORY_INDEX(13-i)] = 0x00000080;
//printf("the last one at %d\n",13-i);
PUT_UINT32_BE( chunkSize >> 29,
data, GET_CACHED_INDEX(14) * 4 );
PUT_UINT32_BE( chunkSize << 3,
data, GET_CACHED_INDEX(15) * 4 );
}
else{
/* Move data to the thread's shared memory space */
//printf("Not last loop\n");
sharedMemory[GET_CACHED_INDEX(0)] = input[0 + 16 * index];
sharedMemory[GET_CACHED_INDEX(1)] = input[1 + 16 * index];
sharedMemory[GET_CACHED_INDEX(2)] = input[2 + 16 * index];
sharedMemory[GET_CACHED_INDEX(3)] = input[3 + 16 * index];
sharedMemory[GET_CACHED_INDEX(4)] = input[4 + 16 * index];
sharedMemory[GET_CACHED_INDEX(5)] = input[5 + 16 * index];
sharedMemory[GET_CACHED_INDEX(6)] = input[6 + 16 * index];
sharedMemory[GET_CACHED_INDEX(7)] = input[7 + 16 * index];
sharedMemory[GET_CACHED_INDEX(8)] = input[8 + 16 * index];
sharedMemory[GET_CACHED_INDEX(9)] = input[9 + 16 * index];
sharedMemory[GET_CACHED_INDEX(10)] = input[10 + 16 * index];
sharedMemory[GET_CACHED_INDEX(11)] = input[11 + 16 * index];
sharedMemory[GET_CACHED_INDEX(12)] = input[12 + 16 * index];
sharedMemory[GET_CACHED_INDEX(13)] = input[13 + 16 * index];
sharedMemory[GET_CACHED_INDEX(14)] = input[14 + 16 * index];
sharedMemory[GET_CACHED_INDEX(15)] = input[15 + 16 * index];
}
/* int k=0;
printf("\nGPU DATA\n");
for(k=0;k<16;k++){
printf("%d\t",sharedMemory[SHARED_MEMORY_INDEX(k)]);
}
printf("\n\n");*/
/* Convert each loaded word to the big-endian byte order SHA-1 operates on (a byte swap on little-endian hardware). */
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(0)],
data, GET_CACHED_INDEX(0) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(1)],
data, GET_CACHED_INDEX(1) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(2)],
data, GET_CACHED_INDEX(2) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(3)],
data, GET_CACHED_INDEX(3) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(4)],
data, GET_CACHED_INDEX(4) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(5)],
data, GET_CACHED_INDEX(5) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(6)],
data, GET_CACHED_INDEX(6) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(7)],
data, GET_CACHED_INDEX(7) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(8)],
data, GET_CACHED_INDEX(8) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(9)],
data, GET_CACHED_INDEX(9) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(10)],
data, GET_CACHED_INDEX(10) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(11)],
data, GET_CACHED_INDEX(11) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(12)],
data, GET_CACHED_INDEX(12) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(13)],
data, GET_CACHED_INDEX(13) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(14)],
data, GET_CACHED_INDEX(14) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(15)],
data, GET_CACHED_INDEX(15) * 4 );
#undef S
#define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n)))
#undef R
#define R(t) \
( \
temp = macroRFunction(t, sharedMemory) , \
( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \
)
/*
#define R(t) \
( \
temp = sharedMemory[SHARED_MEMORY_INDEX((t - 3) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX((t - 8) & 0x0F)] ^ \
sharedMemory[SHARED_MEMORY_INDEX((t - 14) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX( t & 0x0F)], \
( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \
)
*/
#undef P
#define P(a,b,c,d,e,x) \
{ \
e += S(a,5) + F(b,c,d) + K + x; b = S(b,30); \
}
A = state0;
B = state1;
C = state2;
D = state3;
E = state4;
#define F(x,y,z) (z ^ (x & (y ^ z)))
#define K 0x5A827999
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(0)] );
P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(1)] );
P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(2)] );
P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(3)] );
P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(4)] );
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(5)] );
P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(6)] );
P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(7)] );
P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(8)] );
P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(9)] );
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(10)] );
P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(11)] );
P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(12)] );
P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(13)] );
P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(14)] );
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(15)] );
P( E, A, B, C, D, R(16) );
P( D, E, A, B, C, R(17) );
P( C, D, E, A, B, R(18) );
P( B, C, D, E, A, R(19) );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0x6ED9EBA1
P( A, B, C, D, E, R(20) );
P( E, A, B, C, D, R(21) );
P( D, E, A, B, C, R(22) );
P( C, D, E, A, B, R(23) );
P( B, C, D, E, A, R(24) );
P( A, B, C, D, E, R(25) );
P( E, A, B, C, D, R(26) );
P( D, E, A, B, C, R(27) );
P( C, D, E, A, B, R(28) );
P( B, C, D, E, A, R(29) );
P( A, B, C, D, E, R(30) );
P( E, A, B, C, D, R(31) );
P( D, E, A, B, C, R(32) );
P( C, D, E, A, B, R(33) );
P( B, C, D, E, A, R(34) );
P( A, B, C, D, E, R(35) );
P( E, A, B, C, D, R(36) );
P( D, E, A, B, C, R(37) );
P( C, D, E, A, B, R(38) );
P( B, C, D, E, A, R(39) );
#undef K
#undef F
#define F(x,y,z) ((x & y) | (z & (x | y)))
#define K 0x8F1BBCDC
P( A, B, C, D, E, R(40) );
P( E, A, B, C, D, R(41) );
P( D, E, A, B, C, R(42) );
P( C, D, E, A, B, R(43) );
P( B, C, D, E, A, R(44) );
P( A, B, C, D, E, R(45) );
P( E, A, B, C, D, R(46) );
P( D, E, A, B, C, R(47) );
P( C, D, E, A, B, R(48) );
P( B, C, D, E, A, R(49) );
P( A, B, C, D, E, R(50) );
P( E, A, B, C, D, R(51) );
P( D, E, A, B, C, R(52) );
P( C, D, E, A, B, R(53) );
P( B, C, D, E, A, R(54) );
P( A, B, C, D, E, R(55) );
P( E, A, B, C, D, R(56) );
P( D, E, A, B, C, R(57) );
P( C, D, E, A, B, R(58) );
P( B, C, D, E, A, R(59) );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0xCA62C1D6
P( A, B, C, D, E, R(60) );
P( E, A, B, C, D, R(61) );
P( D, E, A, B, C, R(62) );
P( C, D, E, A, B, R(63) );
P( B, C, D, E, A, R(64) );
P( A, B, C, D, E, R(65) );
P( E, A, B, C, D, R(66) );
P( D, E, A, B, C, R(67) );
P( C, D, E, A, B, R(68) );
P( B, C, D, E, A, R(69) );
P( A, B, C, D, E, R(70) );
P( E, A, B, C, D, R(71) );
P( D, E, A, B, C, R(72) );
P( C, D, E, A, B, R(73) );
P( B, C, D, E, A, R(74) );
P( A, B, C, D, E, R(75) );
P( E, A, B, C, D, R(76) );
P( D, E, A, B, C, R(77) );
P( C, D, E, A, B, R(78) );
P( B, C, D, E, A, R(79) );
#undef K
#undef F
state0 += A;
state1 += B;
state2 += C;
state3 += D;
state4 += E;
}
/* Got the hash, store it in the output buffer. */
PUT_UINT32_BE( state0, output, 0 );
#ifndef FEATURE_REDUCED_HASH_SIZE
PUT_UINT32_BE( state1, output, 4 );
PUT_UINT32_BE( state2, output, 8 );
PUT_UINT32_BE( state3, output, 12 );
PUT_UINT32_BE( state4, output, 16 );
#endif
}
#endif
/*--------------------------------------------------------------------------
GLOBAL FUNCTIONS
--------------------------------------------------------------------------*/
/*===========================================================================
FUNCTION SHA1
DESCRIPTION
Main sha1 hash function
DEPENDENCIES
GPU must be initialized
RETURN VALUE
output: the hash result
===========================================================================*/
__global__
void sha1( unsigned char *input, int chunkSize, int totalThreads,
int padSize, unsigned char *scratch ) {
// get the current thread index
int threadIndex = threadIdx.x + blockDim.x * blockIdx.x;
int chunkIndex = threadIndex * chunkSize;
int hashIndex = threadIndex * SHA1_HASH_SIZE;
if(threadIndex >= totalThreads)
return;
if ((threadIndex == (totalThreads - 1)) && (padSize > 0)) {
for(int i = 0 ; i < padSize ; i++)
input[chunkIndex + chunkSize - padSize + i] = 0;
}
#ifdef FEATURE_SHARED_MEMORY
__shared__ unsigned int sharedMemory[4 * 1024 - 32];
unsigned int *sharedMemoryIndex = sharedMemory + ((threadIdx.x >> 5) * 512);
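/* Each warp (threadIdx.x >> 5) owns a 512-uint slice: the 16 message-schedule
   words of its 32 lanes, interleaved by SHARED_MEMORY_INDEX. Note the array
   above holds 4 * 1024 - 32 == 4064 uints, enough for 7 full warps plus a
   truncated slice, so a 256-thread block would slightly overrun it. */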
unsigned char *tempInput = input + chunkIndex;
unsigned int *inputIndex = (unsigned int *)(tempInput);
sha1_internal(inputIndex, sharedMemoryIndex, chunkSize,
scratch + hashIndex );
#else
sha1_internal(input + chunkIndex, chunkSize, scratch + hashIndex );
#endif /* FEATURE_SHARED_MEMORY */
}
__global__
void sha1_overlap( unsigned char *input, int chunkSize, int offset,
int totalThreads, int padSize, unsigned char *output ) {
int threadIndex = threadIdx.x + blockDim.x * blockIdx.x;
int chunkIndex = threadIndex * offset;
int hashIndex = threadIndex * SHA1_HASH_SIZE;
if(threadIndex >= totalThreads)
return;
if ((threadIndex == (totalThreads - 1))) {
chunkSize-= padSize;
}
#ifdef FEATURE_SHARED_MEMORY
__shared__ unsigned int sharedMemory[4 * 1024 - 32];
//NOTE : SAMER : this can exceed the size of the shared memory
unsigned int *sharedMemoryIndex = sharedMemory + ((threadIdx.x >> 5) * 512);
unsigned int *inputIndex = (unsigned int *)(input + chunkIndex);
sha1_internal_overlap(inputIndex, sharedMemoryIndex, chunkSize,
output + hashIndex );
#else
sha1_internal(input + chunkIndex, chunkSize, output + hashIndex );
#endif /* FEATURE_SHARED_MEMORY */
}
| f71f92e5cda67d13f18494ee14152e35dc0d75c9.cu | /*==========================================================================
SHA1 KERNEL
* Copyright (c) 2008, NetSysLab at the University of British Columbia
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the University nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NetSysLab ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NetSysLab BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
DESCRIPTION
GPU (CUDA) version of the SHA-1 kernels in the storeGPU library.
==========================================================================*/
/*==========================================================================
INCLUDES
==========================================================================*/
#include <string.h>
#include <stdio.h>
#include "cust.h"
/*==========================================================================
DATA DECLARATIONS
==========================================================================*/
/*--------------------------------------------------------------------------
TYPE DEFINITIONS
--------------------------------------------------------------------------*/
typedef struct {
unsigned long total[2]; /*!< number of bytes processed */
unsigned long state[5]; /*!< intermediate digest state */
unsigned char buffer[64]; /*!< data block being processed */
} sha1_context;
/*--------------------------------------------------------------------------
FUNCTION PROTOTYPES
--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
CONSTANTS
--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
GLOBAL VARIABLES
--------------------------------------------------------------------------*/
__device__
static const unsigned char sha1_padding[64] =
{
0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/*--------------------------------------------------------------------------
MACROS
--------------------------------------------------------------------------*/
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE 1
#endif
/*
* 32-bit integer manipulation macros (big endian)
*/
#ifndef GET_UINT32_BE
#define GET_UINT32_BE(n,b,i) \
{ \
(n) = ( (unsigned long) (b)[(i) ] << 24 ) \
| ( (unsigned long) (b)[(i) + 1] << 16 ) \
| ( (unsigned long) (b)[(i) + 2] << 8 ) \
| ( (unsigned long) (b)[(i) + 3] ); \
}
#endif
#ifndef PUT_UINT32_BE
#define PUT_UINT32_BE(n,b,i) \
{ \
(b)[(i) ] = (unsigned char) ( (n) >> 24 ); \
(b)[(i) + 1] = (unsigned char) ( (n) >> 16 ); \
(b)[(i) + 2] = (unsigned char) ( (n) >> 8 ); \
(b)[(i) + 3] = (unsigned char) ( (n) ); \
}
#endif
#ifdef FEATURE_SHARED_MEMORY
// current thread stride.
#undef SHARED_MEMORY_INDEX
#define SHARED_MEMORY_INDEX(index) (32 * (index) + (threadIdx.x & 0x1F))
#endif /* FEATURE_SHARED_MEMORY */
/*--------------------------------------------------------------------------
LOCAL FUNCTIONS
--------------------------------------------------------------------------*/
#ifndef FEATURE_SHARED_MEMORY
/*
* SHA-1 context setup
*/
/*===========================================================================
FUNCTION SHA1_GPU_STARTS
DESCRIPTION
SHA-1 context setup
DEPENDENCIES
None
RETURN VALUE
None
===========================================================================*/
__device__
void sha1_starts( sha1_context *ctx ) {
ctx->total[0] = 0;
ctx->total[1] = 0;
ctx->state[0] = 0x67452301;
ctx->state[1] = 0xEFCDAB89;
ctx->state[2] = 0x98BADCFE;
ctx->state[3] = 0x10325476;
ctx->state[4] = 0xC3D2E1F0;
}
/*===========================================================================
FUNCTION SHA1_GPU_PROCESS
DESCRIPTION
SHA1 process buffer
DEPENDENCIES
None
RETURN VALUE
None
===========================================================================*/
__device__
void sha1_process( sha1_context *ctx, unsigned char data[64] ) {
unsigned long temp, W[16], A, B, C, D, E;
GET_UINT32_BE( W[ 0], data, 0 );
GET_UINT32_BE( W[ 1], data, 4 );
GET_UINT32_BE( W[ 2], data, 8 );
GET_UINT32_BE( W[ 3], data, 12 );
GET_UINT32_BE( W[ 4], data, 16 );
GET_UINT32_BE( W[ 5], data, 20 );
GET_UINT32_BE( W[ 6], data, 24 );
GET_UINT32_BE( W[ 7], data, 28 );
GET_UINT32_BE( W[ 8], data, 32 );
GET_UINT32_BE( W[ 9], data, 36 );
GET_UINT32_BE( W[10], data, 40 );
GET_UINT32_BE( W[11], data, 44 );
GET_UINT32_BE( W[12], data, 48 );
GET_UINT32_BE( W[13], data, 52 );
GET_UINT32_BE( W[14], data, 56 );
GET_UINT32_BE( W[15], data, 60 );
#undef S
#define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n)))
#undef R
#define R(t) \
( \
temp = W[(t - 3) & 0x0F] ^ W[(t - 8) & 0x0F] ^ \
W[(t - 14) & 0x0F] ^ W[ t & 0x0F], \
( W[t & 0x0F] = S(temp,1) ) \
)
#undef P
#define P(a,b,c,d,e,x) \
{ \
e += S(a,5) + F(b,c,d) + K + x; b = S(b,30); \
}
A = ctx->state[0];
B = ctx->state[1];
C = ctx->state[2];
D = ctx->state[3];
E = ctx->state[4];
#define F(x,y,z) (z ^ (x & (y ^ z)))
#define K 0x5A827999
P( A, B, C, D, E, W[0] );
P( E, A, B, C, D, W[1] );
P( D, E, A, B, C, W[2] );
P( C, D, E, A, B, W[3] );
P( B, C, D, E, A, W[4] );
P( A, B, C, D, E, W[5] );
P( E, A, B, C, D, W[6] );
P( D, E, A, B, C, W[7] );
P( C, D, E, A, B, W[8] );
P( B, C, D, E, A, W[9] );
P( A, B, C, D, E, W[10] );
P( E, A, B, C, D, W[11] );
P( D, E, A, B, C, W[12] );
P( C, D, E, A, B, W[13] );
P( B, C, D, E, A, W[14] );
P( A, B, C, D, E, W[15] );
P( E, A, B, C, D, R(16) );
P( D, E, A, B, C, R(17) );
P( C, D, E, A, B, R(18) );
P( B, C, D, E, A, R(19) );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0x6ED9EBA1
P( A, B, C, D, E, R(20) );
P( E, A, B, C, D, R(21) );
P( D, E, A, B, C, R(22) );
P( C, D, E, A, B, R(23) );
P( B, C, D, E, A, R(24) );
P( A, B, C, D, E, R(25) );
P( E, A, B, C, D, R(26) );
P( D, E, A, B, C, R(27) );
P( C, D, E, A, B, R(28) );
P( B, C, D, E, A, R(29) );
P( A, B, C, D, E, R(30) );
P( E, A, B, C, D, R(31) );
P( D, E, A, B, C, R(32) );
P( C, D, E, A, B, R(33) );
P( B, C, D, E, A, R(34) );
P( A, B, C, D, E, R(35) );
P( E, A, B, C, D, R(36) );
P( D, E, A, B, C, R(37) );
P( C, D, E, A, B, R(38) );
P( B, C, D, E, A, R(39) );
#undef K
#undef F
#define F(x,y,z) ((x & y) | (z & (x | y)))
#define K 0x8F1BBCDC
P( A, B, C, D, E, R(40) );
P( E, A, B, C, D, R(41) );
P( D, E, A, B, C, R(42) );
P( C, D, E, A, B, R(43) );
P( B, C, D, E, A, R(44) );
P( A, B, C, D, E, R(45) );
P( E, A, B, C, D, R(46) );
P( D, E, A, B, C, R(47) );
P( C, D, E, A, B, R(48) );
P( B, C, D, E, A, R(49) );
P( A, B, C, D, E, R(50) );
P( E, A, B, C, D, R(51) );
P( D, E, A, B, C, R(52) );
P( C, D, E, A, B, R(53) );
P( B, C, D, E, A, R(54) );
P( A, B, C, D, E, R(55) );
P( E, A, B, C, D, R(56) );
P( D, E, A, B, C, R(57) );
P( C, D, E, A, B, R(58) );
P( B, C, D, E, A, R(59) );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0xCA62C1D6
P( A, B, C, D, E, R(60) );
P( E, A, B, C, D, R(61) );
P( D, E, A, B, C, R(62) );
P( C, D, E, A, B, R(63) );
P( B, C, D, E, A, R(64) );
P( A, B, C, D, E, R(65) );
P( E, A, B, C, D, R(66) );
P( D, E, A, B, C, R(67) );
P( C, D, E, A, B, R(68) );
P( B, C, D, E, A, R(69) );
P( A, B, C, D, E, R(70) );
P( E, A, B, C, D, R(71) );
P( D, E, A, B, C, R(72) );
P( C, D, E, A, B, R(73) );
P( B, C, D, E, A, R(74) );
P( A, B, C, D, E, R(75) );
P( E, A, B, C, D, R(76) );
P( D, E, A, B, C, R(77) );
P( C, D, E, A, B, R(78) );
P( B, C, D, E, A, R(79) );
#undef K
#undef F
ctx->state[0] += A;
ctx->state[1] += B;
ctx->state[2] += C;
ctx->state[3] += D;
ctx->state[4] += E;
}
/*===========================================================================
FUNCTION SHA1_GPU_UPDATE
DESCRIPTION
SHA1 update buffer
DEPENDENCIES
None
RETURN VALUE
None
===========================================================================*/
__device__
void sha1_update( sha1_context *ctx, unsigned char *input, int ilen ) {
int fill;
unsigned long left;
if( ilen <= 0 )
return;
left = ctx->total[0] & 0x3F;
fill = 64 - left;
ctx->total[0] += ilen;
ctx->total[0] &= 0xFFFFFFFF;
if ( ctx->total[0] < (unsigned long) ilen )
ctx->total[1]++;
if ( left && ilen >= fill ) {
/*memcpy( (void *) (ctx->buffer + left),
(void *) input, fill );*/
for (int i = 0; i < fill; i++) {
ctx->buffer[i+left] = input[i];
}
sha1_process( ctx, ctx->buffer );
input += fill;
ilen -= fill;
left = 0;
}
while ( ilen >= 64 ) {
sha1_process( ctx, input );
input += 64;
ilen -= 64;
}
if ( ilen > 0 ) {
/*memcpy( (void *) (ctx->buffer + left),
(void *) input, ilen );*/
for (int i = 0; i < ilen; i++) {
ctx->buffer[i+left] = input[i];
}
}
}
/*===========================================================================
FUNCTION SHA1_GPU_FINISH
DESCRIPTION
SHA1 final digest
DEPENDENCIES
None
RETURN VALUE
None
===========================================================================*/
__device__
void sha1_finish( sha1_context *ctx, unsigned char *output ) {
unsigned long last, padn;
unsigned long high, low;
unsigned char msglen[8];
high = ( ctx->total[0] >> 29 )
| ( ctx->total[1] << 3 );
low = ( ctx->total[0] << 3 );
PUT_UINT32_BE( high, msglen, 0 );
PUT_UINT32_BE( low, msglen, 4 );
last = ctx->total[0] & 0x3F;
padn = ( last < 56 ) ? ( 56 - last ) : ( 120 - last );
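/* Pad until the message length is congruent to 56 mod 64, then append the
   8-byte big-endian bit count. */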
sha1_update( ctx, (unsigned char *) sha1_padding, padn );
sha1_update( ctx, msglen, 8 );
PUT_UINT32_BE( ctx->state[0], output, 0 );
#ifndef FEATURE_REDUCED_HASH_SIZE
PUT_UINT32_BE( ctx->state[1], output, 4 );
PUT_UINT32_BE( ctx->state[2], output, 8 );
PUT_UINT32_BE( ctx->state[3], output, 12 );
PUT_UINT32_BE( ctx->state[4], output, 16 );
#endif
}
/*===========================================================================
FUNCTION SHA1_INTERNAL
DESCRIPTION
Does the real sha1 algorithm
DEPENDENCIES
None
RETURN VALUE
output is the hash result
===========================================================================*/
__device__
void sha1_internal( unsigned char *input, int ilen,
unsigned char *output ) {
sha1_context ctx;
sha1_starts( &ctx );
sha1_update( &ctx, input, ilen );
sha1_finish( &ctx, output );
memset( &ctx, 0, sizeof( sha1_context ) );
}
#endif
#ifdef FEATURE_SHARED_MEMORY
/*===========================================================================
FUNCTION SHA1_INTERNAL
DESCRIPTION
Does the real sha1 algorithm.
DEPENDENCIES
None
RETURN VALUE
output is the hash result
===========================================================================*/
__device__
unsigned long macroRFunction(int t, unsigned int *sharedMemory) {
return sharedMemory[SHARED_MEMORY_INDEX((t - 3) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX((t - 8) & 0x0F)] ^
sharedMemory[SHARED_MEMORY_INDEX((t - 14) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX( t & 0x0F)];
}
__device__
static void sha1_internal( unsigned int *input, unsigned int *sharedMemory,
unsigned int chunkSize, unsigned char *output ) {
/* Number of passes (512 bit blocks) we have to do */
int numberOfPasses = chunkSize / 64 + 1;
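/* chunkSize/64 full 512-bit blocks plus one final block whose words 13-15
   carry the 0x80 padding marker and the 64-bit bit count. */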
/* Used during the hashing process */
unsigned long temp, A, B, C, D, E;
//unsigned long shared14, shared15;
/* Byte-level view of the shared words, used for the endianness conversion below */
unsigned char *data = (unsigned char *)sharedMemory;
/* Will hold the hash value through the
intermediate stages of SHA1 algorithm */
unsigned int state0 = 0x67452301;
unsigned int state1 = 0xEFCDAB89;
unsigned int state2 = 0x98BADCFE;
unsigned int state3 = 0x10325476;
unsigned int state4 = 0xC3D2E1F0;
/* int x0 = SHARED_MEMORY_INDEX(0);
int x1 = SHARED_MEMORY_INDEX(1);
int x2 = SHARED_MEMORY_INDEX(2);
int x3 = SHARED_MEMORY_INDEX(3);
int x4 = SHARED_MEMORY_INDEX(4);
int x5 = SHARED_MEMORY_INDEX(5);
int x6 = SHARED_MEMORY_INDEX(6);
int x7 = SHARED_MEMORY_INDEX(7);
int x8 = SHARED_MEMORY_INDEX(8);
int x9 = SHARED_MEMORY_INDEX(9);
int x10 = SHARED_MEMORY_INDEX(10);
int x11 = SHARED_MEMORY_INDEX(11);
int x12 = SHARED_MEMORY_INDEX(12);
int x13 = SHARED_MEMORY_INDEX(13);
int x14 = SHARED_MEMORY_INDEX(14);
int x15 = SHARED_MEMORY_INDEX(15);
*/
#undef GET_CACHED_INDEX
#define GET_CACHED_INDEX(index) SHARED_MEMORY_INDEX(index)//(x##index)
for( int index = 0 ; index < (numberOfPasses) ; index++ ) {
/* Move data to the thread's shared memory space */
sharedMemory[GET_CACHED_INDEX(0)] = input[0 + 16 * index];
sharedMemory[GET_CACHED_INDEX(1)] = input[1 + 16 * index];
sharedMemory[GET_CACHED_INDEX(2)] = input[2 + 16 * index];
sharedMemory[GET_CACHED_INDEX(3)] = input[3 + 16 * index];
sharedMemory[GET_CACHED_INDEX(4)] = input[4 + 16 * index];
sharedMemory[GET_CACHED_INDEX(5)] = input[5 + 16 * index];
sharedMemory[GET_CACHED_INDEX(6)] = input[6 + 16 * index];
sharedMemory[GET_CACHED_INDEX(7)] = input[7 + 16 * index];
sharedMemory[GET_CACHED_INDEX(8)] = input[8 + 16 * index];
sharedMemory[GET_CACHED_INDEX(9)] = input[9 + 16 * index];
sharedMemory[GET_CACHED_INDEX(10)] = input[10 + 16 * index];
sharedMemory[GET_CACHED_INDEX(11)] = input[11 + 16 * index];
sharedMemory[GET_CACHED_INDEX(12)] = input[12 + 16 * index];
/* Testing the code with and without this if statement shows that
it has no effect on performance. */
if(index == numberOfPasses -1 ) {
/* The last pass will contain the size of the chunk size (according to
official SHA1 algorithm). */
sharedMemory[GET_CACHED_INDEX(13)] = 0x00000080;
PUT_UINT32_BE( chunkSize >> 29,
data, GET_CACHED_INDEX(14) * 4 );
PUT_UINT32_BE( chunkSize << 3,
data, GET_CACHED_INDEX(15) * 4 );
}
else {
sharedMemory[GET_CACHED_INDEX(13)] = input[13 + 16 * index];
sharedMemory[GET_CACHED_INDEX(14)] = input[14 + 16 * index];
sharedMemory[GET_CACHED_INDEX(15)] = input[15 + 16 * index];
}
/* Byte-swap each word in place so it is interpreted big-endian, as SHA-1 requires. */
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(0)],
data, GET_CACHED_INDEX(0) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(1)],
data, GET_CACHED_INDEX(1) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(2)],
data, GET_CACHED_INDEX(2) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(3)],
data, GET_CACHED_INDEX(3) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(4)],
data, GET_CACHED_INDEX(4) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(5)],
data, GET_CACHED_INDEX(5) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(6)],
data, GET_CACHED_INDEX(6) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(7)],
data, GET_CACHED_INDEX(7) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(8)],
data, GET_CACHED_INDEX(8) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(9)],
data, GET_CACHED_INDEX(9) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(10)],
data, GET_CACHED_INDEX(10) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(11)],
data, GET_CACHED_INDEX(11) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(12)],
data, GET_CACHED_INDEX(12) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(13)],
data, GET_CACHED_INDEX(13) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(14)],
data, GET_CACHED_INDEX(14) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(15)],
data, GET_CACHED_INDEX(15) * 4 );
#undef S
#define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n)))
#undef R
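/* R(t): same SHA-1 message schedule as above, but the 16-word circular buffer
   lives in the warp's interleaved shared-memory slice. */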
#define R(t) \
( \
temp = macroRFunction(t, sharedMemory) , \
( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \
)
/*
#define R(t) \
( \
temp = sharedMemory[SHARED_MEMORY_INDEX((t - 3) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX((t - 8) & 0x0F)] ^ \
sharedMemory[SHARED_MEMORY_INDEX((t - 14) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX( t & 0x0F)], \
( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \
)
*/
#undef P
#define P(a,b,c,d,e,x) \
{ \
e += S(a,5) + F(b,c,d) + K + x; b = S(b,30); \
}
A = state0;
B = state1;
C = state2;
D = state3;
E = state4;
#define F(x,y,z) (z ^ (x & (y ^ z)))
#define K 0x5A827999
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(0)] );
P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(1)] );
P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(2)] );
P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(3)] );
P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(4)] );
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(5)] );
P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(6)] );
P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(7)] );
P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(8)] );
P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(9)] );
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(10)] );
P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(11)] );
P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(12)] );
P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(13)] );
P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(14)] );
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(15)] );
P( E, A, B, C, D, R(16) );
P( D, E, A, B, C, R(17) );
P( C, D, E, A, B, R(18) );
P( B, C, D, E, A, R(19) );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0x6ED9EBA1
P( A, B, C, D, E, R(20) );
P( E, A, B, C, D, R(21) );
P( D, E, A, B, C, R(22) );
P( C, D, E, A, B, R(23) );
P( B, C, D, E, A, R(24) );
P( A, B, C, D, E, R(25) );
P( E, A, B, C, D, R(26) );
P( D, E, A, B, C, R(27) );
P( C, D, E, A, B, R(28) );
P( B, C, D, E, A, R(29) );
P( A, B, C, D, E, R(30) );
P( E, A, B, C, D, R(31) );
P( D, E, A, B, C, R(32) );
P( C, D, E, A, B, R(33) );
P( B, C, D, E, A, R(34) );
P( A, B, C, D, E, R(35) );
P( E, A, B, C, D, R(36) );
P( D, E, A, B, C, R(37) );
P( C, D, E, A, B, R(38) );
P( B, C, D, E, A, R(39) );
#undef K
#undef F
#define F(x,y,z) ((x & y) | (z & (x | y)))
#define K 0x8F1BBCDC
P( A, B, C, D, E, R(40) );
P( E, A, B, C, D, R(41) );
P( D, E, A, B, C, R(42) );
P( C, D, E, A, B, R(43) );
P( B, C, D, E, A, R(44) );
P( A, B, C, D, E, R(45) );
P( E, A, B, C, D, R(46) );
P( D, E, A, B, C, R(47) );
P( C, D, E, A, B, R(48) );
P( B, C, D, E, A, R(49) );
P( A, B, C, D, E, R(50) );
P( E, A, B, C, D, R(51) );
P( D, E, A, B, C, R(52) );
P( C, D, E, A, B, R(53) );
P( B, C, D, E, A, R(54) );
P( A, B, C, D, E, R(55) );
P( E, A, B, C, D, R(56) );
P( D, E, A, B, C, R(57) );
P( C, D, E, A, B, R(58) );
P( B, C, D, E, A, R(59) );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0xCA62C1D6
P( A, B, C, D, E, R(60) );
P( E, A, B, C, D, R(61) );
P( D, E, A, B, C, R(62) );
P( C, D, E, A, B, R(63) );
P( B, C, D, E, A, R(64) );
P( A, B, C, D, E, R(65) );
P( E, A, B, C, D, R(66) );
P( D, E, A, B, C, R(67) );
P( C, D, E, A, B, R(68) );
P( B, C, D, E, A, R(69) );
P( A, B, C, D, E, R(70) );
P( E, A, B, C, D, R(71) );
P( D, E, A, B, C, R(72) );
P( C, D, E, A, B, R(73) );
P( B, C, D, E, A, R(74) );
P( A, B, C, D, E, R(75) );
P( E, A, B, C, D, R(76) );
P( D, E, A, B, C, R(77) );
P( C, D, E, A, B, R(78) );
P( B, C, D, E, A, R(79) );
#undef K
#undef F
state0 += A;
state1 += B;
state2 += C;
state3 += D;
state4 += E;
}
/* Got the hash, store it in the output buffer. */
PUT_UINT32_BE( state0, output, 0 );
#ifndef FEATURE_REDUCED_HASH_SIZE
PUT_UINT32_BE( state1, output, 4 );
PUT_UINT32_BE( state2, output, 8 );
PUT_UINT32_BE( state3, output, 12 );
PUT_UINT32_BE( state4, output, 16 );
#endif
}
__device__
static void sha1_internal_overlap( unsigned int *input, unsigned int *sharedMemory,
unsigned int chunkSize, unsigned char *output ) {
/* Number of passes (512 bit blocks) we have to do */
int numberOfPasses = chunkSize / 64 + 1;
/* Used during the hashing process */
unsigned long temp, A, B, C, D, E;
//unsigned long shared14, shared15;
/* Needed to do the big endian stuff */
unsigned char *data = (unsigned char *)sharedMemory;
// number of padding bytes.
int numPadBytes = 0;
int numPadInt = 0;
//int numPadRemain = 0;
/* Will hold the hash value through the
intermediate stages of SHA1 algorithm */
unsigned int state0 = 0x67452301;
unsigned int state1 = 0xEFCDAB89;
unsigned int state2 = 0x98BADCFE;
unsigned int state3 = 0x10325476;
unsigned int state4 = 0xC3D2E1F0;
int x0 = SHARED_MEMORY_INDEX(0);
int x1 = SHARED_MEMORY_INDEX(1);
int x2 = SHARED_MEMORY_INDEX(2);
int x3 = SHARED_MEMORY_INDEX(3);
int x4 = SHARED_MEMORY_INDEX(4);
int x5 = SHARED_MEMORY_INDEX(5);
int x6 = SHARED_MEMORY_INDEX(6);
int x7 = SHARED_MEMORY_INDEX(7);
int x8 = SHARED_MEMORY_INDEX(8);
int x9 = SHARED_MEMORY_INDEX(9);
int x10 = SHARED_MEMORY_INDEX(10);
int x11 = SHARED_MEMORY_INDEX(11);
int x12 = SHARED_MEMORY_INDEX(12);
int x13 = SHARED_MEMORY_INDEX(13);
int x14 = SHARED_MEMORY_INDEX(14);
int x15 = SHARED_MEMORY_INDEX(15);
#undef GET_CACHED_INDEX
#define GET_CACHED_INDEX(index) (x##index)
for( int index = 0 ; index < (numberOfPasses) ; index++ ) {
if(index == numberOfPasses -1 ){
numPadBytes = (64-12) - (chunkSize - (numberOfPasses-1)*64);
numPadInt = numPadBytes/sizeof(int);
/*numPadRemain = numPadBytes-numPadInt*sizeof(int);
printf("\nLast loop chunkSize = %d, numberOfPasses= %d and \nnumPadBytes = %d, numPadInt =%d, numPadRemain = %d\n",
chunkSize,numberOfPasses,numPadBytes,numPadInt,numPadRemain);*/
int i=0;
for(i=0;i<numPadInt;i++){
sharedMemory[SHARED_MEMORY_INDEX(13-i)] = 0;
}
int j=0;
for(j=0;j<(16-3-numPadInt);j++){
//printf("j= %d\n",j);
sharedMemory[SHARED_MEMORY_INDEX(j)] = input[j + 16 * index];
}
/* The last pass will contain the size of the chunk size (according to
official SHA1 algorithm). */
sharedMemory[SHARED_MEMORY_INDEX(13-i)] = 0x00000080;
//printf("the last one at %d\n",13-i);
PUT_UINT32_BE( chunkSize >> 29,
data, GET_CACHED_INDEX(14) * 4 );
PUT_UINT32_BE( chunkSize << 3,
data, GET_CACHED_INDEX(15) * 4 );
}
else{
/* Move data to the thread's shared memory space */
//printf("Not last loop\n");
sharedMemory[GET_CACHED_INDEX(0)] = input[0 + 16 * index];
sharedMemory[GET_CACHED_INDEX(1)] = input[1 + 16 * index];
sharedMemory[GET_CACHED_INDEX(2)] = input[2 + 16 * index];
sharedMemory[GET_CACHED_INDEX(3)] = input[3 + 16 * index];
sharedMemory[GET_CACHED_INDEX(4)] = input[4 + 16 * index];
sharedMemory[GET_CACHED_INDEX(5)] = input[5 + 16 * index];
sharedMemory[GET_CACHED_INDEX(6)] = input[6 + 16 * index];
sharedMemory[GET_CACHED_INDEX(7)] = input[7 + 16 * index];
sharedMemory[GET_CACHED_INDEX(8)] = input[8 + 16 * index];
sharedMemory[GET_CACHED_INDEX(9)] = input[9 + 16 * index];
sharedMemory[GET_CACHED_INDEX(10)] = input[10 + 16 * index];
sharedMemory[GET_CACHED_INDEX(11)] = input[11 + 16 * index];
sharedMemory[GET_CACHED_INDEX(12)] = input[12 + 16 * index];
sharedMemory[GET_CACHED_INDEX(13)] = input[13 + 16 * index];
sharedMemory[GET_CACHED_INDEX(14)] = input[14 + 16 * index];
sharedMemory[GET_CACHED_INDEX(15)] = input[15 + 16 * index];
}
/* int k=0;
printf("\nGPU DATA\n");
for(k=0;k<16;k++){
printf("%d\t",sharedMemory[SHARED_MEMORY_INDEX(k)]);
}
printf("\n\n");*/
/* Byte-swap each word in place so it is interpreted big-endian, as SHA-1 requires. */
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(0)],
data, GET_CACHED_INDEX(0) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(1)],
data, GET_CACHED_INDEX(1) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(2)],
data, GET_CACHED_INDEX(2) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(3)],
data, GET_CACHED_INDEX(3) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(4)],
data, GET_CACHED_INDEX(4) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(5)],
data, GET_CACHED_INDEX(5) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(6)],
data, GET_CACHED_INDEX(6) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(7)],
data, GET_CACHED_INDEX(7) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(8)],
data, GET_CACHED_INDEX(8) * 4 );
GET_UINT32_BE( sharedMemory[ GET_CACHED_INDEX(9)],
data, GET_CACHED_INDEX(9) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(10)],
data, GET_CACHED_INDEX(10) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(11)],
data, GET_CACHED_INDEX(11) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(12)],
data, GET_CACHED_INDEX(12) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(13)],
data, GET_CACHED_INDEX(13) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(14)],
data, GET_CACHED_INDEX(14) * 4 );
GET_UINT32_BE( sharedMemory[GET_CACHED_INDEX(15)],
data, GET_CACHED_INDEX(15) * 4 );
#undef S
#define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n)))
#undef R
#define R(t) \
( \
temp = macroRFunction(t, sharedMemory) , \
( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \
)
/*
#define R(t) \
( \
temp = sharedMemory[SHARED_MEMORY_INDEX((t - 3) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX((t - 8) & 0x0F)] ^ \
sharedMemory[SHARED_MEMORY_INDEX((t - 14) & 0x0F)] ^ sharedMemory[SHARED_MEMORY_INDEX( t & 0x0F)], \
( sharedMemory[SHARED_MEMORY_INDEX(t & 0x0F)] = S(temp,1) ) \
)
*/
#undef P
#define P(a,b,c,d,e,x) \
{ \
e += S(a,5) + F(b,c,d) + K + x; b = S(b,30); \
}
A = state0;
B = state1;
C = state2;
D = state3;
E = state4;
#define F(x,y,z) (z ^ (x & (y ^ z)))
#define K 0x5A827999
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(0)] );
P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(1)] );
P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(2)] );
P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(3)] );
P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(4)] );
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(5)] );
P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(6)] );
P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(7)] );
P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(8)] );
P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(9)] );
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(10)] );
P( E, A, B, C, D, sharedMemory[ GET_CACHED_INDEX(11)] );
P( D, E, A, B, C, sharedMemory[ GET_CACHED_INDEX(12)] );
P( C, D, E, A, B, sharedMemory[ GET_CACHED_INDEX(13)] );
P( B, C, D, E, A, sharedMemory[ GET_CACHED_INDEX(14)] );
P( A, B, C, D, E, sharedMemory[ GET_CACHED_INDEX(15)] );
P( E, A, B, C, D, R(16) );
P( D, E, A, B, C, R(17) );
P( C, D, E, A, B, R(18) );
P( B, C, D, E, A, R(19) );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0x6ED9EBA1
P( A, B, C, D, E, R(20) );
P( E, A, B, C, D, R(21) );
P( D, E, A, B, C, R(22) );
P( C, D, E, A, B, R(23) );
P( B, C, D, E, A, R(24) );
P( A, B, C, D, E, R(25) );
P( E, A, B, C, D, R(26) );
P( D, E, A, B, C, R(27) );
P( C, D, E, A, B, R(28) );
P( B, C, D, E, A, R(29) );
P( A, B, C, D, E, R(30) );
P( E, A, B, C, D, R(31) );
P( D, E, A, B, C, R(32) );
P( C, D, E, A, B, R(33) );
P( B, C, D, E, A, R(34) );
P( A, B, C, D, E, R(35) );
P( E, A, B, C, D, R(36) );
P( D, E, A, B, C, R(37) );
P( C, D, E, A, B, R(38) );
P( B, C, D, E, A, R(39) );
#undef K
#undef F
#define F(x,y,z) ((x & y) | (z & (x | y)))
#define K 0x8F1BBCDC
P( A, B, C, D, E, R(40) );
P( E, A, B, C, D, R(41) );
P( D, E, A, B, C, R(42) );
P( C, D, E, A, B, R(43) );
P( B, C, D, E, A, R(44) );
P( A, B, C, D, E, R(45) );
P( E, A, B, C, D, R(46) );
P( D, E, A, B, C, R(47) );
P( C, D, E, A, B, R(48) );
P( B, C, D, E, A, R(49) );
P( A, B, C, D, E, R(50) );
P( E, A, B, C, D, R(51) );
P( D, E, A, B, C, R(52) );
P( C, D, E, A, B, R(53) );
P( B, C, D, E, A, R(54) );
P( A, B, C, D, E, R(55) );
P( E, A, B, C, D, R(56) );
P( D, E, A, B, C, R(57) );
P( C, D, E, A, B, R(58) );
P( B, C, D, E, A, R(59) );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0xCA62C1D6
P( A, B, C, D, E, R(60) );
P( E, A, B, C, D, R(61) );
P( D, E, A, B, C, R(62) );
P( C, D, E, A, B, R(63) );
P( B, C, D, E, A, R(64) );
P( A, B, C, D, E, R(65) );
P( E, A, B, C, D, R(66) );
P( D, E, A, B, C, R(67) );
P( C, D, E, A, B, R(68) );
P( B, C, D, E, A, R(69) );
P( A, B, C, D, E, R(70) );
P( E, A, B, C, D, R(71) );
P( D, E, A, B, C, R(72) );
P( C, D, E, A, B, R(73) );
P( B, C, D, E, A, R(74) );
P( A, B, C, D, E, R(75) );
P( E, A, B, C, D, R(76) );
P( D, E, A, B, C, R(77) );
P( C, D, E, A, B, R(78) );
P( B, C, D, E, A, R(79) );
#undef K
#undef F
state0 += A;
state1 += B;
state2 += C;
state3 += D;
state4 += E;
}
/* Got the hash, store it in the output buffer. */
PUT_UINT32_BE( state0, output, 0 );
#ifndef FEATURE_REDUCED_HASH_SIZE
PUT_UINT32_BE( state1, output, 4 );
PUT_UINT32_BE( state2, output, 8 );
PUT_UINT32_BE( state3, output, 12 );
PUT_UINT32_BE( state4, output, 16 );
#endif
}
#endif
/*--------------------------------------------------------------------------
GLOBAL FUNCTIONS
--------------------------------------------------------------------------*/
/*===========================================================================
FUNCTION SHA1
DESCRIPTION
Main sha1 hash function
DEPENDENCIES
GPU must be initialized
RETURN VALUE
output: the hash result
===========================================================================*/
__global__
void sha1( unsigned char *input, int chunkSize, int totalThreads,
int padSize, unsigned char *scratch ) {
// get the current thread index
int threadIndex = threadIdx.x + blockDim.x * blockIdx.x;
int chunkIndex = threadIndex * chunkSize;
int hashIndex = threadIndex * SHA1_HASH_SIZE;
if(threadIndex >= totalThreads)
return;
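// The final chunk is padSize bytes short of a full chunk; zero its tail so the hash
// covers deterministic padding rather than stale memory.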
if ((threadIndex == (totalThreads - 1)) && (padSize > 0)) {
for(int i = 0 ; i < padSize ; i++)
input[chunkIndex + chunkSize - padSize + i] = 0;
}
#ifdef FEATURE_SHARED_MEMORY
__shared__ unsigned int sharedMemory[4 * 1024 - 32];
unsigned int *sharedMemoryIndex = sharedMemory + ((threadIdx.x >> 5) * 512);
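// Each warp gets a 512-word (2 KB) slice of this array; 8 warps would need 4096 words
// but only 4064 are declared, so the launch presumably uses at most 224 threads (7 warps) per block.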
unsigned char *tempInput = input + chunkIndex;
unsigned int *inputIndex = (unsigned int *)(tempInput);
sha1_internal(inputIndex, sharedMemoryIndex, chunkSize,
scratch + hashIndex );
#else
sha1_internal(input + chunkIndex, chunkSize, scratch + hashIndex );
#endif /* FEATURE_SHARED_MEMORY */
}
__global__
void sha1_overlap( unsigned char *input, int chunkSize, int offset,
int totalThreads, int padSize, unsigned char *output ) {
int threadIndex = threadIdx.x + blockDim.x * blockIdx.x;
int chunkIndex = threadIndex * offset;
int hashIndex = threadIndex * SHA1_HASH_SIZE;
if(threadIndex >= totalThreads)
return;
if ((threadIndex == (totalThreads - 1))) {
chunkSize-= padSize;
}
#ifdef FEATURE_SHARED_MEMORY
__shared__ unsigned int sharedMemory[4 * 1024 - 32];
//NOTE : SAMER : this can exceed the size of the shared memory
unsigned int *sharedMemoryIndex = sharedMemory + ((threadIdx.x >> 5) * 512);
unsigned int *inputIndex = (unsigned int *)(input + chunkIndex);
sha1_internal_overlap(inputIndex, sharedMemoryIndex, chunkSize,
output + hashIndex );
#else
sha1_internal(input + chunkIndex, chunkSize, output + hashIndex );
#endif /* FEATURE_SHARED_MEMORY */
}
|
d4afa3ecd06283ea24a223093e92381cb0f3af98.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <cstdlib>
#include <ctime>
#include <math.h>
#include <iostream>
#include <set>
#include "utils.hpp"
#include "options.hpp"
#include "b7.cuh"
/////////////////////////////
/////////////////////////////
namespace chrono = std::chrono;
using clock_type = chrono::high_resolution_clock;
/////////////////////////////
/////////////////////////////
void random_coo(int* x, int *y, int *val, int N, int degree) {
for (int i = 0; i < N; i++) {
std::set<int> edges;
while (edges.size() < degree) {
edges.insert(rand() % N);
}
int j = 0;
for (auto iter = edges.begin(); iter != edges.end(); iter++, j++) {
x[i * degree + j] = i;
y[i * degree + j] = *iter;
val[i * degree + j] = 1;
}
}
}
void reset(float *auth1, float *auth2, float *hub1, float* hub2, float *auth_norm, float *hub_norm, int N) {
for (int i = 0; i < N; i++) {
auth1[i] = 1;
auth2[i] = 1;
hub1[i] = 1;
hub2[i] = 1;
}
auth_norm[0] = 0;
hub_norm[0] = 0;
// hipMemPrefetchAsync(auth1, N * sizeof(float), 0);
// hipMemPrefetchAsync(auth2, N * sizeof(float), 0);
// hipMemPrefetchAsync(hub1, N * sizeof(float), 0);
// hipMemPrefetchAsync(hub2, N * sizeof(float), 0);
// hipMemPrefetchAsync(auth_norm, sizeof(float), 0);
// hipMemPrefetchAsync(hub_norm, sizeof(float), 0);
// hipDeviceSynchronize();
}
/////////////////////////////
/////////////////////////////
int main(int argc, char *argv[]) {
srand(time(0));
Options options = Options(argc, argv);
int debug = options.debug;
int num_executions = options.num_iter;
int N = options.N;
int degree = 3;
int iterations = 5;
int block_size = options.block_size_1d;
int num_blocks = options.num_blocks;
int skip_iterations = options.skip_iterations;
int err = 0;
int nnz = degree * N;
if (debug) {
std::cout << "running b7 sync" << std::endl;
std::cout << "N=" << N << std::endl;
std::cout << "num executions=" << num_executions << std::endl;
std::cout << "block size 1d=" << block_size << std::endl;
std::cout << "num blocks=" << num_blocks << std::endl;
std::cout << "skip iteration time=" << skip_iterations << std::endl;
}
auto start = clock_type::now();
int *ptr, *idx, *val, *ptr2, *idx2, *val2, *rowCounter1, *rowCounter2;
int *ptr_tmp, *idx_tmp, *val_tmp, *ptr2_tmp, *idx2_tmp, *val2_tmp;
float *auth1, *auth2, *hub1, *hub2, *auth_norm, *hub_norm;
// Use temporary CPU vectors to simplify reinitialization at each benchmark execution;
ptr_tmp = (int *) malloc(sizeof(int) * (N + 1));
ptr2_tmp = (int *) malloc(sizeof(int) * (N + 1));
idx_tmp = (int *) malloc(sizeof(int) * nnz);
idx2_tmp = (int *) malloc(sizeof(int) * nnz);
val_tmp = (int *) malloc(sizeof(int) * nnz);
val2_tmp = (int *) malloc(sizeof(int) * nnz);
err = hipMallocManaged(&ptr, sizeof(int) * (N + 1));
err = hipMallocManaged(&ptr2, sizeof(int) * (N + 1));
err = hipMallocManaged(&idx, sizeof(int) * nnz);
err = hipMallocManaged(&idx2, sizeof(int) * nnz);
err = hipMallocManaged(&val, sizeof(int) * nnz);
err = hipMallocManaged(&val2, sizeof(int) * nnz);
err = hipMallocManaged(&rowCounter1, sizeof(int));
err = hipMallocManaged(&rowCounter2, sizeof(int));
err = hipMallocManaged(&auth1, sizeof(float) * N);
err = hipMallocManaged(&auth2, sizeof(float) * N);
err = hipMallocManaged(&hub1, sizeof(float) * N);
err = hipMallocManaged(&hub2, sizeof(float) * N);
err = hipMallocManaged(&auth_norm, sizeof(float));
err = hipMallocManaged(&hub_norm, sizeof(float));
if (debug && err) std::cout << err << std::endl;
// Initialize arrays;
// Create a random COO;
int *x = (int*) malloc(nnz * sizeof(int));
int *y = (int*) malloc(nnz * sizeof(int));
int *v = (int*) malloc(nnz * sizeof(int));
random_coo(x, y, v, N, degree);
// Create a CSR;
coo2csr(ptr_tmp, idx_tmp, val_tmp, x, y, v, N, N, nnz);
coo2csr(ptr2_tmp, idx2_tmp, val2_tmp, y, x, v, N, N, nnz);
auto end = clock_type::now();
if (debug) std::cout << "init=" << (float) chrono::duration_cast<chrono::microseconds>(end - start).count() / 1000 << " ms" << std::endl;
// Print header;
if (!debug) std::cout << "num_iter,gpu_result,total_time_sec,overhead_sec,computation_sec" << std::endl;
float tot = 0;
for (int i = 0; i < num_executions; i++) {
if (debug) std::cout << "\n-- iter=" << i << std::endl;
auto start_tmp = clock_type::now();
for (int j = 0; j < nnz; j++) {
idx[j] = idx_tmp[j];
idx2[j] = idx2_tmp[j];
val[j] = val_tmp[j];
val2[j] = val2_tmp[j];
}
for (int j = 0; j < N + 1; j++) {
ptr[j] = ptr_tmp[j];
ptr2[j] = ptr2_tmp[j];
}
reset(auth1, auth2, hub1, hub2, auth_norm, hub_norm, N);
rowCounter1[0] = 0;
rowCounter2[0] = 0;
auto end_tmp = clock_type::now();
auto reset_time = chrono::duration_cast<chrono::microseconds>(end_tmp - start_tmp).count();
if (debug) std::cout << " reset=" << (float) reset_time / 1000 << " ms" << std::endl;
start = clock_type::now();
for (int iter = 0; iter < iterations; iter++) {
// hipMemPrefetchAsync(auth1, N * sizeof(float), 0);
// hipMemPrefetchAsync(auth2, N * sizeof(float), 0);
// hipMemPrefetchAsync(hub1, N * sizeof(float), 0);
// hipMemPrefetchAsync(hub2, N * sizeof(float), 0);
// hipMemPrefetchAsync(auth_norm, sizeof(float), 0);
// hipMemPrefetchAsync(hub_norm, sizeof(float), 0);
// hipDeviceSynchronize();
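// One HITS iteration: auth2 = A^T * hub1 and hub2 = A * auth1 via spmv3; the sum/divide
// kernels then normalize auth2/hub2 by their element sums into auth1/hub1 for the next iteration.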
int nb = ceil(N / ((float) block_size));
// spmv<<<nb, block_size>>>(ptr2, idx2, val2, hub1, auth2, N, nnz);
hipLaunchKernelGGL(( spmv3), dim3(nb), dim3(block_size), block_size * sizeof(float), 0, rowCounter1, ptr2, idx2, val2, hub1, auth2, N);
err = hipDeviceSynchronize();
// spmv<<<nb, block_size>>>(ptr, idx, val, auth1, hub2, N, nnz);
hipLaunchKernelGGL(( spmv3), dim3(nb), dim3(block_size), block_size * sizeof(float), 0, rowCounter2, ptr, idx, val, auth1, hub2, N);
err = hipDeviceSynchronize();
hipLaunchKernelGGL(( sum), dim3(num_blocks), dim3(block_size), 0, 0, auth2, auth_norm, N);
err = hipDeviceSynchronize();
hipLaunchKernelGGL(( sum), dim3(num_blocks), dim3(block_size), 0, 0, hub2, hub_norm, N);
err = hipDeviceSynchronize();
hipLaunchKernelGGL(( divide), dim3(num_blocks), dim3(block_size), 0, 0, auth2, auth1, auth_norm, N);
err = hipDeviceSynchronize();
hipLaunchKernelGGL(( divide), dim3(num_blocks), dim3(block_size), 0, 0, hub2, hub1, hub_norm, N);
err = hipDeviceSynchronize();
auth_norm[0] = 0;
hub_norm[0] = 0;
rowCounter1[0] = 0;
rowCounter2[0] = 0;
if (debug && err) std::cout << err << std::endl;
}
end = clock_type::now();
auto tmp = chrono::duration_cast<chrono::microseconds>(end - start).count();
if (i >= skip_iterations) tot += tmp;
if (debug) {
std::cout << " gpu result=[";
for (int j = 0; j < 10; j++) {
std::cout << auth1[j] << ", ";
}
std::cout << ", ...]; time=" << (float) tmp / 1000 << " ms" << std::endl;
} else {
std::cout << i << "," << 0.0 << "," << (float) (reset_time + tmp) / 1e6 << "," << (float) reset_time / 1e6 << "," << (float) tmp / 1e6 << std::endl;
}
}
// Print;
hipDeviceSynchronize();
if (debug) std::cout << "\nmean exec time=" << (float) tot / (1000 * (num_executions - skip_iterations)) << " ms" << std::endl;
}
| d4afa3ecd06283ea24a223093e92381cb0f3af98.cu | #include <chrono>
#include <cstdlib>
#include <ctime>
#include <math.h>
#include <iostream>
#include <set>
#include "utils.hpp"
#include "options.hpp"
#include "b7.cuh"
/////////////////////////////
/////////////////////////////
namespace chrono = std::chrono;
using clock_type = chrono::high_resolution_clock;
/////////////////////////////
/////////////////////////////
void random_coo(int* x, int *y, int *val, int N, int degree) {
for (int i = 0; i < N; i++) {
std::set<int> edges;
while (edges.size() < degree) {
edges.insert(rand() % N);
}
int j = 0;
for (auto iter = edges.begin(); iter != edges.end(); iter++, j++) {
x[i * degree + j] = i;
y[i * degree + j] = *iter;
val[i * degree + j] = 1;
}
}
}
void reset(float *auth1, float *auth2, float *hub1, float* hub2, float *auth_norm, float *hub_norm, int N) {
for (int i = 0; i < N; i++) {
auth1[i] = 1;
auth2[i] = 1;
hub1[i] = 1;
hub2[i] = 1;
}
auth_norm[0] = 0;
hub_norm[0] = 0;
// cudaMemPrefetchAsync(auth1, N * sizeof(float), 0);
// cudaMemPrefetchAsync(auth2, N * sizeof(float), 0);
// cudaMemPrefetchAsync(hub1, N * sizeof(float), 0);
// cudaMemPrefetchAsync(hub2, N * sizeof(float), 0);
// cudaMemPrefetchAsync(auth_norm, sizeof(float), 0);
// cudaMemPrefetchAsync(hub_norm, sizeof(float), 0);
// cudaDeviceSynchronize();
}
/////////////////////////////
/////////////////////////////
int main(int argc, char *argv[]) {
srand(time(0));
Options options = Options(argc, argv);
int debug = options.debug;
int num_executions = options.num_iter;
int N = options.N;
int degree = 3;
int iterations = 5;
int block_size = options.block_size_1d;
int num_blocks = options.num_blocks;
int skip_iterations = options.skip_iterations;
int err = 0;
int nnz = degree * N;
if (debug) {
std::cout << "running b7 sync" << std::endl;
std::cout << "N=" << N << std::endl;
std::cout << "num executions=" << num_executions << std::endl;
std::cout << "block size 1d=" << block_size << std::endl;
std::cout << "num blocks=" << num_blocks << std::endl;
std::cout << "skip iteration time=" << skip_iterations << std::endl;
}
auto start = clock_type::now();
int *ptr, *idx, *val, *ptr2, *idx2, *val2, *rowCounter1, *rowCounter2;
int *ptr_tmp, *idx_tmp, *val_tmp, *ptr2_tmp, *idx2_tmp, *val2_tmp;
float *auth1, *auth2, *hub1, *hub2, *auth_norm, *hub_norm;
// Use temporary CPU vectors to simplify reinitialization at each benchmark execution;
ptr_tmp = (int *) malloc(sizeof(int) * (N + 1));
ptr2_tmp = (int *) malloc(sizeof(int) * (N + 1));
idx_tmp = (int *) malloc(sizeof(int) * nnz);
idx2_tmp = (int *) malloc(sizeof(int) * nnz);
val_tmp = (int *) malloc(sizeof(int) * nnz);
val2_tmp = (int *) malloc(sizeof(int) * nnz);
err = cudaMallocManaged(&ptr, sizeof(int) * (N + 1));
err = cudaMallocManaged(&ptr2, sizeof(int) * (N + 1));
err = cudaMallocManaged(&idx, sizeof(int) * nnz);
err = cudaMallocManaged(&idx2, sizeof(int) * nnz);
err = cudaMallocManaged(&val, sizeof(int) * nnz);
err = cudaMallocManaged(&val2, sizeof(int) * nnz);
err = cudaMallocManaged(&rowCounter1, sizeof(int));
err = cudaMallocManaged(&rowCounter2, sizeof(int));
err = cudaMallocManaged(&auth1, sizeof(float) * N);
err = cudaMallocManaged(&auth2, sizeof(float) * N);
err = cudaMallocManaged(&hub1, sizeof(float) * N);
err = cudaMallocManaged(&hub2, sizeof(float) * N);
err = cudaMallocManaged(&auth_norm, sizeof(float));
err = cudaMallocManaged(&hub_norm, sizeof(float));
if (debug && err) std::cout << err << std::endl;
// Initialize arrays;
// Create a random COO;
int *x = (int*) malloc(nnz * sizeof(int));
int *y = (int*) malloc(nnz * sizeof(int));
int *v = (int*) malloc(nnz * sizeof(int));
random_coo(x, y, v, N, degree);
// Create a CSR;
coo2csr(ptr_tmp, idx_tmp, val_tmp, x, y, v, N, N, nnz);
coo2csr(ptr2_tmp, idx2_tmp, val2_tmp, y, x, v, N, N, nnz);
auto end = clock_type::now();
if (debug) std::cout << "init=" << (float) chrono::duration_cast<chrono::microseconds>(end - start).count() / 1000 << " ms" << std::endl;
// Print header;
if (!debug) std::cout << "num_iter,gpu_result,total_time_sec,overhead_sec,computation_sec" << std::endl;
float tot = 0;
for (int i = 0; i < num_executions; i++) {
if (debug) std::cout << "\n-- iter=" << i << std::endl;
auto start_tmp = clock_type::now();
for (int j = 0; j < nnz; j++) {
idx[j] = idx_tmp[j];
idx2[j] = idx2_tmp[j];
val[j] = val_tmp[j];
val2[j] = val2_tmp[j];
}
for (int j = 0; j < N + 1; j++) {
ptr[j] = ptr_tmp[j];
ptr2[j] = ptr2_tmp[j];
}
reset(auth1, auth2, hub1, hub2, auth_norm, hub_norm, N);
rowCounter1[0] = 0;
rowCounter2[0] = 0;
auto end_tmp = clock_type::now();
auto reset_time = chrono::duration_cast<chrono::microseconds>(end_tmp - start_tmp).count();
if (debug) std::cout << " reset=" << (float) reset_time / 1000 << " ms" << std::endl;
start = clock_type::now();
for (int iter = 0; iter < iterations; iter++) {
// cudaMemPrefetchAsync(auth1, N * sizeof(float), 0);
// cudaMemPrefetchAsync(auth2, N * sizeof(float), 0);
// cudaMemPrefetchAsync(hub1, N * sizeof(float), 0);
// cudaMemPrefetchAsync(hub2, N * sizeof(float), 0);
// cudaMemPrefetchAsync(auth_norm, sizeof(float), 0);
// cudaMemPrefetchAsync(hub_norm, sizeof(float), 0);
// cudaDeviceSynchronize();
int nb = ceil(N / ((float) block_size));
// spmv<<<nb, block_size>>>(ptr2, idx2, val2, hub1, auth2, N, nnz);
spmv3<<<nb, block_size, block_size * sizeof(float)>>>(rowCounter1, ptr2, idx2, val2, hub1, auth2, N);
err = cudaDeviceSynchronize();
// spmv<<<nb, block_size>>>(ptr, idx, val, auth1, hub2, N, nnz);
spmv3<<<nb, block_size, block_size * sizeof(float)>>>(rowCounter2, ptr, idx, val, auth1, hub2, N);
err = cudaDeviceSynchronize();
sum<<<num_blocks, block_size>>>(auth2, auth_norm, N);
err = cudaDeviceSynchronize();
sum<<<num_blocks, block_size>>>(hub2, hub_norm, N);
err = cudaDeviceSynchronize();
divide<<<num_blocks, block_size>>>(auth2, auth1, auth_norm, N);
err = cudaDeviceSynchronize();
divide<<<num_blocks, block_size>>>(hub2, hub1, hub_norm, N);
err = cudaDeviceSynchronize();
auth_norm[0] = 0;
hub_norm[0] = 0;
rowCounter1[0] = 0;
rowCounter2[0] = 0;
if (debug && err) std::cout << err << std::endl;
}
end = clock_type::now();
auto tmp = chrono::duration_cast<chrono::microseconds>(end - start).count();
if (i >= skip_iterations) tot += tmp;
if (debug) {
std::cout << " gpu result=[";
for (int j = 0; j < 10; j++) {
std::cout << auth1[j] << ", ";
}
std::cout << ", ...]; time=" << (float) tmp / 1000 << " ms" << std::endl;
} else {
std::cout << i << "," << 0.0 << "," << (float) (reset_time + tmp) / 1e6 << "," << (float) reset_time / 1e6 << "," << (float) tmp / 1e6 << std::endl;
}
}
// Print;
cudaDeviceSynchronize();
if (debug) std::cout << "\nmean exec time=" << (float) tot / (1000 * (num_executions - skip_iterations)) << " ms" << std::endl;
}
|
6a8e0ba9ccc641a775ec915031f088a5dd10c9bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "render2D.h"
#include <limits.h>
#include <stdlib.h>
//#include <hiprand/hiprand_kernel.h>
//#include <helper_cuda.h>
#include <time.h>
#include "cuda_check_error.h"
#include "utility.h"
#define TX 64
#define TY 32
#define TZ 32
hiprandState_t* devStates2D=nullptr;
__global__ void setupSeeds2DKernel ( hiprandState_t * state, unsigned long seed )
{
int id = threadIdx.x;
hiprand_init ( seed, id, 0, &state[id] );
}
void setupPlaneSeeds(int tx)
{
CudaSafeCall(hipMalloc(&devStates2D, tx*sizeof(hiprandState_t)));
hipLaunchKernelGGL(( setupSeeds2DKernel), dim3(1),dim3(tx), 0, 0, devStates2D,time(nullptr));
CudaCheckError();
}
__global__ void renderPlaneKernel(float *d_pixels,int nx,int ny,float3 *d_pc,int len, plane *d_plane,float* d_max_density,
camera* d_cam, float radius,int *d_mutex,int ns,hiprandState_t* globalState)
{
hiprandState_t localState = globalState[threadIdx.x];
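// Note: the RNG state is indexed by threadIdx.x only and never written back to globalState,
// so threads sharing a threadIdx.x (across blocks and the y-dimension) draw identical sequences.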
const int pixel_index = blockIdx.x*blockDim.x+threadIdx.x;
const int pc_index = blockIdx.y*blockDim.y+threadIdx.y;
if(pixel_index>=nx*ny || pc_index>=len)
return;
int i,j;
i=pixel_index%nx;
j=pixel_index/nx;
for(int s=0;s<ns;s++)
{
float u,v;
if(ns==1){
u=float(i)/float(nx);
v=float(j)/float(ny);
}
else
{
u=float(i+hiprand_uniform(&localState)-0.5)/float(nx);
v=float(j+hiprand_uniform(&localState)-0.5)/float(ny);
}
ray r=d_cam->get_ray(u,v);
float3 intersect_pt;
if(!d_plane->intersects_with(r,intersect_pt))
{
#ifdef VERBOSE
printf("ray into (%d,%d) does not intersect with plane!\n",i,j);
#endif
continue;
}
#ifdef VERBOSE
//printf("ray into (%d,%d) intersects with plane @(%f,%f,%f)!\n",i,j,intersect_pt.x,intersect_pt.y,intersect_pt.z);
#endif
if(length(intersect_pt-d_pc[pc_index])<=radius)
{
#ifdef VERBOSE
printf("Hit!\n");
#endif
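// Spin on a per-pixel mutex (atomicCAS) so concurrent hits accumulate into
// d_pixels and d_max_density without races.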
bool go=true;
while(go)
{
if (0 == (atomicCAS(&d_mutex[pixel_index],0,1)))
{
d_pixels[pixel_index] += 1.0/ns;
*d_max_density = max(*d_max_density, d_pixels[pixel_index]);
atomicExch(&d_mutex[pixel_index], 0);
go=false;
}
}
}
}
}
| 6a8e0ba9ccc641a775ec915031f088a5dd10c9bb.cu | #include "render2D.h"
#include <limits.h>
#include <stdlib.h>
//#include <curand_uniform.h>
//#include <helper_cuda.h>
#include <time.h>
#include "cuda_check_error.h"
#include "utility.h"
#define TX 64
#define TY 32
#define TZ 32
curandState* devStates2D=nullptr;
__global__ void setupSeeds2DKernel ( curandState * state, unsigned long seed )
{
int id = threadIdx.x;
curand_init ( seed, id, 0, &state[id] );
}
void setupPlaneSeeds(int tx)
{
CudaSafeCall(cudaMalloc(&devStates2D, tx*sizeof(curandState)));
setupSeeds2DKernel<<<1,tx>>>(devStates2D,time(nullptr));
CudaCheckError();
}
__global__ void renderPlaneKernel(float *d_pixels,int nx,int ny,float3 *d_pc,int len, plane *d_plane,float* d_max_density,
camera* d_cam, float radius,int *d_mutex,int ns,curandState* globalState)
{
curandState localState = globalState[threadIdx.x];
const int pixel_index = blockIdx.x*blockDim.x+threadIdx.x;
const int pc_index = blockIdx.y*blockDim.y+threadIdx.y;
if(pixel_index>=nx*ny || pc_index>=len)
return;
int i,j;
i=pixel_index%nx;
j=pixel_index/nx;
for(int s=0;s<ns;s++)
{
float u,v;
if(ns==1){
u=float(i)/float(nx);
v=float(j)/float(ny);
}
else
{
u=float(i+curand_uniform(&localState)-0.5)/float(nx);
v=float(j+curand_uniform(&localState)-0.5)/float(ny);
}
ray r=d_cam->get_ray(u,v);
float3 intersect_pt;
if(!d_plane->intersects_with(r,intersect_pt))
{
#ifdef VERBOSE
printf("ray into (%d,%d) does not intersect with plane!\n",i,j);
#endif
continue;
}
#ifdef VERBOSE
//printf("ray into (%d,%d) intersects with plane @(%f,%f,%f)!\n",i,j,intersect_pt.x,intersect_pt.y,intersect_pt.z);
#endif
if(length(intersect_pt-d_pc[pc_index])<=radius)
{
#ifdef VERBOSE
printf("Hit!\n");
#endif
bool go=true;
while(go)
{
if (0 == (atomicCAS(&d_mutex[pixel_index],0,1)))
{
d_pixels[pixel_index] += 1.0/ns;
*d_max_density = max(*d_max_density, d_pixels[pixel_index]);
atomicExch(&d_mutex[pixel_index], 0);
go=false;
}
}
}
}
}
|
5b1c89a9a37d11838a6d41949490b0c64472a1d9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <HugeCTR/include/utils.hpp>
#include <algorithm>
#include <cstdio>
#include <ctime>
#include <functional>
#include <layers/dropout_layer.hpp>
#include <prims/linalg/binary_op.cuh>
#include <utils.cuh>
#include <utils.hpp>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
template <typename T>
DropoutLayer<T>::DropoutLayer(
const Tensor2<T>& in_tensor, const Tensor2<T>& out_tensor,
const std::shared_ptr<GeneralBuffer2<CudaAllocator>> blobs_buff, float rate,
const std::shared_ptr<GPUResource>& gpu_resource)
: Layer(gpu_resource), rate_(rate), scale_(1.0 / (1.0 - rate)) {
assert(in_tensor.get_num_elements() == out_tensor.get_num_elements());
assert(rate_ > 0.f && rate_ < 1.f);
const auto& in_tensor_dim = in_tensor.get_dimensions();
in_tensors_.emplace_back(in_tensor);
out_tensors_.emplace_back(out_tensor);
CudaDeviceContext context(get_device_id());
size_t num_feature = in_tensor_dim[1];
int batch_size = in_tensor_dim[0];
cudnnDataType_t data_type = CudnnDataType<T>::getType();
int n_stride = num_feature;
int w_stride = 1;
CK_CUDNN_THROW_(cudnnCreateTensorDescriptor(&in_out_desc_));
CK_CUDNN_THROW_(cudnnSetTensor4dDescriptorEx(in_out_desc_, data_type, batch_size, 1, 1,
num_feature, n_stride, 1, 1, w_stride));
CK_CUDNN_THROW_(cudnnCreateDropoutDescriptor(&dropout_descriptor_));
size_t sizeInBytes = 0;
CK_CUDNN_THROW_(cudnnDropoutGetStatesSize(gpu_resource->get_cudnn_handle(), &sizeInBytes));
assert(sizeInBytes != 0);
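// mask_ holds cuDNN's dropout reserve space: it records which activations were dropped
// in fprop so bprop can apply the same mask.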
CK_CUDNN_THROW_(cudnnDropoutGetReserveSpaceSize(in_out_desc_, &reserveSpaceSizeInBytes_));
blobs_buff->reserve({1, reserveSpaceSizeInBytes_}, &mask_);
CK_CUDA_THROW_(hipMalloc(&cudnn_status_, sizeInBytes));
CK_CUDNN_THROW_(cudnnSetDropoutDescriptor(dropout_descriptor_, gpu_resource->get_cudnn_handle(),
rate, cudnn_status_, sizeInBytes, 0));
}
template <typename T>
DropoutLayer<T>::~DropoutLayer() {
try {
CK_CUDNN_THROW_(cudnnDestroyDropoutDescriptor(dropout_descriptor_));
CK_CUDA_THROW_(hipFree(cudnn_status_));
CK_CUDNN_THROW_(cudnnDestroyTensorDescriptor(in_out_desc_));
} catch (const std::runtime_error& rt_err) {
std::cerr << rt_err.what() << std::endl;
}
}
template <typename T>
void DropoutLayer<T>::fprop(bool is_train) {
CudaDeviceContext context(get_device_id());
if (is_train) {
CK_CUDNN_THROW_(cudnnDropoutForward(
get_gpu().get_cudnn_handle(), dropout_descriptor_, in_out_desc_, in_tensors_[0].get_ptr(),
in_out_desc_, out_tensors_[0].get_ptr(), mask_.get_ptr(), reserveSpaceSizeInBytes_));
} else {
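// Inference path: dropout is the identity, so just copy input to output on the layer's stream.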
CK_CUDA_THROW_(hipMemcpyAsync(out_tensors_[0].get_ptr(), in_tensors_[0].get_ptr(),
in_tensors_[0].get_size_in_bytes(), hipMemcpyDeviceToDevice,
get_gpu().get_stream()));
}
#ifndef NDEBUG
hipDeviceSynchronize();
CK_CUDA_THROW_(hipGetLastError());
#endif
}
template <typename T>
void DropoutLayer<T>::bprop() {
CudaDeviceContext context(get_device_id());
CK_CUDNN_THROW_(cudnnDropoutBackward(
get_gpu().get_cudnn_handle(), dropout_descriptor_, in_out_desc_, out_tensors_[0].get_ptr(),
in_out_desc_, in_tensors_[0].get_ptr(), mask_.get_ptr(), reserveSpaceSizeInBytes_));
#ifndef NDEBUG
hipDeviceSynchronize();
CK_CUDA_THROW_(hipGetLastError());
#endif
}
template class DropoutLayer<float>;
template class DropoutLayer<__half>;
} // namespace HugeCTR
| 5b1c89a9a37d11838a6d41949490b0c64472a1d9.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <HugeCTR/include/utils.hpp>
#include <algorithm>
#include <cstdio>
#include <ctime>
#include <functional>
#include <layers/dropout_layer.hpp>
#include <prims/linalg/binary_op.cuh>
#include <utils.cuh>
#include <utils.hpp>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
template <typename T>
DropoutLayer<T>::DropoutLayer(
const Tensor2<T>& in_tensor, const Tensor2<T>& out_tensor,
const std::shared_ptr<GeneralBuffer2<CudaAllocator>> blobs_buff, float rate,
const std::shared_ptr<GPUResource>& gpu_resource)
: Layer(gpu_resource), rate_(rate), scale_(1.0 / (1.0 - rate)) {
assert(in_tensor.get_num_elements() == out_tensor.get_num_elements());
assert(rate_ > 0.f && rate_ < 1.f);
const auto& in_tensor_dim = in_tensor.get_dimensions();
in_tensors_.emplace_back(in_tensor);
out_tensors_.emplace_back(out_tensor);
CudaDeviceContext context(get_device_id());
size_t num_feature = in_tensor_dim[1];
int batch_size = in_tensor_dim[0];
cudnnDataType_t data_type = CudnnDataType<T>::getType();
int n_stride = num_feature;
int w_stride = 1;
CK_CUDNN_THROW_(cudnnCreateTensorDescriptor(&in_out_desc_));
CK_CUDNN_THROW_(cudnnSetTensor4dDescriptorEx(in_out_desc_, data_type, batch_size, 1, 1,
num_feature, n_stride, 1, 1, w_stride));
CK_CUDNN_THROW_(cudnnCreateDropoutDescriptor(&dropout_descriptor_));
size_t sizeInBytes = 0;
CK_CUDNN_THROW_(cudnnDropoutGetStatesSize(gpu_resource->get_cudnn_handle(), &sizeInBytes));
assert(sizeInBytes != 0);
CK_CUDNN_THROW_(cudnnDropoutGetReserveSpaceSize(in_out_desc_, &reserveSpaceSizeInBytes_));
blobs_buff->reserve({1, reserveSpaceSizeInBytes_}, &mask_);
CK_CUDA_THROW_(cudaMalloc(&cudnn_status_, sizeInBytes));
CK_CUDNN_THROW_(cudnnSetDropoutDescriptor(dropout_descriptor_, gpu_resource->get_cudnn_handle(),
rate, cudnn_status_, sizeInBytes, 0));
}
template <typename T>
DropoutLayer<T>::~DropoutLayer() {
try {
CK_CUDNN_THROW_(cudnnDestroyDropoutDescriptor(dropout_descriptor_));
CK_CUDA_THROW_(cudaFree(cudnn_status_));
CK_CUDNN_THROW_(cudnnDestroyTensorDescriptor(in_out_desc_));
} catch (const std::runtime_error& rt_err) {
std::cerr << rt_err.what() << std::endl;
}
}
template <typename T>
void DropoutLayer<T>::fprop(bool is_train) {
CudaDeviceContext context(get_device_id());
if (is_train) {
CK_CUDNN_THROW_(cudnnDropoutForward(
get_gpu().get_cudnn_handle(), dropout_descriptor_, in_out_desc_, in_tensors_[0].get_ptr(),
in_out_desc_, out_tensors_[0].get_ptr(), mask_.get_ptr(), reserveSpaceSizeInBytes_));
} else {
CK_CUDA_THROW_(cudaMemcpyAsync(out_tensors_[0].get_ptr(), in_tensors_[0].get_ptr(),
in_tensors_[0].get_size_in_bytes(), cudaMemcpyDeviceToDevice,
get_gpu().get_stream()));
}
#ifndef NDEBUG
cudaDeviceSynchronize();
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template <typename T>
void DropoutLayer<T>::bprop() {
CudaDeviceContext context(get_device_id());
CK_CUDNN_THROW_(cudnnDropoutBackward(
get_gpu().get_cudnn_handle(), dropout_descriptor_, in_out_desc_, out_tensors_[0].get_ptr(),
in_out_desc_, in_tensors_[0].get_ptr(), mask_.get_ptr(), reserveSpaceSizeInBytes_));
#ifndef NDEBUG
cudaDeviceSynchronize();
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template class DropoutLayer<float>;
template class DropoutLayer<__half>;
} // namespace HugeCTR
|
76447833b08b1ffa9f7760d435001caa1f108bee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void prod( int taille, float * a, float b, float *c ){
int index=threadIdx.x+blockDim.x*blockIdx.x;
if(index>=taille) return;
c[index]=a[index]*b;
} | 76447833b08b1ffa9f7760d435001caa1f108bee.cu | #include "includes.h"
__global__ void prod( int taille, float * a, float b, float *c ){
int index=threadIdx.x+blockDim.x*blockIdx.x;
if(index>=taille) return;
c[index]=a[index]*b;
} |
0b3c9b549e9f238f36c8d5a52fec2bd6cde05b50.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <mma.h>
#include <cuda_fp16.hpp>
#include <math.h>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <vector>
#include "dot_based_interact_shared_utils.cuh"
using namespace nvcuda;
template <uint WARPS_PER_BLOCK,
uint THREADBLOCK_SIZE,
uint WARP_SIZE,
uint WARP_SIZE_LOG_2,
uint TILE_LENGTH,
uint TILE_LENGTH_LOG_2,
uint TILE_WIDTH,
uint TILE_WIDTH_LOG_2,
uint ROW_TILES_PER_STEP>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractTF32FwdKernel(const float *__restrict input,
float *__restrict output,
uint batch_size,
uint num_rows,
uint num_cols,
uint num_rows_after_padding,
uint num_cols_after_padding,
uint smem_elems_per_warp,
uint output_size,
uint num_row_steps,
uint num_col_steps,
uint smem_stride,
uint smem_stride_acc,
uint padding_size) {
// The only support sizes for TF32.
const uint kWmmaM = 16;
const uint kWmmaN = 16;
const uint kWmmaK = 8;
uint warp_id = threadIdx.x >> WARP_SIZE_LOG_2;
uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
int lane_id = threadIdx.x & (WARP_SIZE - 1);
extern __shared__ float shmem_dynamic_float[];
float *shmem = shmem_dynamic_float + (warp_id * smem_elems_per_warp);
const float *gmem_input = input + num_rows * num_cols * sample_id;
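// Stage this sample's rows in shared memory, rounding every value to TF32 so the
// WMMA tiles operate on consistent precision.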
if (lane_id < (num_cols >> 2)) {
for (int i = 0; i < num_rows; ++i, gmem_input += num_cols) {
float4 tmp = ((float4 *)gmem_input)[lane_id];
tmp.x = wmma::__float_to_tf32(tmp.x);
tmp.y = wmma::__float_to_tf32(tmp.y);
tmp.z = wmma::__float_to_tf32(tmp.z);
tmp.w = wmma::__float_to_tf32(tmp.w);
((float4 *)(shmem + i * smem_stride))[lane_id] = tmp;
}
}
float zero = wmma::__float_to_tf32(0.0f);
float4 zero4;
zero4.x = zero;
zero4.y = zero;
zero4.z = zero;
zero4.w = zero;
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (uint i = 0; i < num_rows; ++i) {
(shmem + i * smem_stride)[idx] = zero;
}
}
if (lane_id < (num_cols_after_padding >> 2)) {
for (int i = num_rows; i < num_rows_after_padding; i++) {
((float4 *)(shmem + i * smem_stride))[lane_id] = zero4;
}
}
__syncwarp();
// TODO: MTMD - Copy directly without using shared memory
float *gmem_output = output + output_size * sample_id;
if (lane_id < (num_cols >> 2)) {
((float4 *)gmem_output)[lane_id] = ((float4 *)shmem)[lane_id];
}
wmma::fragment<wmma::accumulator, kWmmaM, kWmmaN, kWmmaK, float> acc[ROW_TILES_PER_STEP][ROW_TILES_PER_STEP];
for (int i = 0; i < ROW_TILES_PER_STEP; i++) {
for (int j = 0; j < ROW_TILES_PER_STEP; j++) {
wmma::fill_fragment(acc[i][j], zero);
}
}
// TODO: MTMD - Loop promotion
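// Accumulate the pairwise dot products as a tiled S * S^T: a and b load the same tile,
// but a is row-major and b is col-major, so b acts as the transpose.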
for (int k_step = 0; k_step < num_col_steps; k_step++) {
wmma::fragment<wmma::matrix_a, kWmmaM, kWmmaN, kWmmaK, wmma::precision::tf32, wmma::row_major>
a[ROW_TILES_PER_STEP];
wmma::fragment<wmma::matrix_b, kWmmaM, kWmmaN, kWmmaK, wmma::precision::tf32, wmma::col_major>
b[ROW_TILES_PER_STEP];
for (int j = 0; j < ROW_TILES_PER_STEP; j++) {
int base_row = (j < ROW_TILES_PER_STEP - 1) ? j * 16 : num_rows_after_padding - 16;
const float *tile_ptr = shmem + (base_row * smem_stride + k_step * kWmmaK);
wmma::load_matrix_sync(a[j], tile_ptr, smem_stride);
wmma::load_matrix_sync(b[j], tile_ptr, smem_stride);
}
for (int i = 0; i < ROW_TILES_PER_STEP; i++) {
for (int j = 0; j < ROW_TILES_PER_STEP; j++) {
wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
}
}
}
for (int i = 0; i < ROW_TILES_PER_STEP; i++) {
for (int j = 0; j < ROW_TILES_PER_STEP; j++) {
float *tile_ptr = shmem + (i * kWmmaM * smem_stride_acc + j * kWmmaN);
wmma::store_matrix_sync(tile_ptr, acc[i][j], smem_stride_acc, wmma::mem_row_major);
}
}
float *gmem_interact_output = gmem_output + num_cols;
int lastRowBlockOffset = ROW_TILES_PER_STEP * 16 - num_rows_after_padding;
int src_line = 0;
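// Copy the strictly lower triangle (i > j) of the interaction matrix into the flattened
// output; src_line skips the padded rows folded into the last 16-row tile.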
for (int i = 0; i < num_rows; ++i, ++src_line) {
if (i == ((ROW_TILES_PER_STEP - 1) * 16)) {
src_line += lastRowBlockOffset;
}
if (lane_id < i) {
uint offset = (i * (i - 1)) >> 1;
gmem_interact_output[offset + lane_id] = shmem[src_line * smem_stride_acc + lane_id];
}
}
// Add padding to the output vectors
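// (Note: __float2half(0) is implicitly converted back to float here; writing 0.0f
// would be equivalent for this float output.)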
if (lane_id < padding_size) {
gmem_output[output_size - lane_id - 1] = __float2half(0);
}
}
template <uint WARPS_PER_BLOCK,
uint THREADBLOCK_SIZE,
uint WARP_SIZE,
uint WARP_SIZE_LOG_2,
uint FRAG_A_ROWS,
uint FRAG_B_COLS,
uint TILE_LENGTH,
uint TILE_LENGTH_LOG_2,
uint TILE_WIDTH,
uint TILE_WIDTH_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
void dotBasedInteractTF32BwdKernel(const float *__restrict input,
const float *__restrict upstream_grad,
float *__restrict grad,
float *__restrict bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
uint num_rows_after_padding,
uint num_cols_after_padding,
uint sample_size,
uint interaction_ugrad_size,
uint interaction_ugrad_size_with_padding,
uint interaction_ugrad_2D_size_elems,
uint interaction_ugrad_2D_stride,
uint input_size_elems,
uint input_stride,
uint shared_mem_per_warp_size_elems,
uint num_k_steps,
uint num_n_steps) {
// The only support sizes for TF32.
const uint kWmmaM = 16;
const uint kWmmaN = 16;
const uint kWmmaK = 8;
extern __shared__ float shared_mem_float[];
uint warp_id = threadIdx.x >> WARP_SIZE_LOG_2;
uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
uint lane_id = threadIdx.x & (WARP_SIZE - 1);
uint smem_warp_offset = warp_id * shared_mem_per_warp_size_elems;
float *smem_in = &shared_mem_float[smem_warp_offset];
float *smem_ugrad = &shared_mem_float[smem_warp_offset + input_size_elems];
float *smem_out = &shared_mem_float[smem_warp_offset + input_size_elems + interaction_ugrad_2D_size_elems];
// Global memory pointers for the current sample
// Input
uint gmem_input_sample_offset = sample_id * sample_size;
const float *gmem_input = &input[gmem_input_sample_offset];
// Interaction Gradient
const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
float *gmem_grad = &grad[gmem_grad_sample_offset];
// Bottom MLP gradient
float *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
// Upstream gradient vector
uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
const float *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
// Upstream gradient vector for interactions
const float *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
// upstream grad -> shared memory (place in input section temporarily)
#pragma unroll
for (uint idx = lane_id; idx < (interaction_ugrad_size >> 2); idx += WARP_SIZE) {
float4 tmp = ((float4 *)gmem_ugrad_interactions)[idx];
tmp.x = wmma::__float_to_tf32(tmp.x);
tmp.y = wmma::__float_to_tf32(tmp.y);
tmp.z = wmma::__float_to_tf32(tmp.z);
tmp.w = wmma::__float_to_tf32(tmp.w);
((float4 *)smem_in)[idx] = tmp;
}
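  // Copy the tail elements left over when interaction_ugrad_size is not a multiple of 4.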
uint offset = (interaction_ugrad_size >> 2) << 2;
for (uint idx = lane_id + offset; idx < interaction_ugrad_size; idx += WARP_SIZE) {
smem_in[idx] = wmma::__float_to_tf32(gmem_ugrad_interactions[idx]);
}
__syncwarp();
float zero = wmma::__float_to_tf32(0.0f);
float4 zero4;
zero4.x = zero;
zero4.y = zero;
zero4.z = zero;
zero4.w = zero;
// Form the 2D ugrad matrix.
if (lane_id < num_rows_after_padding) {
uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
for (uint row = 0; row < num_rows; row++) {
float ugrad_val = zero;
if (row < lane_id && lane_id < num_rows) {
ugrad_val = smem_in[ugrad_flat_index + row];
smem_ugrad[ugrad_offset_1 + row] = ugrad_val;
}
if (row <= lane_id && lane_id < num_rows_after_padding) {
smem_ugrad[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
}
}
for (uint row = num_rows; row < num_rows_after_padding; row++) {
smem_ugrad[row * interaction_ugrad_2D_stride + lane_id] = zero;
}
}
__syncwarp();
// Input -> Shared Memory
if (lane_id < (num_cols >> 2)) {
for (uint row = 0; row < num_rows; row++) {
float *smem_row_ptr = &smem_in[row * input_stride];
const float *gmem_row_ptr = &gmem_input[row * num_cols];
float4 tmp = ((float4 *)gmem_row_ptr)[lane_id];
tmp.x = wmma::__float_to_tf32(tmp.x);
tmp.y = wmma::__float_to_tf32(tmp.y);
tmp.z = wmma::__float_to_tf32(tmp.z);
tmp.w = wmma::__float_to_tf32(tmp.w);
((float4 *)smem_row_ptr)[lane_id] = tmp;
}
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (uint row = 0; row < num_rows; row++) {
float *smem_row_ptr = &smem_in[row * input_stride];
smem_row_ptr[idx] = zero;
}
}
if (lane_id < (num_cols_after_padding >> 2)) {
#pragma unroll 2
for (uint row = num_rows; row < num_rows_after_padding; row++) {
float *smem_row_ptr = &smem_in[row * input_stride];
((float4 *)smem_row_ptr)[lane_id] = zero4;
}
}
__syncwarp();
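  // Backward GEMM over column tiles: grad = ugrad_2D (num_rows x num_rows) * input
  // (num_rows x num_cols), accumulated in FRAG_A_ROWS x FRAG_B_COLS blocks of TF32 fragments.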
wmma::fragment<wmma::matrix_a, kWmmaM, kWmmaN, kWmmaK, wmma::precision::tf32, wmma::row_major> a[FRAG_A_ROWS];
wmma::fragment<wmma::matrix_b, kWmmaM, kWmmaN, kWmmaK, wmma::precision::tf32, wmma::row_major> b[FRAG_B_COLS];
wmma::fragment<wmma::accumulator, kWmmaM, kWmmaN, kWmmaK, float> acc[FRAG_A_ROWS][FRAG_B_COLS];
for (uint n = 0; n < num_n_steps; n++) {
for (uint i = 0; i < FRAG_A_ROWS; i++) {
for (uint j = 0; j < FRAG_B_COLS; j++) {
wmma::fill_fragment(acc[i][j], zero);
}
}
for (uint k = 0; k < num_k_steps; k++) {
for (uint i = 0; i < FRAG_A_ROWS; i++) {
const float *mat_a_tile_ptr =
smem_ugrad + (i << TILE_LENGTH_LOG_2) * interaction_ugrad_2D_stride + (k << TILE_WIDTH_LOG_2);
wmma::load_matrix_sync(a[i], mat_a_tile_ptr, interaction_ugrad_2D_stride);
}
for (uint j = 0; j < FRAG_B_COLS; j++) {
const float *mat_b_tile_ptr =
smem_in + (k << TILE_WIDTH_LOG_2) * input_stride + ((2 * n + j) << TILE_LENGTH_LOG_2);
wmma::load_matrix_sync(b[j], mat_b_tile_ptr, input_stride);
}
for (uint i = 0; i < FRAG_A_ROWS; i++) {
for (uint j = 0; j < FRAG_B_COLS; j++) {
wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
}
}
}
// __syncwarp(); ?
uint out_stride = FRAG_B_COLS << TILE_LENGTH_LOG_2;
for (uint i = 0; i < FRAG_A_ROWS; i++) {
for (uint j = 0; j < FRAG_B_COLS; j++) {
float *out_tile_ptr = smem_out + (i << TILE_LENGTH_LOG_2) * out_stride + (j << TILE_LENGTH_LOG_2);
wmma::store_matrix_sync(out_tile_ptr, acc[i][j], out_stride, wmma::mem_row_major);
}
}
uint gmem_grad_col = n * (FRAG_B_COLS << TILE_LENGTH_LOG_2) + lane_id;
for (uint i = 0; i < num_rows; i++) {
gmem_grad[i * num_cols + gmem_grad_col] = smem_out[i * out_stride + lane_id];
}
}
if (lane_id < (num_cols >> 2)) {
((float4 *)gmem_mlp_grad)[lane_id] = ((float4 *)gmem_ugrad)[lane_id];
}
}
| 0b3c9b549e9f238f36c8d5a52fec2bd6cde05b50.cu | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <mma.h>
#include <cuda_fp16.hpp>
#include <math.h>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <vector>
#include "dot_based_interact_shared_utils.cuh"
using namespace nvcuda;
template <uint WARPS_PER_BLOCK,
uint THREADBLOCK_SIZE,
uint WARP_SIZE,
uint WARP_SIZE_LOG_2,
uint TILE_LENGTH,
uint TILE_LENGTH_LOG_2,
uint TILE_WIDTH,
uint TILE_WIDTH_LOG_2,
uint ROW_TILES_PER_STEP>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractTF32FwdKernel(const float *__restrict input,
float *__restrict output,
uint batch_size,
uint num_rows,
uint num_cols,
uint num_rows_after_padding,
uint num_cols_after_padding,
uint smem_elems_per_warp,
uint output_size,
uint num_row_steps,
uint num_col_steps,
uint smem_stride,
uint smem_stride_acc,
uint padding_size) {
  // The only supported sizes for TF32.
const uint kWmmaM = 16;
const uint kWmmaN = 16;
const uint kWmmaK = 8;
uint warp_id = threadIdx.x >> WARP_SIZE_LOG_2;
uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
int lane_id = threadIdx.x & (WARP_SIZE - 1);
extern __shared__ float shmem_dynamic_float[];
float *shmem = shmem_dynamic_float + (warp_id * smem_elems_per_warp);
const float *gmem_input = input + num_rows * num_cols * sample_id;
if (lane_id < (num_cols >> 2)) {
for (int i = 0; i < num_rows; ++i, gmem_input += num_cols) {
float4 tmp = ((float4 *)gmem_input)[lane_id];
tmp.x = wmma::__float_to_tf32(tmp.x);
tmp.y = wmma::__float_to_tf32(tmp.y);
tmp.z = wmma::__float_to_tf32(tmp.z);
tmp.w = wmma::__float_to_tf32(tmp.w);
((float4 *)(shmem + i * smem_stride))[lane_id] = tmp;
}
}
float zero = wmma::__float_to_tf32(0.0f);
float4 zero4;
zero4.x = zero;
zero4.y = zero;
zero4.z = zero;
zero4.w = zero;
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (uint i = 0; i < num_rows; ++i) {
(shmem + i * smem_stride)[idx] = zero;
}
}
if (lane_id < (num_cols_after_padding >> 2)) {
for (int i = num_rows; i < num_rows_after_padding; i++) {
((float4 *)(shmem + i * smem_stride))[lane_id] = zero4;
}
}
__syncwarp();
// TODO: MTMD - Copy directly without using shared memory
float *gmem_output = output + output_size * sample_id;
if (lane_id < (num_cols >> 2)) {
((float4 *)gmem_output)[lane_id] = ((float4 *)shmem)[lane_id];
}
wmma::fragment<wmma::accumulator, kWmmaM, kWmmaN, kWmmaK, float> acc[ROW_TILES_PER_STEP][ROW_TILES_PER_STEP];
for (int i = 0; i < ROW_TILES_PER_STEP; i++) {
for (int j = 0; j < ROW_TILES_PER_STEP; j++) {
wmma::fill_fragment(acc[i][j], zero);
}
}
// TODO: MTMD - Loop promotion
for (int k_step = 0; k_step < num_col_steps; k_step++) {
wmma::fragment<wmma::matrix_a, kWmmaM, kWmmaN, kWmmaK, wmma::precision::tf32, wmma::row_major>
a[ROW_TILES_PER_STEP];
wmma::fragment<wmma::matrix_b, kWmmaM, kWmmaN, kWmmaK, wmma::precision::tf32, wmma::col_major>
b[ROW_TILES_PER_STEP];
for (int j = 0; j < ROW_TILES_PER_STEP; j++) {
int base_row = (j < ROW_TILES_PER_STEP - 1) ? j * 16 : num_rows_after_padding - 16;
const float *tile_ptr = shmem + (base_row * smem_stride + k_step * kWmmaK);
wmma::load_matrix_sync(a[j], tile_ptr, smem_stride);
wmma::load_matrix_sync(b[j], tile_ptr, smem_stride);
}
for (int i = 0; i < ROW_TILES_PER_STEP; i++) {
for (int j = 0; j < ROW_TILES_PER_STEP; j++) {
wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
}
}
}
for (int i = 0; i < ROW_TILES_PER_STEP; i++) {
for (int j = 0; j < ROW_TILES_PER_STEP; j++) {
float *tile_ptr = shmem + (i * kWmmaM * smem_stride_acc + j * kWmmaN);
wmma::store_matrix_sync(tile_ptr, acc[i][j], smem_stride_acc, wmma::mem_row_major);
}
}
float *gmem_interact_output = gmem_output + num_cols;
int lastRowBlockOffset = ROW_TILES_PER_STEP * 16 - num_rows_after_padding;
int src_line = 0;
for (int i = 0; i < num_rows; ++i, ++src_line) {
if (i == ((ROW_TILES_PER_STEP - 1) * 16)) {
src_line += lastRowBlockOffset;
}
if (lane_id < i) {
uint offset = (i * (i - 1)) >> 1;
gmem_interact_output[offset + lane_id] = shmem[src_line * smem_stride_acc + lane_id];
}
}
// Add padding to the output vectors
if (lane_id < padding_size) {
gmem_output[output_size - lane_id - 1] = __float2half(0);
}
}
template <uint WARPS_PER_BLOCK,
uint THREADBLOCK_SIZE,
uint WARP_SIZE,
uint WARP_SIZE_LOG_2,
uint FRAG_A_ROWS,
uint FRAG_B_COLS,
uint TILE_LENGTH,
uint TILE_LENGTH_LOG_2,
uint TILE_WIDTH,
uint TILE_WIDTH_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
void dotBasedInteractTF32BwdKernel(const float *__restrict input,
const float *__restrict upstream_grad,
float *__restrict grad,
float *__restrict bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
uint num_rows_after_padding,
uint num_cols_after_padding,
uint sample_size,
uint interaction_ugrad_size,
uint interaction_ugrad_size_with_padding,
uint interaction_ugrad_2D_size_elems,
uint interaction_ugrad_2D_stride,
uint input_size_elems,
uint input_stride,
uint shared_mem_per_warp_size_elems,
uint num_k_steps,
uint num_n_steps) {
  // The only supported sizes for TF32.
const uint kWmmaM = 16;
const uint kWmmaN = 16;
const uint kWmmaK = 8;
extern __shared__ float shared_mem_float[];
uint warp_id = threadIdx.x >> WARP_SIZE_LOG_2;
uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
uint lane_id = threadIdx.x & (WARP_SIZE - 1);
uint smem_warp_offset = warp_id * shared_mem_per_warp_size_elems;
float *smem_in = &shared_mem_float[smem_warp_offset];
float *smem_ugrad = &shared_mem_float[smem_warp_offset + input_size_elems];
float *smem_out = &shared_mem_float[smem_warp_offset + input_size_elems + interaction_ugrad_2D_size_elems];
// Global memory pointers for the current sample
// Input
uint gmem_input_sample_offset = sample_id * sample_size;
const float *gmem_input = &input[gmem_input_sample_offset];
// Interaction Gradient
const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
float *gmem_grad = &grad[gmem_grad_sample_offset];
// Bottom MLP gradient
float *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
// Upstream gradient vector
uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
const float *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
// Upstream gradient vector for interactions
const float *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
// upstream grad -> shared memory (place in input section temporarily)
#pragma unroll
for (uint idx = lane_id; idx < (interaction_ugrad_size >> 2); idx += WARP_SIZE) {
float4 tmp = ((float4 *)gmem_ugrad_interactions)[idx];
tmp.x = wmma::__float_to_tf32(tmp.x);
tmp.y = wmma::__float_to_tf32(tmp.y);
tmp.z = wmma::__float_to_tf32(tmp.z);
tmp.w = wmma::__float_to_tf32(tmp.w);
((float4 *)smem_in)[idx] = tmp;
}
uint offset = (interaction_ugrad_size >> 2) << 2;
for (uint idx = lane_id + offset; idx < interaction_ugrad_size; idx += WARP_SIZE) {
smem_in[idx] = wmma::__float_to_tf32(gmem_ugrad_interactions[idx]);
}
__syncwarp();
float zero = wmma::__float_to_tf32(0.0f);
float4 zero4;
zero4.x = zero;
zero4.y = zero;
zero4.z = zero;
zero4.w = zero;
// Form the 2D ugrad matrix.
if (lane_id < num_rows_after_padding) {
uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
for (uint row = 0; row < num_rows; row++) {
float ugrad_val = zero;
if (row < lane_id && lane_id < num_rows) {
ugrad_val = smem_in[ugrad_flat_index + row];
smem_ugrad[ugrad_offset_1 + row] = ugrad_val;
}
if (row <= lane_id && lane_id < num_rows_after_padding) {
smem_ugrad[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
}
}
for (uint row = num_rows; row < num_rows_after_padding; row++) {
smem_ugrad[row * interaction_ugrad_2D_stride + lane_id] = zero;
}
}
__syncwarp();
// Input -> Shared Memory
if (lane_id < (num_cols >> 2)) {
for (uint row = 0; row < num_rows; row++) {
float *smem_row_ptr = &smem_in[row * input_stride];
const float *gmem_row_ptr = &gmem_input[row * num_cols];
float4 tmp = ((float4 *)gmem_row_ptr)[lane_id];
tmp.x = wmma::__float_to_tf32(tmp.x);
tmp.y = wmma::__float_to_tf32(tmp.y);
tmp.z = wmma::__float_to_tf32(tmp.z);
tmp.w = wmma::__float_to_tf32(tmp.w);
((float4 *)smem_row_ptr)[lane_id] = tmp;
}
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (uint row = 0; row < num_rows; row++) {
float *smem_row_ptr = &smem_in[row * input_stride];
smem_row_ptr[idx] = zero;
}
}
if (lane_id < (num_cols_after_padding >> 2)) {
#pragma unroll 2
for (uint row = num_rows; row < num_rows_after_padding; row++) {
float *smem_row_ptr = &smem_in[row * input_stride];
((float4 *)smem_row_ptr)[lane_id] = zero4;
}
}
__syncwarp();
wmma::fragment<wmma::matrix_a, kWmmaM, kWmmaN, kWmmaK, wmma::precision::tf32, wmma::row_major> a[FRAG_A_ROWS];
wmma::fragment<wmma::matrix_b, kWmmaM, kWmmaN, kWmmaK, wmma::precision::tf32, wmma::row_major> b[FRAG_B_COLS];
wmma::fragment<wmma::accumulator, kWmmaM, kWmmaN, kWmmaK, float> acc[FRAG_A_ROWS][FRAG_B_COLS];
for (uint n = 0; n < num_n_steps; n++) {
for (uint i = 0; i < FRAG_A_ROWS; i++) {
for (uint j = 0; j < FRAG_B_COLS; j++) {
wmma::fill_fragment(acc[i][j], zero);
}
}
for (uint k = 0; k < num_k_steps; k++) {
for (uint i = 0; i < FRAG_A_ROWS; i++) {
const float *mat_a_tile_ptr =
smem_ugrad + (i << TILE_LENGTH_LOG_2) * interaction_ugrad_2D_stride + (k << TILE_WIDTH_LOG_2);
wmma::load_matrix_sync(a[i], mat_a_tile_ptr, interaction_ugrad_2D_stride);
}
for (uint j = 0; j < FRAG_B_COLS; j++) {
const float *mat_b_tile_ptr =
smem_in + (k << TILE_WIDTH_LOG_2) * input_stride + ((2 * n + j) << TILE_LENGTH_LOG_2);
wmma::load_matrix_sync(b[j], mat_b_tile_ptr, input_stride);
}
for (uint i = 0; i < FRAG_A_ROWS; i++) {
for (uint j = 0; j < FRAG_B_COLS; j++) {
wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
}
}
}
// __syncwarp(); ?
uint out_stride = FRAG_B_COLS << TILE_LENGTH_LOG_2;
for (uint i = 0; i < FRAG_A_ROWS; i++) {
for (uint j = 0; j < FRAG_B_COLS; j++) {
float *out_tile_ptr = smem_out + (i << TILE_LENGTH_LOG_2) * out_stride + (j << TILE_LENGTH_LOG_2);
wmma::store_matrix_sync(out_tile_ptr, acc[i][j], out_stride, wmma::mem_row_major);
}
}
uint gmem_grad_col = n * (FRAG_B_COLS << TILE_LENGTH_LOG_2) + lane_id;
for (uint i = 0; i < num_rows; i++) {
gmem_grad[i * num_cols + gmem_grad_col] = smem_out[i * out_stride + lane_id];
}
}
if (lane_id < (num_cols >> 2)) {
((float4 *)gmem_mlp_grad)[lane_id] = ((float4 *)gmem_ugrad)[lane_id];
}
}
|
9414a8650a82bef06e8bd0eada075f5b8e7b2934.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layer.h"
// --------------------------------------------------------------------------
// kernel code
// transform_box: transform a box according to a given gradient
// enumerate_output: enumerate all RCNN box outputs
// --------------------------------------------------------------------------
// transform a box according to a given gradient
// box: (x1, y1, x2, y2)
// gradient: dx, dy, d(log w), d(log h)
#ifdef GPU
__device__
#endif
static
void transform_box(real box[],
const real dx, const real dy,
const real d_log_w, const real d_log_h,
const real img_W, const real img_H)
{
// width & height of box
const real w = box[2] - box[0] + 1.0f;
const real h = box[3] - box[1] + 1.0f;
// center location of box
const real ctr_x = box[0] + 0.5f * w;
const real ctr_y = box[1] + 0.5f * h;
// new center location according to gradient (dx, dy)
const real pred_ctr_x = dx * w + ctr_x;
const real pred_ctr_y = dy * h + ctr_y;
// new width & height according to gradient d(log w), d(log h)
const real pred_w = exp(d_log_w) * w;
const real pred_h = exp(d_log_h) * h;
// update upper-left corner location
box[0] = pred_ctr_x - 0.5f * pred_w;
box[1] = pred_ctr_y - 0.5f * pred_h;
// update lower-right corner location
box[2] = pred_ctr_x + 0.5f * pred_w;
box[3] = pred_ctr_y + 0.5f * pred_h;
// adjust new corner locations to be within the image region,
box[0] = MAX(0.0f, MIN(box[0], img_W - 1.0f));
box[1] = MAX(0.0f, MIN(box[1], img_H - 1.0f));
box[2] = MAX(0.0f, MIN(box[2], img_W - 1.0f));
box[3] = MAX(0.0f, MIN(box[3], img_H - 1.0f));
}
// enumerate all output boxes for each object class
// and resize boxes to raw image size
#ifdef GPU
__global__
static
void enumerate_output_gpu(const real bottom2d[],
const real d_anchor3d[],
const real roi2d[],
const int num_rois, const int num_classes,
const real img_H, const real img_W,
const real scale_H, const real scale_W,
real top2d[])
{
// index = c * num_rois + r
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num_rois * num_classes) {
const int r = index / num_classes;
const int c = index % num_classes;
const real* const p_d_anchor3d = d_anchor3d + index * 4;
const real dx = p_d_anchor3d[0];
const real dy = p_d_anchor3d[1];
const real d_log_w = p_d_anchor3d[2];
const real d_log_h = p_d_anchor3d[3];
const real* const p_roi2d = roi2d + r * 5;
real* const p_top2d = top2d + index * 6;
p_top2d[0] = c;
p_top2d[1] = p_roi2d[0];
p_top2d[2] = p_roi2d[1];
p_top2d[3] = p_roi2d[2];
p_top2d[4] = p_roi2d[3];
p_top2d[5] = bottom2d[index];
transform_box(p_top2d + 1, dx, dy, d_log_w, d_log_h, img_W, img_H);
// resize box to raw image size
p_top2d[1] /= scale_W;
p_top2d[2] /= scale_H;
p_top2d[3] /= scale_W;
p_top2d[4] /= scale_H;
}
}
#else
static
void enumerate_output_cpu(const real bottom2d[],
const real d_anchor3d[],
const real roi2d[],
const int num_rois, const int num_classes,
const real img_H, const real img_W,
const real scale_H, const real scale_W,
real top2d[])
{
for (int r = 0; r < num_rois; ++r) {
for (int c = 0; c < num_classes; ++c) {
const int index = r * num_classes + c;
const real* const p_d_anchor3d = d_anchor3d + index * 4;
const real dx = p_d_anchor3d[0];
const real dy = p_d_anchor3d[1];
const real d_log_w = p_d_anchor3d[2];
const real d_log_h = p_d_anchor3d[3];
const real* const p_roi2d = roi2d + r * 5;
real* const p_top2d = top2d + index * 6;
p_top2d[0] = c;
p_top2d[1] = p_roi2d[0];
p_top2d[2] = p_roi2d[1];
p_top2d[3] = p_roi2d[2];
p_top2d[4] = p_roi2d[3];
p_top2d[5] = bottom2d[index];
transform_box(p_top2d + 1, dx, dy, d_log_w, d_log_h, img_W, img_H);
p_top2d[1] /= scale_W;
p_top2d[2] /= scale_H;
p_top2d[3] /= scale_W;
p_top2d[4] /= scale_H;
}
}
}
#endif
// --------------------------------------------------------------------------
// layer operator code
// --------------------------------------------------------------------------
static
void odtest_forward(const Tensor* const bottom2d,
const Tensor* const d_anchor3d,
const Tensor* const roi2d,
const Tensor* const img_info1d,
Tensor* const top2d,
const LayerOption* const option)
{
// do forward-pass for each item in the batch
const real* p_bottom_item = bottom2d->data;
const real* p_d_anchor_item = d_anchor3d->data;
const real* p_roi_item = roi2d->data;
const real* p_img_info = img_info1d->data;
real* p_top_item = top2d->data;
for (int n = 0; n < bottom2d->num_items; ++n) {
const int num_rois = bottom2d->shape[n][0];
const int num_classes = bottom2d->shape[n][1];
// input image height & width
const real img_H = p_img_info[0];
const real img_W = p_img_info[1];
// scale factor for height & width
const real scale_H = p_img_info[2];
const real scale_W = p_img_info[3];
// enumerate all RCNN box outputs ("num_rois * num_classes" outputs)
#ifdef GPU
{
const int num_threads = num_rois * num_classes;
const int threads_per_block = 256;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
hipLaunchKernelGGL(( enumerate_output_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
p_bottom_item, p_d_anchor_item, p_roi_item,
num_rois, num_classes, img_H, img_W, scale_H, scale_W,
p_top_item);
}
#else
{
enumerate_output_cpu(
p_bottom_item, p_d_anchor_item, p_roi_item,
num_rois, num_classes, img_H, img_W, scale_H, scale_W,
p_top_item);
}
#endif
// set top shape: (num_rois * num_classes) x 6
// (class index, x1, y1, x2, y2, score) for each output
top2d->shape[n][0] = num_rois * num_classes;
top2d->shape[n][1] = 6;
// locate next item
{
const int bottom_size = num_rois * num_classes;
const int d_anchor_size = bottom_size * 4;
const int roi_size = num_rois * 5;
const int img_info_size = 6;
const int top_size = bottom_size * 6;
p_bottom_item += bottom_size;
p_d_anchor_item += d_anchor_size;
p_roi_item += roi_size;
p_img_info += img_info_size;
p_top_item += top_size;
}
} // endfor batch
top2d->ndim = 2;
top2d->num_items = bottom2d->num_items;
{
int total_size = 0;
for (int n = 0; n < bottom2d->num_items; ++n) {
const int top_size = top2d->shape[n][0] * top2d->shape[n][1];
top2d->start[n] = total_size;
total_size += top_size;
}
}
}
// --------------------------------------------------------------------------
// layer shape calculator code
// --------------------------------------------------------------------------
static
void odtest_shape(const Tensor* const bottom2d,
Tensor* const top2d,
const LayerOption* const option)
{
int total_num_rois = 0;
// calculate shape for each item in the batch
top2d->ndim = 2;
top2d->num_items = bottom2d->num_items;
for (int n = 0; n < bottom2d->num_items; ++n) {
const int num_rois = bottom2d->shape[n][0];
const int num_classes = bottom2d->shape[n][1];
// calculate total number of RoIs for determining temporary space size
total_num_rois += num_rois * num_classes;
// top shape = (num_rois * num_classes) x 6
// (class index, x1, y1, x2, y2, score) for each output
top2d->shape[n][0] = num_rois * num_classes;
top2d->shape[n][1] = 6;
top2d->start[n] = total_num_rois * 6;
}
}
// --------------------------------------------------------------------------
// API code
// --------------------------------------------------------------------------
void forward_odtest_layer(void* const net_, void* const layer_)
{
Layer* const layer = (Layer*)layer_;
odtest_forward(layer->p_bottoms[0], layer->p_bottoms[1],
layer->p_bottoms[2], layer->p_bottoms[3],
layer->p_tops[0],
&layer->option);
}
void shape_odtest_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
odtest_shape(layer->p_bottoms[0], layer->p_tops[0],
&layer->option);
update_net_size(net, layer, 0, 0, 0);
}
| 9414a8650a82bef06e8bd0eada075f5b8e7b2934.cu | #include "layer.h"
// --------------------------------------------------------------------------
// kernel code
// transform_box: transform a box according to a given gradient
// enumerate_output: enumerate all RCNN box outputs
// --------------------------------------------------------------------------
// transform a box according to a given gradient
// box: (x1, y1, x2, y2)
// gradient: dx, dy, d(log w), d(log h)
#ifdef GPU
__device__
#endif
static
void transform_box(real box[],
const real dx, const real dy,
const real d_log_w, const real d_log_h,
const real img_W, const real img_H)
{
// width & height of box
const real w = box[2] - box[0] + 1.0f;
const real h = box[3] - box[1] + 1.0f;
// center location of box
const real ctr_x = box[0] + 0.5f * w;
const real ctr_y = box[1] + 0.5f * h;
// new center location according to gradient (dx, dy)
const real pred_ctr_x = dx * w + ctr_x;
const real pred_ctr_y = dy * h + ctr_y;
// new width & height according to gradient d(log w), d(log h)
const real pred_w = exp(d_log_w) * w;
const real pred_h = exp(d_log_h) * h;
// update upper-left corner location
box[0] = pred_ctr_x - 0.5f * pred_w;
box[1] = pred_ctr_y - 0.5f * pred_h;
// update lower-right corner location
box[2] = pred_ctr_x + 0.5f * pred_w;
box[3] = pred_ctr_y + 0.5f * pred_h;
// adjust new corner locations to be within the image region,
box[0] = MAX(0.0f, MIN(box[0], img_W - 1.0f));
box[1] = MAX(0.0f, MIN(box[1], img_H - 1.0f));
box[2] = MAX(0.0f, MIN(box[2], img_W - 1.0f));
box[3] = MAX(0.0f, MIN(box[3], img_H - 1.0f));
}
// enumerate all output boxes for each object class
// and resize boxes to raw image size
#ifdef GPU
__global__
static
void enumerate_output_gpu(const real bottom2d[],
const real d_anchor3d[],
const real roi2d[],
const int num_rois, const int num_classes,
const real img_H, const real img_W,
const real scale_H, const real scale_W,
real top2d[])
{
// index = c * num_rois + r
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num_rois * num_classes) {
const int r = index / num_classes;
const int c = index % num_classes;
const real* const p_d_anchor3d = d_anchor3d + index * 4;
const real dx = p_d_anchor3d[0];
const real dy = p_d_anchor3d[1];
const real d_log_w = p_d_anchor3d[2];
const real d_log_h = p_d_anchor3d[3];
const real* const p_roi2d = roi2d + r * 5;
real* const p_top2d = top2d + index * 6;
p_top2d[0] = c;
p_top2d[1] = p_roi2d[0];
p_top2d[2] = p_roi2d[1];
p_top2d[3] = p_roi2d[2];
p_top2d[4] = p_roi2d[3];
p_top2d[5] = bottom2d[index];
transform_box(p_top2d + 1, dx, dy, d_log_w, d_log_h, img_W, img_H);
// resize box to raw image size
p_top2d[1] /= scale_W;
p_top2d[2] /= scale_H;
p_top2d[3] /= scale_W;
p_top2d[4] /= scale_H;
}
}
#else
static
void enumerate_output_cpu(const real bottom2d[],
const real d_anchor3d[],
const real roi2d[],
const int num_rois, const int num_classes,
const real img_H, const real img_W,
const real scale_H, const real scale_W,
real top2d[])
{
for (int r = 0; r < num_rois; ++r) {
for (int c = 0; c < num_classes; ++c) {
const int index = r * num_classes + c;
const real* const p_d_anchor3d = d_anchor3d + index * 4;
const real dx = p_d_anchor3d[0];
const real dy = p_d_anchor3d[1];
const real d_log_w = p_d_anchor3d[2];
const real d_log_h = p_d_anchor3d[3];
const real* const p_roi2d = roi2d + r * 5;
real* const p_top2d = top2d + index * 6;
p_top2d[0] = c;
p_top2d[1] = p_roi2d[0];
p_top2d[2] = p_roi2d[1];
p_top2d[3] = p_roi2d[2];
p_top2d[4] = p_roi2d[3];
p_top2d[5] = bottom2d[index];
transform_box(p_top2d + 1, dx, dy, d_log_w, d_log_h, img_W, img_H);
p_top2d[1] /= scale_W;
p_top2d[2] /= scale_H;
p_top2d[3] /= scale_W;
p_top2d[4] /= scale_H;
}
}
}
#endif
// --------------------------------------------------------------------------
// layer operator code
// --------------------------------------------------------------------------
static
void odtest_forward(const Tensor* const bottom2d,
const Tensor* const d_anchor3d,
const Tensor* const roi2d,
const Tensor* const img_info1d,
Tensor* const top2d,
const LayerOption* const option)
{
// do forward-pass for each item in the batch
const real* p_bottom_item = bottom2d->data;
const real* p_d_anchor_item = d_anchor3d->data;
const real* p_roi_item = roi2d->data;
const real* p_img_info = img_info1d->data;
real* p_top_item = top2d->data;
for (int n = 0; n < bottom2d->num_items; ++n) {
const int num_rois = bottom2d->shape[n][0];
const int num_classes = bottom2d->shape[n][1];
// input image height & width
const real img_H = p_img_info[0];
const real img_W = p_img_info[1];
// scale factor for height & width
const real scale_H = p_img_info[2];
const real scale_W = p_img_info[3];
// enumerate all RCNN box outputs ("num_rois * num_classes" outputs)
#ifdef GPU
{
const int num_threads = num_rois * num_classes;
const int threads_per_block = 256;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
enumerate_output_gpu<<<num_blocks, threads_per_block>>>(
p_bottom_item, p_d_anchor_item, p_roi_item,
num_rois, num_classes, img_H, img_W, scale_H, scale_W,
p_top_item);
}
#else
{
enumerate_output_cpu(
p_bottom_item, p_d_anchor_item, p_roi_item,
num_rois, num_classes, img_H, img_W, scale_H, scale_W,
p_top_item);
}
#endif
// set top shape: (num_rois * num_classes) x 6
// (class index, x1, y1, x2, y2, score) for each output
top2d->shape[n][0] = num_rois * num_classes;
top2d->shape[n][1] = 6;
// locate next item
{
const int bottom_size = num_rois * num_classes;
const int d_anchor_size = bottom_size * 4;
const int roi_size = num_rois * 5;
const int img_info_size = 6;
const int top_size = bottom_size * 6;
p_bottom_item += bottom_size;
p_d_anchor_item += d_anchor_size;
p_roi_item += roi_size;
p_img_info += img_info_size;
p_top_item += top_size;
}
} // endfor batch
top2d->ndim = 2;
top2d->num_items = bottom2d->num_items;
{
int total_size = 0;
for (int n = 0; n < bottom2d->num_items; ++n) {
const int top_size = top2d->shape[n][0] * top2d->shape[n][1];
top2d->start[n] = total_size;
total_size += top_size;
}
}
}
// --------------------------------------------------------------------------
// layer shape calculator code
// --------------------------------------------------------------------------
static
void odtest_shape(const Tensor* const bottom2d,
Tensor* const top2d,
const LayerOption* const option)
{
int total_num_rois = 0;
// calculate shape for each item in the batch
top2d->ndim = 2;
top2d->num_items = bottom2d->num_items;
for (int n = 0; n < bottom2d->num_items; ++n) {
const int num_rois = bottom2d->shape[n][0];
const int num_classes = bottom2d->shape[n][1];
// calculate total number of RoIs for determining temporary space size
total_num_rois += num_rois * num_classes;
// top shape = (num_rois * num_classes) x 6
// (class index, x1, y1, x2, y2, score) for each output
top2d->shape[n][0] = num_rois * num_classes;
top2d->shape[n][1] = 6;
top2d->start[n] = total_num_rois * 6;
}
}
// --------------------------------------------------------------------------
// API code
// --------------------------------------------------------------------------
void forward_odtest_layer(void* const net_, void* const layer_)
{
Layer* const layer = (Layer*)layer_;
odtest_forward(layer->p_bottoms[0], layer->p_bottoms[1],
layer->p_bottoms[2], layer->p_bottoms[3],
layer->p_tops[0],
&layer->option);
}
void shape_odtest_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
odtest_shape(layer->p_bottoms[0], layer->p_tops[0],
&layer->option);
update_net_size(net, layer, 0, 0, 0);
}
|
1e4ededa4d6e87b574ce449a15ec95bc808733bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void xmpC2S_kernel(uint32_t N, uint32_t limbs, uint32_t stride, const uint32_t * in, uint32_t * out) {
//outer dimension = N
//inner dimension = limbs
  //read strided in inner dimension
//write coalesced in outer dimension
for(uint32_t i=blockIdx.x*blockDim.x+threadIdx.x;i<N;i+=blockDim.x*gridDim.x) {
for(uint32_t j=blockIdx.y*blockDim.y+threadIdx.y;j<limbs;j+=blockDim.y*gridDim.y) {
out[j*stride + i] = in[i*limbs + j];
}
}
} | 1e4ededa4d6e87b574ce449a15ec95bc808733bd.cu | #include "includes.h"
__global__ void xmpC2S_kernel(uint32_t N, uint32_t limbs, uint32_t stride, const uint32_t * in, uint32_t * out) {
//outer dimension = N
//inner dimension = limbs
  //read strided in inner dimension
//write coalesced in outer dimension
for(uint32_t i=blockIdx.x*blockDim.x+threadIdx.x;i<N;i+=blockDim.x*gridDim.x) {
for(uint32_t j=blockIdx.y*blockDim.y+threadIdx.y;j<limbs;j+=blockDim.y*gridDim.y) {
out[j*stride + i] = in[i*limbs + j];
}
}
} |
348907e790df5b20c371d11f518c5bb247884cbf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_yvel_plus_2_back [3][2];
static int dims_update_halo_kernel2_yvel_plus_2_back_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_yvel_plus_2_back_gpu(ACC<double> &yvel0,
ACC<double> &yvel1,
const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0(0,0,0) = yvel0(0,0,2);
if(fields[FIELD_YVEL1] == 1) yvel1(0,0,0) = yvel1(0,0,2);
}
__global__ void ops_update_halo_kernel2_yvel_plus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_2_back[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_2_back[0][0] * dims_update_halo_kernel2_yvel_plus_2_back[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_2_back[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_2_back[1][0] * dims_update_halo_kernel2_yvel_plus_2_back[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_yvel_plus_2_back[0][0], dims_update_halo_kernel2_yvel_plus_2_back[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_yvel_plus_2_back[1][0], dims_update_halo_kernel2_yvel_plus_2_back[1][1], arg1);
update_halo_kernel2_yvel_plus_2_back_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_plus_2_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,45)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(45,"update_halo_kernel2_yvel_plus_2_back");
OPS_kernels[45].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_yvel_plus_2_back_h[0][0] || ydim0 != dims_update_halo_kernel2_yvel_plus_2_back_h[0][1] || xdim1 != dims_update_halo_kernel2_yvel_plus_2_back_h[1][0] || ydim1 != dims_update_halo_kernel2_yvel_plus_2_back_h[1][1]) {
dims_update_halo_kernel2_yvel_plus_2_back_h[0][0] = xdim0;
dims_update_halo_kernel2_yvel_plus_2_back_h[0][1] = ydim0;
dims_update_halo_kernel2_yvel_plus_2_back_h[1][0] = xdim1;
dims_update_halo_kernel2_yvel_plus_2_back_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_yvel_plus_2_back, dims_update_halo_kernel2_yvel_plus_2_back_h, sizeof(dims_update_halo_kernel2_yvel_plus_2_back)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[45].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_plus_2_back), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[45].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[45].mpi_time += t2-t1;
OPS_kernels[45].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[45].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 45;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 45;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_2_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(45,"update_halo_kernel2_yvel_plus_2_back");
}
ops_enqueue_kernel(desc);
}
#endif
| 348907e790df5b20c371d11f518c5bb247884cbf.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_yvel_plus_2_back [3][2];
static int dims_update_halo_kernel2_yvel_plus_2_back_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_yvel_plus_2_back_gpu(ACC<double> &yvel0,
ACC<double> &yvel1,
const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0(0,0,0) = yvel0(0,0,2);
if(fields[FIELD_YVEL1] == 1) yvel1(0,0,0) = yvel1(0,0,2);
}
__global__ void ops_update_halo_kernel2_yvel_plus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_2_back[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_2_back[0][0] * dims_update_halo_kernel2_yvel_plus_2_back[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_2_back[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_2_back[1][0] * dims_update_halo_kernel2_yvel_plus_2_back[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_yvel_plus_2_back[0][0], dims_update_halo_kernel2_yvel_plus_2_back[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_yvel_plus_2_back[1][0], dims_update_halo_kernel2_yvel_plus_2_back[1][1], arg1);
update_halo_kernel2_yvel_plus_2_back_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_plus_2_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,45)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(45,"update_halo_kernel2_yvel_plus_2_back");
OPS_kernels[45].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_yvel_plus_2_back_h[0][0] || ydim0 != dims_update_halo_kernel2_yvel_plus_2_back_h[0][1] || xdim1 != dims_update_halo_kernel2_yvel_plus_2_back_h[1][0] || ydim1 != dims_update_halo_kernel2_yvel_plus_2_back_h[1][1]) {
dims_update_halo_kernel2_yvel_plus_2_back_h[0][0] = xdim0;
dims_update_halo_kernel2_yvel_plus_2_back_h[0][1] = ydim0;
dims_update_halo_kernel2_yvel_plus_2_back_h[1][0] = xdim1;
dims_update_halo_kernel2_yvel_plus_2_back_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_yvel_plus_2_back, dims_update_halo_kernel2_yvel_plus_2_back_h, sizeof(dims_update_halo_kernel2_yvel_plus_2_back)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[45].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_yvel_plus_2_back<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[45].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[45].mpi_time += t2-t1;
OPS_kernels[45].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[45].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 45;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 45;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_2_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(45,"update_halo_kernel2_yvel_plus_2_back");
}
ops_enqueue_kernel(desc);
}
#endif
|
0be76ecd896fad4fb184d61fdfbaf83cba58272a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <gmpxx.h>
#include "cgbn/cgbn.h"
#include "utility/cpu_support.h"
#include "utility/gpu_support.h"
#include "mimc_constants.h"
#include "mimc.h"
// IMPORTANT: DO NOT DEFINE TPI OR BITS BEFORE INCLUDING CGBN
#define TPI 8
#define BITS 256
#define DEFAULT_TPB 768
#define MAX_CUDA_OUT 2000
typedef cgbn_context_t<TPI> context_t;
typedef cgbn_env_t<context_t, BITS> env_t;
typedef typename env_t::cgbn_t bn_t;
typedef struct {
bn_t k;
bn_t l;
bn_t r;
} feistel_state_t;
typedef struct {
int64_t x;
int64_t y;
uint32_t side_length;
uint32_t key;
uint32_t rarity;
} explore_in_t;
typedef struct {
cgbn_mem_t<BITS> hash;
int64_t x;
int64_t y;
} explore_out_item_t;
typedef struct {
explore_out_item_t planets[MAX_CUDA_OUT];
uint32_t count;
} explore_out_t;
__constant__ cgbn_mem_t<BITS> g_device_p;
__constant__ cgbn_mem_t<BITS> g_device_c[MimcConstants::rounds];
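// Modular addition: add, then conditionally subtract the modulus
// (assumes both operands are already reduced).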
__device__ void add_mod(env_t &bn_env, bn_t &r, const bn_t &a, const bn_t &b, const bn_t &modulus)
{
cgbn_add(bn_env, r, a, b);
if (cgbn_compare(bn_env, r, modulus) == 1) {
cgbn_sub(bn_env, r, r, modulus);
}
}
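// One MiMC-Feistel permutation of the (l, r) state under key k:
// rounds-1 rounds of t = (l + k + c_i)^5 + r followed by a swap,
// plus a final constant-free round that only updates r.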
__device__ void mix(env_t &bn_env, feistel_state_t &state)
{
bn_t bn_t_5, p, t, ci;
cgbn_load(bn_env, p, &g_device_p);
cgbn_set_ui32(bn_env, bn_t_5, 5);
for (int32_t i = 0; i < MimcConstants::rounds - 1; ++i) {
cgbn_load(bn_env, ci, g_device_c + i);
add_mod(bn_env, t, state.l, state.k, p);
add_mod(bn_env, t, t, ci, p);
cgbn_modular_power(bn_env, t, t, bn_t_5, p);
add_mod(bn_env, t, t, state.r, p);
cgbn_set(bn_env, state.r, state.l);
cgbn_set(bn_env, state.l, t);
}
add_mod(bn_env, t, state.l, state.k, p);
cgbn_modular_power(bn_env, t, t, bn_t_5, p);
add_mod(bn_env, state.r, t, state.r, p);
}
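// Absorb a field element into the left half of the sponge state.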
__device__ void inject(env_t &bn_env, feistel_state_t &state, bn_t elt)
{
bn_t p;
cgbn_load(bn_env, p, &g_device_p);
add_mod(bn_env, state.l, state.l, elt, p);
}
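// MiMCSponge: absorb each input (inject + mix), emit the first output from l,
// then mix once more per additional output.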
__device__ void mimc_sponge(env_t &bn_env,
const bn_t *inputs,
uint32_t n_inputs,
uint32_t key,
bn_t *outputs,
uint32_t n_outputs)
{
feistel_state_t state;
cgbn_set_ui32(bn_env, state.l, 0);
cgbn_set_ui32(bn_env, state.r, 0);
cgbn_set_ui32(bn_env, state.k, key);
for (int32_t i = 0; i < n_inputs; ++i) {
inject(bn_env, state, inputs[i]);
mix(bn_env, state);
}
cgbn_set(bn_env, outputs[0], state.l);
for (int32_t i = 1; i < n_outputs; ++i) {
mix(bn_env, state);
cgbn_set(bn_env, outputs[i], state.l);
}
}
__device__ void coords_to_bn(env_t &bn_env, bn_t &r, int64_t num)
{
#ifdef DF_INT32_COORDS
if (num > 0) {
cgbn_set_ui32(bn_env, r, num);
return;
}
bn_t p;
cgbn_load(bn_env, p, &g_device_p);
cgbn_sub_ui32(bn_env, r, p, abs(num));
#else
uint32_t low = llabs(num) & 0xffffffff;
uint32_t high = llabs(num) >> 32;
cgbn_set_ui32(bn_env, r, 0);
cgbn_insert_bits_ui32(bn_env, r, r, 0, 32, low);
cgbn_insert_bits_ui32(bn_env, r, r, 32, 32, high);
if (num < 0) {
bn_t p;
cgbn_load(bn_env, p, &g_device_p);
cgbn_sub(bn_env, r, p, r);
}
#endif //DF_INT32_COORDS
}
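// A location counts as a planet when its MiMC hash is strictly below p / rarity.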
__device__ bool is_planet(env_t &bn_env, const bn_t &hash, uint32_t rarity)
{
bn_t threshold, p;
cgbn_load(bn_env, p, &g_device_p);
cgbn_div_ui32(bn_env, threshold, p, rarity);
if (cgbn_compare(bn_env, hash, threshold) == -1) {
return true;
}
return false;
}
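// One CGBN instance (TPI cooperating threads) per lattice point: hash the (x, y)
// coordinates with the MiMC sponge and, when the hash passes the rarity threshold,
// append the planet to the output buffer via an atomic counter.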
__global__ void kernel_explore(const explore_in_t * __restrict__ explore_params,
explore_out_t * __restrict__ explore_out,
uint32_t count)
{
uint32_t instance = (blockIdx.x * blockDim.x + threadIdx.x) / TPI;
if (instance >= count) {
return;
}
context_t bn_context(cgbn_no_checks);
env_t bn_env(bn_context.env<env_t>());
bn_t hash, inputs[2];
int64_t x = explore_params->x + (instance / explore_params->side_length);
int64_t y = explore_params->y + (instance % explore_params->side_length);
coords_to_bn(bn_env, inputs[0], x);
coords_to_bn(bn_env, inputs[1], y);
mimc_sponge(bn_env, inputs, 2, explore_params->key, &hash, 1);
if (!is_planet(bn_env, hash, explore_params->rarity)) {
return;
}
extern __shared__ uint32_t result_index[];
uint32_t ii = threadIdx.x / TPI;
uint32_t group_thread = threadIdx.x & TPI-1;
if (0 == group_thread) {
result_index[ii] = atomicInc((uint32_t*)&(explore_out->count), 0xffffffff);
}
__syncthreads();
uint32_t i = result_index[ii];
if (i >= MAX_CUDA_OUT) {
return;
}
explore_out->planets[i].x = x;
explore_out->planets[i].y = y;
cgbn_store(bn_env, &(explore_out->planets[i].hash), hash);
}
void init_device_constants()
{
cgbn_mem_t<BITS> p;
cgbn_mem_t<BITS> c[MimcConstants::rounds];
from_mpz(MimcConstants::get_p().get_mpz_t(), p._limbs, BITS / 32);
for (int32_t i = 0; i < MimcConstants::rounds; ++i) {
from_mpz(MimcConstants::c_at(i).get_mpz_t(), c[i]._limbs, BITS / 32);
}
CUDA_CHECK(hipSetDevice(0));
CUDA_CHECK(hipMemcpyToSymbol(g_device_p, &p, sizeof(cgbn_mem_t<BITS>)));
CUDA_CHECK(hipMemcpyToSymbol(g_device_c, c, sizeof(cgbn_mem_t<BITS>) * MimcConstants::rounds));
}
int32_t get_block_size()
{
char *block_size_str;
block_size_str = getenv("MIMC_CUDA_BLOCK_SIZE");
if (block_size_str == NULL) {
return DEFAULT_TPB;
}
int32_t size = atoi(block_size_str);
if (size <= 0 || size > 1024) {
return DEFAULT_TPB;
}
return size;
}
void get_result(explore_out_t * cuda_result, std::vector<location_hash_t> &hashes)
{
mpz_class h;
for (int32_t i = 0; i < cuda_result->count; ++i) {
to_mpz(h.get_mpz_t(), cuda_result->planets[i].hash._limbs, BITS / 32);
hashes.push_back({h.get_str(), cuda_result->planets[i].x, cuda_result->planets[i].y});
}
}
void gpu_explore_chunk(int64_t bottom_left_x,
int64_t bottom_left_y,
uint32_t side_length,
uint32_t key,
uint32_t rarity,
std::vector<location_hash_t> &hashes)
{
explore_in_t in_params {
.x = bottom_left_x,
.y = bottom_left_y,
.side_length = side_length,
.key = key,
.rarity = rarity
};
explore_in_t * gpu_in_params;
CUDA_CHECK(hipMalloc((void **)&gpu_in_params, sizeof(explore_in_t)));
CUDA_CHECK(hipMemcpy(gpu_in_params, &in_params, sizeof(explore_in_t), hipMemcpyHostToDevice));
explore_out_t *out;
CUDA_CHECK(hipHostMalloc((void **)&out, sizeof(explore_out_t), hipHostMallocDefault));
out->count = 0;
explore_out_t *gpu_out;
CUDA_CHECK(hipMalloc((void **)&gpu_out, sizeof(explore_out_t)));
CUDA_CHECK(hipMemcpy(gpu_out, out, sizeof(explore_out_t), hipMemcpyHostToDevice));
uint32_t TPB = get_block_size();
uint32_t IPB = TPB / TPI; // IPB is instances per block
uint32_t count = side_length * side_length;
hipLaunchKernelGGL(( kernel_explore), dim3((count + IPB - 1) / IPB), dim3(TPB), sizeof (uint32_t) * IPB, 0, gpu_in_params, gpu_out, count);
CUDA_CHECK(hipDeviceSynchronize());
  // copy the result back from GPU memory
CUDA_CHECK(hipMemcpy(out, gpu_out, sizeof(explore_out_t), hipMemcpyDeviceToHost));
get_result(out, hashes);
CUDA_CHECK(hipHostFree(out));
CUDA_CHECK(hipFree(gpu_out));
}
| 0be76ecd896fad4fb184d61fdfbaf83cba58272a.cu | #include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <gmpxx.h>
#include "cgbn/cgbn.h"
#include "utility/cpu_support.h"
#include "utility/gpu_support.h"
#include "mimc_constants.h"
#include "mimc.h"
// IMPORTANT: DO NOT DEFINE TPI OR BITS BEFORE INCLUDING CGBN
#define TPI 8
#define BITS 256
#define DEFAULT_TPB 768
#define MAX_CUDA_OUT 2000
typedef cgbn_context_t<TPI> context_t;
typedef cgbn_env_t<context_t, BITS> env_t;
typedef typename env_t::cgbn_t bn_t;
typedef struct {
bn_t k;
bn_t l;
bn_t r;
} feistel_state_t;
typedef struct {
int64_t x;
int64_t y;
uint32_t side_length;
uint32_t key;
uint32_t rarity;
} explore_in_t;
typedef struct {
cgbn_mem_t<BITS> hash;
int64_t x;
int64_t y;
} explore_out_item_t;
typedef struct {
explore_out_item_t planets[MAX_CUDA_OUT];
uint32_t count;
} explore_out_t;
__constant__ cgbn_mem_t<BITS> g_device_p;
__constant__ cgbn_mem_t<BITS> g_device_c[MimcConstants::rounds];
__device__ void add_mod(env_t &bn_env, bn_t &r, const bn_t &a, const bn_t &b, const bn_t &modulus)
{
cgbn_add(bn_env, r, a, b);
if (cgbn_compare(bn_env, r, modulus) == 1) {
cgbn_sub(bn_env, r, r, modulus);
}
}
__device__ void mix(env_t &bn_env, feistel_state_t &state)
{
bn_t bn_t_5, p, t, ci;
cgbn_load(bn_env, p, &g_device_p);
cgbn_set_ui32(bn_env, bn_t_5, 5);
for (int32_t i = 0; i < MimcConstants::rounds - 1; ++i) {
cgbn_load(bn_env, ci, g_device_c + i);
add_mod(bn_env, t, state.l, state.k, p);
add_mod(bn_env, t, t, ci, p);
cgbn_modular_power(bn_env, t, t, bn_t_5, p);
add_mod(bn_env, t, t, state.r, p);
cgbn_set(bn_env, state.r, state.l);
cgbn_set(bn_env, state.l, t);
}
add_mod(bn_env, t, state.l, state.k, p);
cgbn_modular_power(bn_env, t, t, bn_t_5, p);
add_mod(bn_env, state.r, t, state.r, p);
}
__device__ void inject(env_t &bn_env, feistel_state_t &state, bn_t elt)
{
bn_t p;
cgbn_load(bn_env, p, &g_device_p);
add_mod(bn_env, state.l, state.l, elt, p);
}
__device__ void mimc_sponge(env_t &bn_env,
const bn_t *inputs,
uint32_t n_inputs,
uint32_t key,
bn_t *outputs,
uint32_t n_outputs)
{
feistel_state_t state;
cgbn_set_ui32(bn_env, state.l, 0);
cgbn_set_ui32(bn_env, state.r, 0);
cgbn_set_ui32(bn_env, state.k, key);
for (int32_t i = 0; i < n_inputs; ++i) {
inject(bn_env, state, inputs[i]);
mix(bn_env, state);
}
cgbn_set(bn_env, outputs[0], state.l);
for (int32_t i = 1; i < n_outputs; ++i) {
mix(bn_env, state);
cgbn_set(bn_env, outputs[i], state.l);
}
}
__device__ void coords_to_bn(env_t &bn_env, bn_t &r, int64_t num)
{
#ifdef DF_INT32_COORDS
if (num > 0) {
cgbn_set_ui32(bn_env, r, num);
return;
}
bn_t p;
cgbn_load(bn_env, p, &g_device_p);
cgbn_sub_ui32(bn_env, r, p, abs(num));
#else
uint32_t low = llabs(num) & 0xffffffff;
uint32_t high = llabs(num) >> 32;
cgbn_set_ui32(bn_env, r, 0);
cgbn_insert_bits_ui32(bn_env, r, r, 0, 32, low);
cgbn_insert_bits_ui32(bn_env, r, r, 32, 32, high);
if (num < 0) {
bn_t p;
cgbn_load(bn_env, p, &g_device_p);
cgbn_sub(bn_env, r, p, r);
}
#endif //DF_INT32_COORDS
}
__device__ bool is_planet(env_t &bn_env, const bn_t &hash, uint32_t rarity)
{
bn_t threshold, p;
cgbn_load(bn_env, p, &g_device_p);
cgbn_div_ui32(bn_env, threshold, p, rarity);
if (cgbn_compare(bn_env, hash, threshold) == -1) {
return true;
}
return false;
}
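// One CGBN instance (TPI cooperating threads) handles one lattice point of the
// side_length x side_length chunk: instance i maps to x = explore_params->x + i / side_length
// and y = explore_params->y + i % side_length, hashes (x, y) with the MiMC sponge, and
// records the point iff the hash falls below p / rarity (see is_planet above).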
__global__ void kernel_explore(const explore_in_t * __restrict__ explore_params,
explore_out_t * __restrict__ explore_out,
uint32_t count)
{
uint32_t instance = (blockIdx.x * blockDim.x + threadIdx.x) / TPI;
if (instance >= count) {
return;
}
context_t bn_context(cgbn_no_checks);
env_t bn_env(bn_context.env<env_t>());
bn_t hash, inputs[2];
int64_t x = explore_params->x + (instance / explore_params->side_length);
int64_t y = explore_params->y + (instance % explore_params->side_length);
coords_to_bn(bn_env, inputs[0], x);
coords_to_bn(bn_env, inputs[1], y);
mimc_sponge(bn_env, inputs, 2, explore_params->key, &hash, 1);
if (!is_planet(bn_env, hash, explore_params->rarity)) {
return;
}
extern __shared__ uint32_t result_index[];
uint32_t ii = threadIdx.x / TPI;
uint32_t group_thread = threadIdx.x & TPI-1;
if (0 == group_thread) {
result_index[ii] = atomicInc((uint32_t*)&(explore_out->count), 0xffffffff);
}
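// One thread per CGBN group reserved an output slot above; the barrier below publishes
// result_index to the rest of the block. Threads that bailed out earlier (out-of-range
// instance or no planet) never reach this barrier, so this relies on __syncthreads()
// not waiting for already-exited threads.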
__syncthreads();
uint32_t i = result_index[ii];
if (i >= MAX_CUDA_OUT) {
return;
}
explore_out->planets[i].x = x;
explore_out->planets[i].y = y;
cgbn_store(bn_env, &(explore_out->planets[i].hash), hash);
}
void init_device_constants()
{
cgbn_mem_t<BITS> p;
cgbn_mem_t<BITS> c[MimcConstants::rounds];
from_mpz(MimcConstants::get_p().get_mpz_t(), p._limbs, BITS / 32);
for (int32_t i = 0; i < MimcConstants::rounds; ++i) {
from_mpz(MimcConstants::c_at(i).get_mpz_t(), c[i]._limbs, BITS / 32);
}
CUDA_CHECK(cudaSetDevice(0));
CUDA_CHECK(cudaMemcpyToSymbol(g_device_p, &p, sizeof(cgbn_mem_t<BITS>)));
CUDA_CHECK(cudaMemcpyToSymbol(g_device_c, c, sizeof(cgbn_mem_t<BITS>) * MimcConstants::rounds));
}
int32_t get_block_size()
{
char *block_size_str;
block_size_str = getenv("MIMC_CUDA_BLOCK_SIZE");
if (block_size_str == NULL) {
return DEFAULT_TPB;
}
int32_t size = atoi(block_size_str);
if (size <= 0 || size > 1024) {
return DEFAULT_TPB;
}
return size;
}
void get_result(explore_out_t * cuda_result, std::vector<location_hash_t> &hashes)
{
mpz_class h;
// The kernel keeps incrementing `count` after the output buffer fills up, so clamp
// to MAX_CUDA_OUT before indexing into the planets array.
uint32_t n = cuda_result->count < MAX_CUDA_OUT ? cuda_result->count : (uint32_t)MAX_CUDA_OUT;
for (uint32_t i = 0; i < n; ++i) {
to_mpz(h.get_mpz_t(), cuda_result->planets[i].hash._limbs, BITS / 32);
hashes.push_back({h.get_str(), cuda_result->planets[i].x, cuda_result->planets[i].y});
}
}
void gpu_explore_chunk(int64_t bottom_left_x,
int64_t bottom_left_y,
uint32_t side_length,
uint32_t key,
uint32_t rarity,
std::vector<location_hash_t> &hashes)
{
explore_in_t in_params {
.x = bottom_left_x,
.y = bottom_left_y,
.side_length = side_length,
.key = key,
.rarity = rarity
};
explore_in_t * gpu_in_params;
CUDA_CHECK(cudaMalloc((void **)&gpu_in_params, sizeof(explore_in_t)));
CUDA_CHECK(cudaMemcpy(gpu_in_params, &in_params, sizeof(explore_in_t), cudaMemcpyHostToDevice));
explore_out_t *out;
CUDA_CHECK(cudaHostAlloc((void **)&out, sizeof(explore_out_t), cudaHostAllocDefault));
out->count = 0;
explore_out_t *gpu_out;
CUDA_CHECK(cudaMalloc((void **)&gpu_out, sizeof(explore_out_t)));
CUDA_CHECK(cudaMemcpy(gpu_out, out, sizeof(explore_out_t), cudaMemcpyHostToDevice));
uint32_t TPB = get_block_size();
uint32_t IPB = TPB / TPI; // IPB is instances per block
uint32_t count = side_length * side_length;
kernel_explore<<<(count + IPB - 1) / IPB, TPB, sizeof (uint32_t) * IPB>>>(gpu_in_params, gpu_out, count);
CUDA_CHECK(cudaDeviceSynchronize());
// copy the result back from gpuMemory
CUDA_CHECK(cudaMemcpy(out, gpu_out, sizeof(explore_out_t), cudaMemcpyDeviceToHost));
get_result(out, hashes);
CUDA_CHECK(cudaFreeHost(out));
CUDA_CHECK(cudaFree(gpu_out));
}
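// Hypothetical host-side usage sketch (the constant values below are made up for
// illustration, not taken from the real caller):
//
//   init_device_constants();                       // upload p and the round constants once
//   std::vector<location_hash_t> hashes;
//   gpu_explore_chunk(/*bottom_left_x=*/-8, /*bottom_left_y=*/-8,
//                     /*side_length=*/16, /*key=*/7, /*rarity=*/16384, hashes);
//   // hashes now holds a (hash string, x, y) entry for every planet in the 16x16 chunk.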
|
3021977a3e9d703b490f1d33456accbe4cb6c542.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
struct GPUState {
int deviceCount; // number of GPUs to use
int deviceToUse; // GPU to use (round-robin)
pthread_mutex_t initLock;
} gpustate = {-1,-1,PTHREAD_MUTEX_INITIALIZER};
// returns which GPU to run on, or -1 if no GPUs are available
int get_gpu() {
if (gpustate.deviceCount == 1)
return 0; // return immediately for the common case of 1 GPU
else if (gpustate.deviceCount > 1) { // multiple GPUs
int newval, oldval;
do {
oldval = gpustate.deviceToUse;
if (oldval == gpustate.deviceCount-1)
newval = 0;
else
newval = oldval+1;
} while (!__sync_bool_compare_and_swap(&gpustate.deviceToUse, oldval, newval));
}
else if (gpustate.deviceCount == -1) { // not yet initialized... run initialization
pthread_mutex_lock(&gpustate.initLock);
// check if another thread already completed initialization
if (gpustate.deviceCount != -1) {
pthread_mutex_unlock(&gpustate.initLock);
return get_gpu();
}
// continue with initialization
if (hipGetDeviceCount(&gpustate.deviceCount)) {
fprintf(stderr, "Cuda Error in GetDeviceCount: %s\n", hipGetErrorString(hipGetLastError()));
gpustate.deviceCount = 0;
}
else if (gpustate.deviceCount <= 0)
gpustate.deviceCount = 0;
else
gpustate.deviceToUse = 0;
for (int deviceID=0; deviceID<gpustate.deviceCount; deviceID++) {
hipSetDevice(deviceID);
hipDeviceReset();
}
pthread_mutex_unlock(&gpustate.initLock);
}
return gpustate.deviceToUse;
}
| 3021977a3e9d703b490f1d33456accbe4cb6c542.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
struct GPUState {
int deviceCount; // number of GPUs to use
int deviceToUse; // GPU to use (round-robin)
pthread_mutex_t initLock;
} gpustate = {-1,-1,PTHREAD_MUTEX_INITIALIZER};
// returns which GPU to run on, or -1 if no GPUs are available
int get_gpu() {
if (gpustate.deviceCount == 1)
return 0; // return immediately for the common case of 1 GPU
else if (gpustate.deviceCount > 1) { // multiple GPUs
int newval, oldval;
do {
oldval = gpustate.deviceToUse;
if (oldval == gpustate.deviceCount-1)
newval = 0;
else
newval = oldval+1;
} while (!__sync_bool_compare_and_swap(&gpustate.deviceToUse, oldval, newval));
}
else if (gpustate.deviceCount == -1) { // not yet initialized... run initialization
pthread_mutex_lock(&gpustate.initLock);
// check if another thread already completed initialization
if (gpustate.deviceCount != -1) {
pthread_mutex_unlock(&gpustate.initLock);
return get_gpu();
}
// continue with initialization
if (cudaGetDeviceCount(&gpustate.deviceCount)) {
fprintf(stderr, "Cuda Error in GetDeviceCount: %s\n", cudaGetErrorString(cudaGetLastError()));
gpustate.deviceCount = 0;
}
else if (gpustate.deviceCount <= 0)
gpustate.deviceCount = 0;
else
gpustate.deviceToUse = 0;
for (int deviceID=0; deviceID<gpustate.deviceCount; deviceID++) {
cudaSetDevice(deviceID);
cudaDeviceReset();
}
pthread_mutex_unlock(&gpustate.initLock);
}
return gpustate.deviceToUse;
}
|
5889b828a8756bf64c0c2842cd54a6e92205afb5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "set_zero.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *I = NULL;
hipMalloc(&I, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int i = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((set_zero), dim3(gridBlock), dim3(threadBlock), 0, 0, A, I, n, i);
hipDeviceSynchronize();
// warm-up launches
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((set_zero), dim3(gridBlock), dim3(threadBlock), 0, 0, A, I, n, i);
}
hipDeviceSynchronize(); // drain the warm-up work before timing starts
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((set_zero), dim3(gridBlock), dim3(threadBlock), 0, 0, A, I, n, i);
}
hipDeviceSynchronize(); // kernel launches are asynchronous; wait so the timing covers execution, not just enqueueing
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5889b828a8756bf64c0c2842cd54a6e92205afb5.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "set_zero.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *I = NULL;
cudaMalloc(&I, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int i = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
set_zero<<<gridBlock,threadBlock>>>(A,I,n,i);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
set_zero<<<gridBlock,threadBlock>>>(A,I,n,i);
}
cudaDeviceSynchronize(); // drain the warm-up work before timing starts
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
set_zero<<<gridBlock,threadBlock>>>(A,I,n,i);
}
cudaDeviceSynchronize(); // kernel launches are asynchronous; wait so the timing covers execution, not just enqueueing
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
600f284dcb9286e085bae3cd4d48094545d2ab24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/SpatialClassNLLCriterion.hip"
#else
void THNN_(SpatialClassNLLCriterion_shapeCheck)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *weights)
{
TORCH_CHECK(target->dim() == 3, 1,
"only batches of spatial targets supported (3D tensors)" \
" but got targets of size: : ", target->sizes());
TORCH_CHECK(input->dim() == 4, 2,
"only batches of spatial inputs supported (4D tensors), " \
"but got input of size: ", input->sizes());
if (THCTensor_(size)(state, input, 0) != THCIndexTensor_(size)(state, target, 0) ||
THCTensor_(size)(state, input, 2) != THCIndexTensor_(size)(state, target, 1) ||
THCTensor_(size)(state, input, 3) != THCIndexTensor_(size)(state, target, 2)) {
THCDescBuff input_size = THCTensor_(sizeDesc)(state, input);
THCDescBuff target_size = THCIndexTensor_(sizeDesc)(state, target);
THError("input and target batch or spatial sizes don't match: target %s, input %s",
target_size.str, input_size.str);
}
if (weights && THCTensor_(nElement)(state, weights) != THCTensor_(size)(state, input, 1)) {
THError("weight tensor should be defined either for all or no classes");
}
}
static void THNN_(SpatialClassNLLCriterion_gradOutput_no_reduce_shapeCheck)(
THCState *state,
THCTensor *gradOutput,
THCIndexTensor *target)
{
TORCH_CHECK(THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 3, 2,
"gradOutput must have same dimension as target (3) but got dimension: ", gradOutput->sizes());
if (THCTensor_(size)(state, gradOutput, 0) != THCIndexTensor_(size)(state, target, 0) ||
THCTensor_(size)(state, gradOutput, 1) != THCIndexTensor_(size)(state, target, 1) ||
THCTensor_(size)(state, gradOutput, 2) != THCIndexTensor_(size)(state, target, 2)) {
THCDescBuff gradOutput_size = THCTensor_(sizeDesc)(state, gradOutput);
THCDescBuff target_size = THCIndexTensor_(sizeDesc)(state, target);
THError("gradOutput sizes don't match target sizes: target %s, gradOutput %s",
target_size.str, gradOutput_size.str);
}
}
void THNN_(SpatialClassNLLCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
int64_t reduction,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index)
{
#if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__)
TORCH_CHECK(false, "SpatialClassNLLCriterion_updateOutput not suppported with BFloat16");
#else
THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights);
THCTensor_(resize0d)(state, output);
THCTensor_(resize0d)(state, total_weight);
if (weights)
THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight);
else
THCUNN_assertSameGPU(state, 4, input, target, output, total_weight);
if (reduction == at::Reduction::None) {
int64_t batch_size = THCTensor_(size)(state, input, 0);
int64_t H = THCTensor_(size)(state, input, 2);
int64_t W = THCTensor_(size)(state, input, 3);
int64_t count = batch_size * H * W;
THCTensor_(resize3d)(state, output, batch_size, H, W);
if (count == 0) {
// This guards from unnecessary operations and launching CUDA kernel with 0 blocks.
return;
}
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
hipLaunchKernelGGL(( SpatialClassNLLCriterion_updateOutput_no_reduce_kernel<scalar_t>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
toDeviceTensor<scalar_t, 4>(state, input),
toDeviceTensor<THCIndex_t, 3>(state, target),
toDeviceTensor<scalar_t, 3>(state, output),
weights ? THCTensor_(data)(state, weights) : NULL,
ignore_index);
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
scalar_t *input_data = THCTensor_(data)(state, input);
scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
scalar_t *output_data = THCTensor_(data)(state, output);
scalar_t *total_weight_data = THCTensor_(data)(state, total_weight);
THCTensor_(fill)(state, output, ScalarConvert<int, scalar_t>::to(0));
THCTensor_(fill)(state, total_weight, ScalarConvert<int, scalar_t>::to(0));
THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0);
if (batch_size != 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks.
THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size;
int blocks_per_sample = GET_BLOCKS(map_nelem) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
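// Heuristic: give each sample roughly 1/128 of the blocks a full grid over its H*W map
// would need (but at least one), so total_blocks grows with the batch size rather than
// with batch_size * map_nelem.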
hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output_data,
total_weight_data,
input_data,
target_data,
weights_data,
reduction == at::Reduction::Mean,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
THCTensor_(size)(state, input, 2) * THCTensor_(size)(state, input, 3),
blocks_per_sample,
ignore_index
);
THCudaCheck(hipGetLastError());
}
if (reduction == at::Reduction::Mean) {
hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_sizeAverage_kernel), dim3(1), dim3(1), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output_data, total_weight_data, THCTensor_(nElement)(state, input)
);
THCudaCheck(hipGetLastError());
}
if (weights)
THCTensor_(free)(state, weights);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
#endif // THC_REAL_IS_BFLOAT16 && !__HIP_PLATFORM_HCC__
}
void THNN_(SpatialClassNLLCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
int64_t reduction,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index)
{
#if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__)
TORCH_CHECK(false, "SpatialClassNLLCriterion_updateGradInput not suppported with BFloat16");
#else
THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4,
"gradInput must be contiguous");
if (weights)
THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight);
else
THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight);
if (reduction == at::Reduction::None) {
THNN_(SpatialClassNLLCriterion_gradOutput_no_reduce_shapeCheck)(
state,
gradOutput,
target);
int64_t batch_size = THCTensor_(size)(state, input, 0);
int64_t H = THCTensor_(size)(state, input, 2);
int64_t W = THCTensor_(size)(state, input, 3);
int64_t count = batch_size * H * W;
if (count == 0) {
// This guards from unnecessary operations and launching CUDA kernel with 0 blocks.
return;
}
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
hipLaunchKernelGGL(( SpatialClassNLLCriterion_updateGradInput_no_reduce_kernel<scalar_t>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
toDeviceTensor<THCIndex_t, 3>(state, target),
toDeviceTensor<scalar_t, 3>(state, gradOutput),
toDeviceTensor<scalar_t, 4>(state, gradInput),
weights ? THCTensor_(data)(state, weights) : NULL,
ignore_index);
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput);
scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
scalar_t *gradInput_data = THCTensor_(data)(state, gradInput);
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
scalar_t *total_weight_data = THCTensor_(data)(state, total_weight);
THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0);
if (batch_size != 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks.
THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size;
int blocks_per_sample = GET_BLOCKS(map_nelem) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_updateGradInput_kernel)
, dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradInput_data,
gradOutput_data,
target_data,
weights_data,
total_weight_data,
reduction == at::Reduction::Mean,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
THCTensor_(size)(state, input, 2) *THCTensor_(size)(state, input, 3),
blocks_per_sample,
ignore_index
);
THCudaCheck(hipGetLastError());
}
if (weights)
THCTensor_(free)(state, weights);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
#endif // THC_REAL_IS_BFLOAT16 && !__HIP_PLATFORM_HCC__
}
#endif
| 600f284dcb9286e085bae3cd4d48094545d2ab24.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/SpatialClassNLLCriterion.cu"
#else
void THNN_(SpatialClassNLLCriterion_shapeCheck)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *weights)
{
TORCH_CHECK(target->dim() == 3, 1,
"only batches of spatial targets supported (3D tensors)" \
" but got targets of size: : ", target->sizes());
TORCH_CHECK(input->dim() == 4, 2,
"only batches of spatial inputs supported (4D tensors), " \
"but got input of size: ", input->sizes());
if (THCTensor_(size)(state, input, 0) != THCIndexTensor_(size)(state, target, 0) ||
THCTensor_(size)(state, input, 2) != THCIndexTensor_(size)(state, target, 1) ||
THCTensor_(size)(state, input, 3) != THCIndexTensor_(size)(state, target, 2)) {
THCDescBuff input_size = THCTensor_(sizeDesc)(state, input);
THCDescBuff target_size = THCIndexTensor_(sizeDesc)(state, target);
THError("input and target batch or spatial sizes don't match: target %s, input %s",
target_size.str, input_size.str);
}
if (weights && THCTensor_(nElement)(state, weights) != THCTensor_(size)(state, input, 1)) {
THError("weight tensor should be defined either for all or no classes");
}
}
static void THNN_(SpatialClassNLLCriterion_gradOutput_no_reduce_shapeCheck)(
THCState *state,
THCTensor *gradOutput,
THCIndexTensor *target)
{
TORCH_CHECK(THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 3, 2,
"gradOutput must have same dimension as target (3) but got dimension: ", gradOutput->sizes());
if (THCTensor_(size)(state, gradOutput, 0) != THCIndexTensor_(size)(state, target, 0) ||
THCTensor_(size)(state, gradOutput, 1) != THCIndexTensor_(size)(state, target, 1) ||
THCTensor_(size)(state, gradOutput, 2) != THCIndexTensor_(size)(state, target, 2)) {
THCDescBuff gradOutput_size = THCTensor_(sizeDesc)(state, gradOutput);
THCDescBuff target_size = THCIndexTensor_(sizeDesc)(state, target);
THError("gradOutput sizes don't match target sizes: target %s, gradOutput %s",
target_size.str, gradOutput_size.str);
}
}
void THNN_(SpatialClassNLLCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
int64_t reduction,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index)
{
#if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__)
TORCH_CHECK(false, "SpatialClassNLLCriterion_updateOutput not suppported with BFloat16");
#else
THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights);
THCTensor_(resize0d)(state, output);
THCTensor_(resize0d)(state, total_weight);
if (weights)
THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight);
else
THCUNN_assertSameGPU(state, 4, input, target, output, total_weight);
if (reduction == at::Reduction::None) {
int64_t batch_size = THCTensor_(size)(state, input, 0);
int64_t H = THCTensor_(size)(state, input, 2);
int64_t W = THCTensor_(size)(state, input, 3);
int64_t count = batch_size * H * W;
THCTensor_(resize3d)(state, output, batch_size, H, W);
if (count == 0) {
// This guards from unnecessary operations and launching CUDA kernel with 0 blocks.
return;
}
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
SpatialClassNLLCriterion_updateOutput_no_reduce_kernel<scalar_t>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, c10::cuda::getCurrentCUDAStream()>>>(
count,
toDeviceTensor<scalar_t, 4>(state, input),
toDeviceTensor<THCIndex_t, 3>(state, target),
toDeviceTensor<scalar_t, 3>(state, output),
weights ? THCTensor_(data)(state, weights) : NULL,
ignore_index);
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
scalar_t *input_data = THCTensor_(data)(state, input);
scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
scalar_t *output_data = THCTensor_(data)(state, output);
scalar_t *total_weight_data = THCTensor_(data)(state, total_weight);
THCTensor_(fill)(state, output, ScalarConvert<int, scalar_t>::to(0));
THCTensor_(fill)(state, total_weight, ScalarConvert<int, scalar_t>::to(0));
THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0);
if (batch_size != 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks.
THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size;
int blocks_per_sample = GET_BLOCKS(map_nelem) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
cunn_SpatialClassNLLCriterion_updateOutput_kernel<scalar_t, accreal>
<<<total_blocks, CUDA_NUM_THREADS, 0, c10::cuda::getCurrentCUDAStream()>>>(
output_data,
total_weight_data,
input_data,
target_data,
weights_data,
reduction == at::Reduction::Mean,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
THCTensor_(size)(state, input, 2) * THCTensor_(size)(state, input, 3),
blocks_per_sample,
ignore_index
);
THCudaCheck(cudaGetLastError());
}
if (reduction == at::Reduction::Mean) {
cunn_SpatialClassNLLCriterion_sizeAverage_kernel<<<1, 1, 0, c10::cuda::getCurrentCUDAStream()>>>(
output_data, total_weight_data, THCTensor_(nElement)(state, input)
);
THCudaCheck(cudaGetLastError());
}
if (weights)
THCTensor_(free)(state, weights);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
#endif // THC_REAL_IS_BFLOAT16 && !__HIP_PLATFORM_HCC__
}
void THNN_(SpatialClassNLLCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
int64_t reduction,
THCTensor *weights,
THCTensor *total_weight,
int64_t ignore_index)
{
#if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__)
TORCH_CHECK(false, "SpatialClassNLLCriterion_updateGradInput not suppported with BFloat16");
#else
THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4,
"gradInput must be contiguous");
if (weights)
THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight);
else
THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight);
if (reduction == at::Reduction::None) {
THNN_(SpatialClassNLLCriterion_gradOutput_no_reduce_shapeCheck)(
state,
gradOutput,
target);
int64_t batch_size = THCTensor_(size)(state, input, 0);
int64_t H = THCTensor_(size)(state, input, 2);
int64_t W = THCTensor_(size)(state, input, 3);
int64_t count = batch_size * H * W;
if (count == 0) {
// This guards from unnecessary operations and launching CUDA kernel with 0 blocks.
return;
}
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
}
SpatialClassNLLCriterion_updateGradInput_no_reduce_kernel<scalar_t>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, c10::cuda::getCurrentCUDAStream()>>>(
count,
toDeviceTensor<THCIndex_t, 3>(state, target),
toDeviceTensor<scalar_t, 3>(state, gradOutput),
toDeviceTensor<scalar_t, 4>(state, gradInput),
weights ? THCTensor_(data)(state, weights) : NULL,
ignore_index);
if (weights) {
THCTensor_(free)(state, weights);
}
return;
}
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput);
scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
scalar_t *gradInput_data = THCTensor_(data)(state, gradInput);
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
scalar_t *total_weight_data = THCTensor_(data)(state, total_weight);
THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0);
if (batch_size != 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks.
THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size;
int blocks_per_sample = GET_BLOCKS(map_nelem) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
cunn_SpatialClassNLLCriterion_updateGradInput_kernel
<<<total_blocks, CUDA_NUM_THREADS, 0, c10::cuda::getCurrentCUDAStream()>>>(
gradInput_data,
gradOutput_data,
target_data,
weights_data,
total_weight_data,
reduction == at::Reduction::Mean,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
THCTensor_(size)(state, input, 2) *THCTensor_(size)(state, input, 3),
blocks_per_sample,
ignore_index
);
THCudaCheck(cudaGetLastError());
}
if (weights)
THCTensor_(free)(state, weights);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
#endif // THC_REAL_IS_BFLOAT16 && !__HIP_PLATFORM_HCC__
}
#endif
|
6754fbbc96bfbd93dab8ba46e64097c50485f2df.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 1024
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
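// A minimal sketch of how they are used each step (see stepSimulationNaive below):
// read last step's velocities from dev_vel1, write the new ones to dev_vel2, then
// swap the pointers so the next step reads the fresh data:
// kernUpdateVelocityBruteForce<<<blocks, blockSize>>>(N, dev_pos, dev_vel1, dev_vel2);
// glm::vec3 *temp = dev_vel1; dev_vel1 = dev_vel2; dev_vel2 = temp;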
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// Consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3* dev_pos2;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray fsailed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// Allocate additional buffers here.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
hipMalloc((void**)&dev_pos2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos2 failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
glm::vec3 velChange1(0.0f, 0.0f, 0.0f);
glm::vec3 perceivedCenter(0.0f, 0.0f, 0.0f);
int numNeighbors1 = 0;
// Rule 2: boids try to stay a distance d away from each other
glm::vec3 velChange2(0.0f, 0.0f, 0.0f);
// Rule 3: boids try to match the speed of surrounding boids
glm::vec3 velChange3(0.0f, 0.0f, 0.0f);
glm::vec3 perceivedVel(0.0f, 0.0f, 0.0f);
int numNeighbors3 = 0;
// Loop to find and take into account neighbors for rules 1, 2, and 3
for (int i = 0; i < N; i++) {
if (i != iSelf) {
float dist = glm::distance(pos[i], pos[iSelf]);
if (dist < rule1Distance) {
perceivedCenter += pos[i];
numNeighbors1++;
}
if (dist < rule2Distance) {
velChange2 -= (pos[i] - pos[iSelf]);
}
if (dist < rule3Distance) {
perceivedVel += vel[i];
numNeighbors3++;
}
}
}
if (numNeighbors1 > 0)
{
perceivedCenter /= numNeighbors1;
velChange1 = (perceivedCenter - pos[iSelf]) * rule1Scale;
}
velChange2 *= rule2Scale;
if (numNeighbors3 > 0)
{
perceivedVel /= numNeighbors3;
velChange3 = perceivedVel * rule3Scale;
}
return velChange1 + velChange2 + velChange3;
}
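// Worked example (illustrative values): a boid at the origin with two neighbors at
// (1,0,0) and (3,0,0), both with zero velocity:
// rule 1: perceivedCenter = (2,0,0), velChange1 = (2,0,0) * rule1Scale = (0.02, 0, 0)
// rule 2: only the neighbor at distance 1 is inside rule2Distance, so velChange2 = -(1,0,0) * rule2Scale = (-0.1, 0, 0)
// rule 3: perceivedVel = (0,0,0), so velChange3 = (0,0,0)
// total change returned = (-0.08, 0, 0)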
/**
* Basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// Compute a new velocity based on pos and vel1
glm::vec3 newVel = vel1[index] + computeVelocityChange(N, index, pos, vel1);
// Clamp the speed
float speed = glm::length(newVel);
if (speed > maxSpeed) {
newVel = maxSpeed * glm::normalize(newVel); // clamp to maxSpeed (matches the grid-based kernels; correct even if maxSpeed != 1)
}
// Record the new velocity into vel2. Question: why NOT vel1?
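// (Answer: other threads in this launch are still reading vel1 for their own neighbor
// sums; writing the result back into vel1 would race with those reads and make the
// outcome depend on scheduling. Hence the ping-pong into vel2.)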
vel2[index] = newVel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
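// (For the LOOK-2.3 question above: iterate with x innermost -- for(z) for(y) for(x) --
// because x is the fastest-varying term of the 1D index, so consecutive x values hit
// consecutive entries of gridCellStartIndices/gridCellEndIndices.)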
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int boidIndex = threadIdx.x + (blockIdx.x * blockDim.x);
if (boidIndex >= N) {
return;
}
glm::vec3 boidGridCell = glm::floor(inverseCellWidth * (pos[boidIndex] - gridMin));
gridIndices[boidIndex] = gridIndex3Dto1D(boidGridCell.x, boidGridCell.y, boidGridCell.z, gridResolution);
indices[boidIndex] = boidIndex;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x; // boid index
if (index >= N) {
return;
}
int gridIndex = particleGridIndices[index];
if (index == 0 || gridIndex != particleGridIndices[index - 1]) {
gridCellStartIndices[gridIndex] = index;
}
// Note: this must not be an `else if` -- a boid can be both the first and the last
// member of its cell (single-boid cells, or the final boid overall), and the end
// index has to be recorded in that case too.
if (index == N - 1 || gridIndex != particleGridIndices[index + 1]) {
gridCellEndIndices[gridIndex] = index;
}
}
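// Worked example (assumed data): with sorted particleGridIndices = [4, 4, 6, 6, 6, 9]
// this kernel yields start[4]=0, end[4]=1, start[6]=2, end[6]=4, start[9]=5, end[9]=5.
// Cells with no boids keep the -1 that kernResetIntBuffer wrote into
// gridCellStartIndices, which is how the neighbor-search kernels skip them.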
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int* gridCellStartIndices, int* gridCellEndIndices,
int* particleArrayIndices,
glm::vec3* pos, glm::vec3* vel1, glm::vec3* vel2) {
// Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int boidIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
if (boidIndex >= N)
{
return;
}
glm::vec3 boidPos = pos[boidIndex];
glm::vec3 boidGridCell = inverseCellWidth * (boidPos - gridMin); // might not be rounded/aka integer
glm::vec3 boidGridCellFloor = glm::floor(boidGridCell); // must be rounded/aka integer
// The two below has each component within [-1, 1] as integers. Basically tells
// the range relative to the current boid cell that neighbors might be in
int3 minCell = make_int3(0, 0, 0);
int3 maxCell = make_int3(0, 0, 0);
if (boidGridCellFloor.x > 0 && glm::fract(boidGridCell.x) <= 0.5f) minCell.x = -1;
if (boidGridCellFloor.y > 0 && glm::fract(boidGridCell.y) <= 0.5f) minCell.y = -1;
if (boidGridCellFloor.z > 0 && glm::fract(boidGridCell.z) <= 0.5f) minCell.z = -1;
if (boidGridCellFloor.x < gridResolution - 1 && glm::fract(boidGridCell.x) > 0.5f) maxCell.x = 1;
if (boidGridCellFloor.y < gridResolution - 1 && glm::fract(boidGridCell.y) > 0.5f) maxCell.y = 1;
if (boidGridCellFloor.z < gridResolution - 1 && glm::fract(boidGridCell.z) > 0.5f) maxCell.z = 1;
// Velocity change due to each rule
// Boids try to fly towards center of mass of neighboring boids
glm::vec3 velChange1(0.0f, 0.0f, 0.0f);
glm::vec3 perceivedCenter(0.0f, 0.0f, 0.0f);
int numNeighbors1 = 0;
// Boids try to keep a small distance away from other objects/boid
glm::vec3 velChange2(0.0f, 0.0f, 0.0f);
// Boids try to match velocity with nearby boids
glm::vec3 velChange3(0.0f, 0.0f, 0.0f);
glm::vec3 perceivedVel(0.0f, 0.0f, 0.0f);
int numNeighbors3 = 0;
for (int k = minCell.z + boidGridCellFloor.z; k <= maxCell.z + boidGridCellFloor.z; k++) {
for (int j = minCell.y + boidGridCellFloor.y; j <= maxCell.y + boidGridCellFloor.y; j++) {
for (int i = minCell.x + boidGridCellFloor.x; i <= maxCell.x + boidGridCellFloor.x; i++) {
int boidBCellIndex = gridIndex3Dto1D(i, j, k, gridResolution);
if (gridCellStartIndices[boidBCellIndex] > -1) {
for (int b = gridCellStartIndices[boidBCellIndex]; b <= gridCellEndIndices[boidBCellIndex]; b++) {
int boidBIndex = particleArrayIndices[b];
glm::vec3 boidBPos = pos[boidBIndex];
glm::vec3 boidBVel = vel1[boidBIndex];
if (boidBIndex != boidIndex) {
float dist = glm::distance(boidBPos, boidPos);
if (dist < rule1Distance) {
// Technically, we are finding the perceived center for this point (will be averaged out later, then scale to turn into velocity)
perceivedCenter += boidBPos;
numNeighbors1++;
}
if (dist < rule2Distance) velChange2 -= (boidBPos - boidPos);
if (dist < rule3Distance) {
// Technically, we are finding the perceived velocity for this point (will be averaged out later, then scale to turn into correct velocity)
perceivedVel += boidBVel;
numNeighbors3++;
}
}
}
}
}
}
}
// Finalize velocity change from rule 1, 2 and 3
if (numNeighbors1 > 0) {
perceivedCenter /= numNeighbors1;
velChange1 = (perceivedCenter - boidPos) * rule1Scale;
}
velChange2 *= rule2Scale;
if (numNeighbors3 > 0) {
perceivedVel /= numNeighbors3;
velChange3 = perceivedVel * rule3Scale;
}
vel2[boidIndex] = vel1[boidIndex] + velChange1 + velChange2 + velChange3;
// Clamp the speed
float speed = glm::length(vel2[boidIndex]);
if (speed > maxSpeed) {
vel2[boidIndex] = maxSpeed * glm::normalize(vel2[boidIndex]);
}
}
__global__ void kernRearrangeParticleData(
int N, glm::vec3* pos1, glm::vec3* pos2, glm::vec3* vel1, glm::vec3* vel2,
int* particleArrayIndices) {
int boidIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
if (boidIndex >= N) {
return;
}
int newDataIndex = particleArrayIndices[boidIndex];
pos2[boidIndex] = pos1[newDataIndex];
vel2[boidIndex] = vel1[newDataIndex];
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int boidIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
if (boidIndex >= N)
{
return;
}
glm::vec3 boidPos = pos[boidIndex];
glm::vec3 boidGridCell = inverseCellWidth * (boidPos - gridMin); // might not be rounded/aka integer
glm::vec3 boidGridCellFloor = glm::floor(boidGridCell); // must be rounded/aka integer
// The two below has each component within [-1, 1] as integers. Basically tells
// the range relative to the current boid cell that neighbors might be in
int3 minCell = make_int3(0, 0, 0);
int3 maxCell = make_int3(0, 0, 0);
if (boidGridCellFloor.x > 0 && glm::fract(boidGridCell.x) <= 0.5f) minCell.x = -1;
if (boidGridCellFloor.y > 0 && glm::fract(boidGridCell.y) <= 0.5f) minCell.y = -1;
if (boidGridCellFloor.z > 0 && glm::fract(boidGridCell.z) <= 0.5f) minCell.z = -1;
if (boidGridCellFloor.x < gridResolution - 1 && glm::fract(boidGridCell.x) > 0.5f) maxCell.x = 1;
if (boidGridCellFloor.y < gridResolution - 1 && glm::fract(boidGridCell.y) > 0.5f) maxCell.y = 1;
if (boidGridCellFloor.z < gridResolution - 1 && glm::fract(boidGridCell.z) > 0.5f) maxCell.z = 1;
// Velocity change due to each rule
// Boids try to fly towards center of mass of neighboring boids
glm::vec3 velChange1(0.0f, 0.0f, 0.0f);
glm::vec3 perceivedCenter(0.0f, 0.0f, 0.0f);
int numNeighbors1 = 0;
// Boids try to keep a small distance away from other objects/boid
glm::vec3 velChange2(0.0f, 0.0f, 0.0f);
// Boids try to match velocity with nearby boids
glm::vec3 velChange3(0.0f, 0.0f, 0.0f);
glm::vec3 perceivedVel(0.0f, 0.0f, 0.0f);
int numNeighbors3 = 0;
for (int k = minCell.z + boidGridCellFloor.z; k <= maxCell.z + boidGridCellFloor.z; k++) {
for (int j = minCell.y + boidGridCellFloor.y; j <= maxCell.y + boidGridCellFloor.y; j++) {
for (int i = minCell.x + boidGridCellFloor.x; i <= maxCell.x + boidGridCellFloor.x; i++) {
int boidBCellIndex = gridIndex3Dto1D(i, j, k, gridResolution);
if (gridCellStartIndices[boidBCellIndex] > -1) {
for (int boidBIndex = gridCellStartIndices[boidBCellIndex]; boidBIndex <= gridCellEndIndices[boidBCellIndex]; boidBIndex++) {
glm::vec3 boidBPos = pos[boidBIndex];
glm::vec3 boidBVel = vel1[boidBIndex];
if (boidBIndex != boidIndex) {
float dist = glm::distance(boidBPos, boidPos);
if (dist < rule1Distance) {
// Technically, we are finding the perceived center for this point (will be averaged out later, then scale to turn into velocity)
perceivedCenter += boidBPos;
numNeighbors1++;
}
if (dist < rule2Distance) velChange2 -= (boidBPos - boidPos);
if (dist < rule3Distance) {
// Technically, we are finding the perceived velocity for this point (will be averaged out later, then scale to turn into correct velocity)
perceivedVel += boidBVel;
numNeighbors3++;
}
}
}
}
}
}
}
// Finalize velocity change from rule 1, 2 and 3
if (numNeighbors1 > 0) {
perceivedCenter /= numNeighbors1;
velChange1 = (perceivedCenter - boidPos) * rule1Scale;
}
velChange2 *= rule2Scale;
if (numNeighbors3 > 0) {
perceivedVel /= numNeighbors3;
velChange3 = perceivedVel * rule3Scale;
}
vel2[boidIndex] = vel1[boidIndex] + velChange1 + velChange2 + velChange3;
// Clamp the speed
float speed = glm::length(vel2[boidIndex]);
if (speed > maxSpeed) {
vel2[boidIndex] = maxSpeed * glm::normalize(vel2[boidIndex]);
}
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// Use the kernels you wrote to step the simulation forward in time.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernUpdateVelocityBruteForce), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2);
// Ping-pong the velocity buffers
glm::vec3* temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
}
void Boids::stepSimulationScatteredGrid(float dt) {
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
dim3 objectFullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
dim3 cellFullBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <cellFullBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
kernComputeIndices << <objectFullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
kernIdentifyCellStartEnd << <objectFullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
kernUpdateVelNeighborSearchScattered << <objectFullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
kernUpdatePos << <objectFullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
glm::vec3* temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
}
void Boids::stepSimulationCoherentGrid(float dt) {
// Start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 objectFullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
dim3 cellFullBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <cellFullBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
kernComputeIndices << <objectFullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
kernIdentifyCellStartEnd << <objectFullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
kernRearrangeParticleData << <objectFullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, dev_pos2, dev_vel1, dev_vel2, dev_particleArrayIndices);
kernUpdateVelNeighborSearchCoherent << <objectFullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices, dev_pos2, dev_vel2, dev_vel1);
kernUpdatePos << <objectFullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos2, dev_vel1);
glm::vec3* temp = dev_pos;
dev_pos = dev_pos2;
dev_pos2 = temp;
}
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
// Free any additional buffers here.
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
hipFree(dev_particleArrayIndices);
hipFree(dev_particleGridIndices);
hipFree(dev_pos2);
}
void Boids::unitTest() {
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
// test kernComputeIndices -------------------
/* N = 5;
int* dev_testPos;
int* dev_testGridCellStartIndices;
int* dev_testGridCellEndIndices;
int* dev_testParticleArrayIndices;
int* dev_particleGridIndices;
std::unique_ptr<glm::vec3[]>testPos{ new glm::vec3[N] };
testPos[0] = glm::vec3(0.f, 0.f, 0.f);
testPos[1] = glm::vec3(1.f, 1.f, 1.f);
testPos[2] = glm::vec3(3.f, 3.f, 3.f);
testPos[3] = glm::vec3(5.f, 5.f, 5.f);
testPos[4] = glm::vec3(6.f, 6.f, 6.f);
hipMemcpy(dev_testPos, testPos.get(), sizeof(glm::vec3) * N, hipMemcpyHostToDevice);*/
// test kernIdentifyCellStartEnd -------------
return;
}
| 6754fbbc96bfbd93dab8ba46e64097c50485f2df.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 1024
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// Consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3* dev_pos2;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids at random positions within the simulation space.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
  checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// Allocate additional buffers here.
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
cudaMalloc((void**)&dev_pos2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos2 failed!");
cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
glm::vec3 velChange1(0.0f, 0.0f, 0.0f);
glm::vec3 perceivedCenter(0.0f, 0.0f, 0.0f);
int numNeighbors1 = 0;
// Rule 2: boids try to stay a distance d away from each other
glm::vec3 velChange2(0.0f, 0.0f, 0.0f);
// Rule 3: boids try to match the speed of surrounding boids
glm::vec3 velChange3(0.0f, 0.0f, 0.0f);
glm::vec3 perceivedVel(0.0f, 0.0f, 0.0f);
int numNeighbors3 = 0;
  // Loop to find and take into account neighbors for rules 1, 2, and 3
for (int i = 0; i < N; i++) {
if (i != iSelf) {
float dist = glm::distance(pos[i], pos[iSelf]);
if (dist < rule1Distance) {
perceivedCenter += pos[i];
numNeighbors1++;
}
if (dist < rule2Distance) {
velChange2 -= (pos[i] - pos[iSelf]);
}
if (dist < rule3Distance) {
perceivedVel += vel[i];
numNeighbors3++;
}
}
}
if (numNeighbors1 > 0)
{
perceivedCenter /= numNeighbors1;
velChange1 = (perceivedCenter - pos[iSelf]) * rule1Scale;
}
velChange2 *= rule2Scale;
if (numNeighbors3 > 0)
{
perceivedVel /= numNeighbors3;
velChange3 = perceivedVel * rule3Scale;
}
return velChange1 + velChange2 + velChange3;
}
/**
* Basic flocking
* For each of the `N` bodies, compute a new velocity from the three flocking rules.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
// Compute a new velocity based on pos and vel1
glm::vec3 newVel = vel1[index] + computeVelocityChange(N, index, pos, vel1);
// Clamp the speed
float speed = glm::length(newVel);
if (speed > maxSpeed) {
newVel = glm::normalize(newVel);
}
// Record the new velocity into vel2. Question: why NOT vel1?
vel2[index] = newVel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
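// Note: x varies fastest in this linearization, so looping z outermost,
// then y, then x innermost over a neighbor region visits cells whose 1D
// indices are consecutive in the inner loop:
//   for (int z = zMin; z <= zMax; z++)
//     for (int y = yMin; y <= yMax; y++)
//       for (int x = xMin; x <= xMax; x++)
//         // cell index = gridIndex3Dto1D(x, y, z, gridResolution)
// The neighbor-search kernels below iterate in this order.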
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int boidIndex = threadIdx.x + (blockIdx.x * blockDim.x);
if (boidIndex >= N) {
return;
}
glm::vec3 boidGridCell = glm::floor(inverseCellWidth * (pos[boidIndex] - gridMin));
gridIndices[boidIndex] = gridIndex3Dto1D(boidGridCell.x, boidGridCell.y, boidGridCell.z, gridResolution);
indices[boidIndex] = boidIndex;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x; // boid index
if (index >= N) {
return;
}
int gridIndex = particleGridIndices[index];
if (index == 0 || gridIndex != particleGridIndices[index - 1]) {
gridCellStartIndices[gridIndex] = index;
}
  // A cell that holds a single boid is both the start and the end of that
  // cell's range, so this check must not be an "else" of the start check above.
  if (index == N - 1 || gridIndex != particleGridIndices[index + 1]) {
gridCellEndIndices[gridIndex] = index;
}
}
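// Example (N = 5): a sorted particleGridIndices of [2, 2, 3, 7, 7] yields
//   gridCellStartIndices[2] = 0, gridCellEndIndices[2] = 1,
//   gridCellStartIndices[3] = 2, gridCellEndIndices[3] = 2,
//   gridCellStartIndices[7] = 3, gridCellEndIndices[7] = 4,
// while every cell that received no boids keeps the -1 sentinel written into
// gridCellStartIndices by kernResetIntBuffer, which is what the neighbor-search
// kernels test before touching the end index.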
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int* gridCellStartIndices, int* gridCellEndIndices,
int* particleArrayIndices,
glm::vec3* pos, glm::vec3* vel1, glm::vec3* vel2) {
// Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int boidIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
if (boidIndex >= N)
{
return;
}
glm::vec3 boidPos = pos[boidIndex];
glm::vec3 boidGridCell = inverseCellWidth * (boidPos - gridMin); // might not be rounded/aka integer
glm::vec3 boidGridCellFloor = glm::floor(boidGridCell); // must be rounded/aka integer
  // Each component of the two offsets below is an integer in [-1, 1]; together
  // they give the range of cells, relative to the current boid's cell, that may contain neighbors.
int3 minCell = make_int3(0, 0, 0);
int3 maxCell = make_int3(0, 0, 0);
if (boidGridCellFloor.x > 0 && glm::fract(boidGridCell.x) <= 0.5f) minCell.x = -1;
if (boidGridCellFloor.y > 0 && glm::fract(boidGridCell.y) <= 0.5f) minCell.y = -1;
if (boidGridCellFloor.z > 0 && glm::fract(boidGridCell.z) <= 0.5f) minCell.z = -1;
if (boidGridCellFloor.x < gridResolution - 1 && glm::fract(boidGridCell.x) > 0.5f) maxCell.x = 1;
if (boidGridCellFloor.y < gridResolution - 1 && glm::fract(boidGridCell.y) > 0.5f) maxCell.y = 1;
if (boidGridCellFloor.z < gridResolution - 1 && glm::fract(boidGridCell.z) > 0.5f) maxCell.z = 1;
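  // Because gridCellWidth is twice the largest rule distance, a boid's whole
  // neighborhood fits inside a 2x2x2 block of cells; the fract() tests above
  // pick the half of the current cell the boid sits in, so at most 8 cells
  // (fewer at the grid boundary) are scanned below.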
// Velocity change due to each rule
// Boids try to fly towards center of mass of neighboring boids
glm::vec3 velChange1(0.0f, 0.0f, 0.0f);
glm::vec3 perceivedCenter(0.0f, 0.0f, 0.0f);
int numNeighbors1 = 0;
// Boids try to keep a small distance away from other objects/boid
glm::vec3 velChange2(0.0f, 0.0f, 0.0f);
// Boids try to match velocity with nearby boids
glm::vec3 velChange3(0.0f, 0.0f, 0.0f);
glm::vec3 perceivedVel(0.0f, 0.0f, 0.0f);
int numNeighbors3 = 0;
for (int k = minCell.z + boidGridCellFloor.z; k <= maxCell.z + boidGridCellFloor.z; k++) {
for (int j = minCell.y + boidGridCellFloor.y; j <= maxCell.y + boidGridCellFloor.y; j++) {
for (int i = minCell.x + boidGridCellFloor.x; i <= maxCell.x + boidGridCellFloor.x; i++) {
int boidBCellIndex = gridIndex3Dto1D(i, j, k, gridResolution);
if (gridCellStartIndices[boidBCellIndex] > -1) {
for (int b = gridCellStartIndices[boidBCellIndex]; b <= gridCellEndIndices[boidBCellIndex]; b++) {
int boidBIndex = particleArrayIndices[b];
glm::vec3 boidBPos = pos[boidBIndex];
glm::vec3 boidBVel = vel1[boidBIndex];
if (boidBIndex != boidIndex) {
float dist = glm::distance(boidBPos, boidPos);
if (dist < rule1Distance) {
// Technically, we are finding the perceived center for this point (will be averaged out later, then scale to turn into velocity)
perceivedCenter += boidBPos;
numNeighbors1++;
}
if (dist < rule2Distance) velChange2 -= (boidBPos - boidPos);
if (dist < rule3Distance) {
// Technically, we are finding the perceived velocity for this point (will be averaged out later, then scale to turn into correct velocity)
perceivedVel += boidBVel;
numNeighbors3++;
}
}
}
}
}
}
}
// Finalize velocity change from rule 1, 2 and 3
if (numNeighbors1 > 0) {
perceivedCenter /= numNeighbors1;
velChange1 = (perceivedCenter - boidPos) * rule1Scale;
}
velChange2 *= rule2Scale;
if (numNeighbors3 > 0) {
perceivedVel /= numNeighbors3;
velChange3 = perceivedVel * rule3Scale;
}
vel2[boidIndex] = vel1[boidIndex] + velChange1 + velChange2 + velChange3;
// Clamp the speed
float speed = glm::length(vel2[boidIndex]);
if (speed > maxSpeed) {
vel2[boidIndex] = maxSpeed * glm::normalize(vel2[boidIndex]);
}
}
__global__ void kernRearrangeParticleData(
int N, glm::vec3* pos1, glm::vec3* pos2, glm::vec3* vel1, glm::vec3* vel2,
int* particleArrayIndices) {
int boidIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
if (boidIndex >= N) {
return;
}
int newDataIndex = particleArrayIndices[boidIndex];
pos2[boidIndex] = pos1[newDataIndex];
vel2[boidIndex] = vel1[newDataIndex];
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int boidIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
if (boidIndex >= N)
{
return;
}
glm::vec3 boidPos = pos[boidIndex];
glm::vec3 boidGridCell = inverseCellWidth * (boidPos - gridMin); // might not be rounded/aka integer
glm::vec3 boidGridCellFloor = glm::floor(boidGridCell); // must be rounded/aka integer
  // Each component of the two offsets below is an integer in [-1, 1]; together
  // they give the range of cells, relative to the current boid's cell, that may contain neighbors.
int3 minCell = make_int3(0, 0, 0);
int3 maxCell = make_int3(0, 0, 0);
if (boidGridCellFloor.x > 0 && glm::fract(boidGridCell.x) <= 0.5f) minCell.x = -1;
if (boidGridCellFloor.y > 0 && glm::fract(boidGridCell.y) <= 0.5f) minCell.y = -1;
if (boidGridCellFloor.z > 0 && glm::fract(boidGridCell.z) <= 0.5f) minCell.z = -1;
if (boidGridCellFloor.x < gridResolution - 1 && glm::fract(boidGridCell.x) > 0.5f) maxCell.x = 1;
if (boidGridCellFloor.y < gridResolution - 1 && glm::fract(boidGridCell.y) > 0.5f) maxCell.y = 1;
if (boidGridCellFloor.z < gridResolution - 1 && glm::fract(boidGridCell.z) > 0.5f) maxCell.z = 1;
// Velocity change due to each rule
// Boids try to fly towards center of mass of neighboring boids
glm::vec3 velChange1(0.0f, 0.0f, 0.0f);
glm::vec3 perceivedCenter(0.0f, 0.0f, 0.0f);
int numNeighbors1 = 0;
// Boids try to keep a small distance away from other objects/boid
glm::vec3 velChange2(0.0f, 0.0f, 0.0f);
// Boids try to match velocity with nearby boids
glm::vec3 velChange3(0.0f, 0.0f, 0.0f);
glm::vec3 perceivedVel(0.0f, 0.0f, 0.0f);
int numNeighbors3 = 0;
for (int k = minCell.z + boidGridCellFloor.z; k <= maxCell.z + boidGridCellFloor.z; k++) {
for (int j = minCell.y + boidGridCellFloor.y; j <= maxCell.y + boidGridCellFloor.y; j++) {
for (int i = minCell.x + boidGridCellFloor.x; i <= maxCell.x + boidGridCellFloor.x; i++) {
int boidBCellIndex = gridIndex3Dto1D(i, j, k, gridResolution);
if (gridCellStartIndices[boidBCellIndex] > -1) {
for (int boidBIndex = gridCellStartIndices[boidBCellIndex]; boidBIndex <= gridCellEndIndices[boidBCellIndex]; boidBIndex++) {
glm::vec3 boidBPos = pos[boidBIndex];
glm::vec3 boidBVel = vel1[boidBIndex];
if (boidBIndex != boidIndex) {
float dist = glm::distance(boidBPos, boidPos);
if (dist < rule1Distance) {
// Technically, we are finding the perceived center for this point (will be averaged out later, then scale to turn into velocity)
perceivedCenter += boidBPos;
numNeighbors1++;
}
if (dist < rule2Distance) velChange2 -= (boidBPos - boidPos);
if (dist < rule3Distance) {
// Technically, we are finding the perceived velocity for this point (will be averaged out later, then scale to turn into correct velocity)
perceivedVel += boidBVel;
numNeighbors3++;
}
}
}
}
}
}
}
// Finalize velocity change from rule 1, 2 and 3
if (numNeighbors1 > 0) {
perceivedCenter /= numNeighbors1;
velChange1 = (perceivedCenter - boidPos) * rule1Scale;
}
velChange2 *= rule2Scale;
if (numNeighbors3 > 0) {
perceivedVel /= numNeighbors3;
velChange3 = perceivedVel * rule3Scale;
}
vel2[boidIndex] = vel1[boidIndex] + velChange1 + velChange2 + velChange3;
// Clamp the speed
float speed = glm::length(vel2[boidIndex]);
if (speed > maxSpeed) {
vel2[boidIndex] = maxSpeed * glm::normalize(vel2[boidIndex]);
}
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// Use the kernels you wrote to step the simulation forward in time.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernUpdateVelocityBruteForce<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel1, dev_vel2);
kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
// Ping-pong the velocity buffers
glm::vec3* temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
}
void Boids::stepSimulationScatteredGrid(float dt) {
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
  // - Ping-pong buffers as needed
dim3 objectFullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
dim3 cellFullBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <cellFullBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
kernComputeIndices << <objectFullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
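  // After this sort dev_particleGridIndices is in ascending cell order and
  // dev_particleArrayIndices[i] names the boid that occupies sorted slot i.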
kernIdentifyCellStartEnd << <objectFullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
kernUpdateVelNeighborSearchScattered << <objectFullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
kernUpdatePos << <objectFullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
glm::vec3* temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
}
void Boids::stepSimulationCoherentGrid(float dt) {
  // Start by copying Boids::stepSimulationScatteredGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 objectFullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
dim3 cellFullBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <cellFullBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
kernComputeIndices << <objectFullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
kernIdentifyCellStartEnd << <objectFullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
kernRearrangeParticleData << <objectFullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, dev_pos2, dev_vel1, dev_vel2, dev_particleArrayIndices);
kernUpdateVelNeighborSearchCoherent << <objectFullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices, dev_pos2, dev_vel2, dev_vel1);
kernUpdatePos << <objectFullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos2, dev_vel1);
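  // Only the position buffers are ping-ponged here: the coherent velocity
  // kernel already wrote the new velocities into dev_vel1 (in cell-coherent
  // order), and dev_vel2 merely held the reshuffled old velocities, so it is
  // simply overwritten by the next frame's kernRearrangeParticleData.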
glm::vec3* temp = dev_pos;
dev_pos = dev_pos2;
dev_pos2 = temp;
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
// Free any additional buffers here.
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
cudaFree(dev_particleArrayIndices);
cudaFree(dev_particleGridIndices);
cudaFree(dev_pos2);
}
void Boids::unitTest() {
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
// test kernComputeIndices -------------------
/* N = 5;
int* dev_testPos;
int* dev_testGridCellStartIndices;
int* dev_testGridCellEndIndices;
int* dev_testParticleArrayIndices;
int* dev_particleGridIndices;
std::unique_ptr<glm::vec3[]>testPos{ new glm::vec3[N] };
testPos[0] = glm::vec3(0.f, 0.f, 0.f);
testPos[1] = glm::vec3(1.f, 1.f, 1.f);
testPos[2] = glm::vec3(3.f, 3.f, 3.f);
testPos[3] = glm::vec3(5.f, 5.f, 5.f);
testPos[4] = glm::vec3(6.f, 6.f, 6.f);
cudaMemcpy(dev_testPos, testPos.get(), sizeof(glm::vec3) * N, cudaMemcpyHostToDevice);*/
// test kernIdentifyCellStartEnd -------------
return;
}
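// A minimal host-side driver sketch (hypothetical step count, boid count and
// timestep; it only calls the Boids:: functions defined above):
//
//   Boids::initSimulation(5000);
//   for (int frame = 0; frame < 1000; ++frame) {
//     Boids::stepSimulationCoherentGrid(0.2f);   // or ...Naive / ...ScatteredGrid
//   }
//   Boids::endSimulation();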
|
319c9d5c766d8c7b84fd889c89d1d5f2c2f456d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Utility / shared functionality for bisection kernels */
#ifndef _BISECT_UTIL_H_
#define _BISECT_UTIL_H_
// includes, project
#include "config.h"
#include "util.h"
////////////////////////////////////////////////////////////////////////////////
//! Compute the next lower power of two of n
//! @param n number for which next higher power of two is sought
////////////////////////////////////////////////////////////////////////////////
__device__
inline int
floorPow2(int n)
{
// early out if already power of two
if (0 == (n & (n-1)))
{
return n;
}
int exp;
frexp((float)n, &exp);
return (1 << (exp - 1));
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the next higher power of two of n
//! @param n number for which next higher power of two is sought
////////////////////////////////////////////////////////////////////////////////
__device__
inline int
ceilPow2(int n)
{
// early out if already power of two
if (0 == (n & (n-1)))
{
return n;
}
int exp;
frexp((float)n, &exp);
return (1 << exp);
}
////////////////////////////////////////////////////////////////////////////////
//! Compute midpoint of interval [\a left, \a right] avoiding overflow if
//! possible
//! @param left left / lower limit of interval
//! @param right right / upper limit of interval
////////////////////////////////////////////////////////////////////////////////
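//! Note: when left and right share a sign, forming (left + right) directly can
//! overflow in single precision (e.g. both limits near 3e38f), so the midpoint
//! is built from the difference instead; with mixed signs the plain average
//! cannot overflow.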
__device__
inline float
computeMidpoint(const float left, const float right)
{
float mid;
if (sign_f(left) == sign_f(right))
{
mid = left + (right - left) * 0.5f;
}
else
{
mid = (left + right) * 0.5f;
}
return mid;
}
////////////////////////////////////////////////////////////////////////////////
//! Check if interval converged and store appropriately
//! @param addr address where to store the information of the interval
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param right_count eigenvalues less than \a right
//! @param precision desired precision for eigenvalues
////////////////////////////////////////////////////////////////////////////////
template<class S, class T>
__device__
void
storeInterval(unsigned int addr,
float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float left, float right,
S left_count, S right_count,
float precision)
{
s_left_count[addr] = left_count;
s_right_count[addr] = right_count;
// check if interval converged
float t0 = abs(right - left);
float t1 = max(abs(left), abs(right)) * precision;
if (t0 <= max(MIN_ABS_INTERVAL, t1))
{
// compute mid point
float lambda = computeMidpoint(left, right);
// mark as converged
s_left[addr] = lambda;
s_right[addr] = lambda;
}
else
{
// store current limits
s_left[addr] = left;
s_right[addr] = right;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute number of eigenvalues that are smaller than x given a symmetric,
//! real, and tridiagonal matrix
//! @param g_d diagonal elements stored in global memory
//! @param g_s superdiagonal elements stored in global memory
//! @param n size of matrix
//! @param x value for which the number of eigenvalues that are smaller is
//! sought
//! @param tid thread identifier (e.g. threadIdx.x or gtid)
//! @param num_intervals_active number of active intervals / threads that
//! currently process an interval
//! @param s_d scratch space to store diagonal entries of the tridiagonal
//! matrix in shared memory
//! @param s_s scratch space to store superdiagonal entries of the tridiagonal
//! matrix in shared memory
//! @param converged flag if the current thread is already converged (that
//! is count does not have to be computed)
////////////////////////////////////////////////////////////////////////////////
__device__
inline unsigned int
computeNumSmallerEigenvals(float *g_d, float *g_s, const unsigned int n,
const float x,
const unsigned int tid,
const unsigned int num_intervals_active,
float *s_d, float *s_s,
unsigned int converged
)
{
float delta = 1.0f;
unsigned int count = 0;
__syncthreads();
// read data into shared memory
if (threadIdx.x < n)
{
s_d[threadIdx.x] = *(g_d + threadIdx.x);
s_s[threadIdx.x] = *(g_s + threadIdx.x - 1);
}
__syncthreads();
// perform loop only for active threads
if ((tid < num_intervals_active) && (0 == converged))
{
// perform (optimized) Gaussian elimination to determine the number
// of eigenvalues that are smaller than n
for (unsigned int k = 0; k < n; ++k)
{
delta = s_d[k] - x - (s_s[k] * s_s[k]) / delta;
count += (delta < 0) ? 1 : 0;
}
} // end if thread currently processing an interval
return count;
}
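// Note: the recurrence delta = d[k] - x - s[k]^2 / delta produces the pivots of
// the LDL^T factorization of (T - x*I); by Sylvester's law of inertia the count
// of negative pivots equals the number of eigenvalues smaller than x.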
////////////////////////////////////////////////////////////////////////////////
//! Compute number of eigenvalues that are smaller than x given a symmetric,
//! real, and tridiagonal matrix
//! @param g_d diagonal elements stored in global memory
//! @param g_s superdiagonal elements stored in global memory
//! @param n size of matrix
//! @param x value for which the number of eigenvalues that are smaller is
//! sought
//! @param tid thread identifier (e.g. threadIdx.x or gtid)
//! @param num_intervals_active number of active intervals / threads that
//! currently process an interval
//! @param s_d scratch space to store diagonal entries of the tridiagonal
//! matrix in shared memory
//! @param s_s scratch space to store superdiagonal entries of the tridiagonal
//! matrix in shared memory
//! @param converged flag if the current thread is already converged (that
//! is count does not have to be computed)
////////////////////////////////////////////////////////////////////////////////
__device__
inline unsigned int
computeNumSmallerEigenvalsLarge(float *g_d, float *g_s, const unsigned int n,
const float x,
const unsigned int tid,
const unsigned int num_intervals_active,
float *s_d, float *s_s,
unsigned int converged
)
{
float delta = 1.0f;
unsigned int count = 0;
unsigned int rem = n;
// do until whole diagonal and superdiagonal has been loaded and processed
for (unsigned int i = 0; i < n; i += blockDim.x)
{
__syncthreads();
// read new chunk of data into shared memory
if ((i + threadIdx.x) < n)
{
s_d[threadIdx.x] = *(g_d + i + threadIdx.x);
s_s[threadIdx.x] = *(g_s + i + threadIdx.x - 1);
}
__syncthreads();
if (tid < num_intervals_active)
{
// perform (optimized) Gaussian elimination to determine the number
// of eigenvalues that are smaller than n
for (unsigned int k = 0; k < min(rem,blockDim.x); ++k)
{
delta = s_d[k] - x - (s_s[k] * s_s[k]) / delta;
// delta = (abs( delta) < (1.0e-10)) ? -(1.0e-10) : delta;
count += (delta < 0) ? 1 : 0;
}
} // end if thread currently processing an interval
rem -= blockDim.x;
}
return count;
}
////////////////////////////////////////////////////////////////////////////////
//! Store all non-empty intervals resulting from the subdivision of the interval
//! currently processed by the thread
//! @param addr base address for storing intervals
//! @param num_threads_active number of threads / intervals in current sweep
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param mid midpoint of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param mid_count eigenvalues less than \a mid
//! @param right_count eigenvalues less than \a right
//! @param precision desired precision for eigenvalues
//! @param compact_second_chunk shared mem flag if second chunk is used and
//! ergo requires compaction
//! @param s_compaction_list_exc helper array for stream compaction,
//! s_compaction_list_exc[tid] = 1 when the
//! thread generated two child intervals
//! @param is_active_second mark if the thread has a second non-empty child interval
////////////////////////////////////////////////////////////////////////////////
template<class S, class T>
__device__
void
storeNonEmptyIntervals(unsigned int addr,
const unsigned int num_threads_active,
float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float left, float mid, float right,
const S left_count,
const S mid_count,
const S right_count,
float precision,
unsigned int &compact_second_chunk,
T *s_compaction_list_exc,
unsigned int &is_active_second)
{
// check if both child intervals are valid
if ((left_count != mid_count) && (mid_count != right_count))
{
// store the left interval
storeInterval(addr, s_left, s_right, s_left_count, s_right_count,
left, mid, left_count, mid_count, precision);
// mark that a second interval has been generated, only stored after
// stream compaction of second chunk
is_active_second = 1;
s_compaction_list_exc[threadIdx.x] = 1;
compact_second_chunk = 1;
}
else
{
// only one non-empty child interval
// mark that no second child
is_active_second = 0;
s_compaction_list_exc[threadIdx.x] = 0;
// store the one valid child interval
if (left_count != mid_count)
{
storeInterval(addr, s_left, s_right, s_left_count, s_right_count,
left, mid, left_count, mid_count, precision);
}
else
{
storeInterval(addr, s_left, s_right, s_left_count, s_right_count,
mid, right, mid_count, right_count, precision);
}
}
}
////////////////////////////////////////////////////////////////////////////////
//! Create indices for compaction, that is process \a s_compaction_list_exc
//! which is 1 for intervals that generated a second child and 0 otherwise
//! and create for each of the non-zero elements the index where the new
//! interval belongs to in a compact representation of all generated second
//! children
//! @param s_compaction_list_exc list containing the flags which threads
//! generated two children
//! @param num_threads_compaction number of threads to employ for compaction
////////////////////////////////////////////////////////////////////////////////
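//! Note: this routine is a shared-memory prefix sum over the 0/1 flags (an
//! up-sweep followed by a partial down-sweep); the caller then uses the summed
//! flags as write offsets when compacting the second-child intervals.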
template<class T>
__device__
void
createIndicesCompaction(T *s_compaction_list_exc,
unsigned int num_threads_compaction)
{
unsigned int offset = 1;
const unsigned int tid = threadIdx.x;
// higher levels of scan tree
for (int d = (num_threads_compaction >> 1); d > 0; d >>= 1)
{
__syncthreads();
if (tid < d)
{
unsigned int ai = offset*(2*tid+1)-1;
unsigned int bi = offset*(2*tid+2)-1;
s_compaction_list_exc[bi] = s_compaction_list_exc[bi]
+ s_compaction_list_exc[ai];
}
offset <<= 1;
}
// traverse down tree: first down to level 2 across
for (int d = 2; d < num_threads_compaction; d <<= 1)
{
offset >>= 1;
__syncthreads();
if (tid < (d-1))
{
unsigned int ai = offset*(tid+1) - 1;
unsigned int bi = ai + (offset >> 1);
s_compaction_list_exc[bi] = s_compaction_list_exc[bi]
+ s_compaction_list_exc[ai];
}
}
__syncthreads();
}
///////////////////////////////////////////////////////////////////////////////
//! Perform stream compaction for second child intervals
//! @param s_left shared
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param mid midpoint of current interval (left of new interval)
//! @param right upper limit of interval
//! @param mid_count eigenvalues less than \a mid
//! @param s_compaction_list list containing the indices where the data has
//! to be stored
//! @param num_threads_active number of active threads / intervals
//! @param is_active_second mark if the thread has a second non-empty child interval
///////////////////////////////////////////////////////////////////////////////
template<class T>
__device__
void
compactIntervals(float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float mid, float right,
unsigned int mid_count, unsigned int right_count,
T *s_compaction_list,
unsigned int num_threads_active,
unsigned int is_active_second)
{
const unsigned int tid = threadIdx.x;
// perform compaction / copy data for all threads where the second
// child is not dead
if ((tid < num_threads_active) && (1 == is_active_second))
{
unsigned int addr_w = num_threads_active + s_compaction_list[tid];
s_left[addr_w] = mid;
s_right[addr_w] = right;
s_left_count[addr_w] = mid_count;
s_right_count[addr_w] = right_count;
}
}
///////////////////////////////////////////////////////////////////////////////
//! Store intervals that have already converged (w.r.t. the desired precision),
//! duplicating intervals that contain multiple eigenvalues
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param mid midpoint of interval (updated if split is necessary)
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param mid_count eigenvalues less than \a mid
//! @param right_count eigenvalues less than \a right
//! @param s_compaction_list_exc helper array for stream compaction, updated
//! at tid if split is necessary
//! @param compact_second_chunk shared mem flag if second chunk is used and
//! ergo requires compaction
//! @param num_threads_active number of active threads / intervals
///////////////////////////////////////////////////////////////////////////////
template<class T, class S>
__device__
void
storeIntervalConverged(float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float &left, float &mid, float &right,
S &left_count, S &mid_count, S &right_count,
T *s_compaction_list_exc,
unsigned int &compact_second_chunk,
const unsigned int num_threads_active)
{
const unsigned int tid = threadIdx.x;
const unsigned int multiplicity = right_count - left_count;
// check multiplicity of eigenvalue
if (1 == multiplicity)
{
// just re-store intervals, simple eigenvalue
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = right_count;
// mark that no second child / clear
s_right_count[tid + num_threads_active] = 0;
s_compaction_list_exc[tid] = 0;
}
else
{
// number of eigenvalues after the split less than mid
mid_count = left_count + (multiplicity >> 1);
// store left interval
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = mid_count;
mid = left;
// mark that second child interval exists
s_right_count[tid + num_threads_active] = right_count;
s_compaction_list_exc[tid] = 1;
compact_second_chunk = 1;
}
}
template<class T, class S>
__device__
void
storeIntervalConverged(float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float &left, float &mid, float &right,
S &left_count, S &mid_count, S &right_count,
T *s_compaction_list_exc,
unsigned int &compact_second_chunk,
const unsigned int num_threads_active,
unsigned int &is_active_second)
{
const unsigned int tid = threadIdx.x;
const unsigned int multiplicity = right_count - left_count;
// check multiplicity of eigenvalue
if (1 == multiplicity)
{
// just re-store intervals, simple eigenvalue
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = right_count;
// mark that no second child / clear
is_active_second = 0;
s_compaction_list_exc[tid] = 0;
}
else
{
// number of eigenvalues after the split less than mid
mid_count = left_count + (multiplicity >> 1);
// store left interval
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = mid_count;
mid = left;
// mark that second child interval exists
is_active_second = 1;
s_compaction_list_exc[tid] = 1;
compact_second_chunk = 1;
}
}
///////////////////////////////////////////////////////////////////////////////
//! Subdivide interval if active and not already converged
//! @param tid id of thread
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param num_threads_active number of active threads in warp
//! @param left lower limit of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param right_count eigenvalues less than \a right
//! @param all_threads_converged shared memory flag if all threads are
//! converged
///////////////////////////////////////////////////////////////////////////////
template<class T>
__device__
void
subdivideActiveInterval(const unsigned int tid,
float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
const unsigned int num_threads_active,
float &left, float &right,
unsigned int &left_count, unsigned int &right_count,
float &mid, unsigned int &all_threads_converged)
{
// for all active threads
if (tid < num_threads_active)
{
left = s_left[tid];
right = s_right[tid];
left_count = s_left_count[tid];
right_count = s_right_count[tid];
// check if thread already converged
if (left != right)
{
mid = computeMidpoint(left, right);
all_threads_converged = 0;
}
else if ((right_count - left_count) > 1)
{
// mark as not converged if multiple eigenvalues enclosed
// duplicate interval in storeIntervalsConverged()
all_threads_converged = 0;
}
} // end for all active threads
}
#endif // #ifndef _BISECT_UTIL_H_
| 319c9d5c766d8c7b84fd889c89d1d5f2c2f456d4.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Utility / shared functionality for bisection kernels */
#ifndef _BISECT_UTIL_H_
#define _BISECT_UTIL_H_
// includes, project
#include "config.h"
#include "util.h"
////////////////////////////////////////////////////////////////////////////////
//! Compute the next lower power of two of n
//! @param n number for which next higher power of two is sought
////////////////////////////////////////////////////////////////////////////////
__device__
inline int
floorPow2(int n)
{
// early out if already power of two
if (0 == (n & (n-1)))
{
return n;
}
int exp;
frexp((float)n, &exp);
return (1 << (exp - 1));
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the next higher power of two of n
//! @param n number for which next higher power of two is sought
////////////////////////////////////////////////////////////////////////////////
__device__
inline int
ceilPow2(int n)
{
// early out if already power of two
if (0 == (n & (n-1)))
{
return n;
}
int exp;
frexp((float)n, &exp);
return (1 << exp);
}
////////////////////////////////////////////////////////////////////////////////
//! Compute midpoint of interval [\a left, \a right] avoiding overflow if
//! possible
//! @param left left / lower limit of interval
//! @param right right / upper limit of interval
////////////////////////////////////////////////////////////////////////////////
__device__
inline float
computeMidpoint(const float left, const float right)
{
float mid;
if (sign_f(left) == sign_f(right))
{
mid = left + (right - left) * 0.5f;
}
else
{
mid = (left + right) * 0.5f;
}
return mid;
}
////////////////////////////////////////////////////////////////////////////////
//! Check if interval converged and store appropriately
//! @param addr address where to store the information of the interval
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param right_count eigenvalues less than \a right
//! @param precision desired precision for eigenvalues
////////////////////////////////////////////////////////////////////////////////
template<class S, class T>
__device__
void
storeInterval(unsigned int addr,
float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float left, float right,
S left_count, S right_count,
float precision)
{
s_left_count[addr] = left_count;
s_right_count[addr] = right_count;
// check if interval converged
float t0 = abs(right - left);
float t1 = max(abs(left), abs(right)) * precision;
if (t0 <= max(MIN_ABS_INTERVAL, t1))
{
// compute mid point
float lambda = computeMidpoint(left, right);
// mark as converged
s_left[addr] = lambda;
s_right[addr] = lambda;
}
else
{
// store current limits
s_left[addr] = left;
s_right[addr] = right;
}
}
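// Note: a converged interval is flagged by collapsing it to a point
// (s_left[addr] == s_right[addr] == midpoint); subdivideActiveInterval later
// tests left != right to decide whether further bisection is still needed.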
////////////////////////////////////////////////////////////////////////////////
//! Compute number of eigenvalues that are smaller than x given a symmetric,
//! real, and tridiagonal matrix
//! @param g_d diagonal elements stored in global memory
//! @param g_s superdiagonal elements stored in global memory
//! @param n size of matrix
//! @param x value for which the number of eigenvalues that are smaller is
//! sought
//! @param tid thread identifier (e.g. threadIdx.x or gtid)
//! @param num_intervals_active number of active intervals / threads that
//! currently process an interval
//! @param s_d scratch space to store diagonal entries of the tridiagonal
//! matrix in shared memory
//! @param s_s scratch space to store superdiagonal entries of the tridiagonal
//! matrix in shared memory
//! @param converged flag if the current thread is already converged (that
//! is count does not have to be computed)
////////////////////////////////////////////////////////////////////////////////
__device__
inline unsigned int
computeNumSmallerEigenvals(float *g_d, float *g_s, const unsigned int n,
const float x,
const unsigned int tid,
const unsigned int num_intervals_active,
float *s_d, float *s_s,
unsigned int converged
)
{
float delta = 1.0f;
unsigned int count = 0;
__syncthreads();
// read data into shared memory
if (threadIdx.x < n)
{
s_d[threadIdx.x] = *(g_d + threadIdx.x);
s_s[threadIdx.x] = *(g_s + threadIdx.x - 1);
}
__syncthreads();
// perform loop only for active threads
if ((tid < num_intervals_active) && (0 == converged))
{
// perform (optimized) Gaussian elimination to determine the number
// of eigenvalues that are smaller than n
for (unsigned int k = 0; k < n; ++k)
{
delta = s_d[k] - x - (s_s[k] * s_s[k]) / delta;
count += (delta < 0) ? 1 : 0;
}
} // end if thread currently processing an interval
return count;
}
////////////////////////////////////////////////////////////////////////////////
//! Compute number of eigenvalues that are smaller than x given a symmetric,
//! real, and tridiagonal matrix
//! @param g_d diagonal elements stored in global memory
//! @param g_s superdiagonal elements stored in global memory
//! @param n size of matrix
//! @param x value for which the number of eigenvalues that are smaller is
//! sought
//! @param tid thread identifier (e.g. threadIdx.x or gtid)
//! @param num_intervals_active number of active intervals / threads that
//! currently process an interval
//! @param s_d scratch space to store diagonal entries of the tridiagonal
//! matrix in shared memory
//! @param s_s scratch space to store superdiagonal entries of the tridiagonal
//! matrix in shared memory
//! @param converged flag if the current thread is already converged (that
//! is count does not have to be computed)
////////////////////////////////////////////////////////////////////////////////
__device__
inline unsigned int
computeNumSmallerEigenvalsLarge(float *g_d, float *g_s, const unsigned int n,
const float x,
const unsigned int tid,
const unsigned int num_intervals_active,
float *s_d, float *s_s,
unsigned int converged
)
{
float delta = 1.0f;
unsigned int count = 0;
unsigned int rem = n;
// do until whole diagonal and superdiagonal has been loaded and processed
for (unsigned int i = 0; i < n; i += blockDim.x)
{
__syncthreads();
// read new chunk of data into shared memory
if ((i + threadIdx.x) < n)
{
s_d[threadIdx.x] = *(g_d + i + threadIdx.x);
s_s[threadIdx.x] = *(g_s + i + threadIdx.x - 1);
}
__syncthreads();
if (tid < num_intervals_active)
{
// perform (optimized) Gaussian elimination to determine the number
// of eigenvalues that are smaller than n
for (unsigned int k = 0; k < min(rem,blockDim.x); ++k)
{
delta = s_d[k] - x - (s_s[k] * s_s[k]) / delta;
// delta = (abs( delta) < (1.0e-10)) ? -(1.0e-10) : delta;
count += (delta < 0) ? 1 : 0;
}
} // end if thread currently processing an interval
rem -= blockDim.x;
}
return count;
}
////////////////////////////////////////////////////////////////////////////////
//! Store all non-empty intervals resulting from the subdivision of the interval
//! currently processed by the thread
//! @param addr base address for storing intervals
//! @param num_threads_active number of threads / intervals in current sweep
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param mid midpoint of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param mid_count eigenvalues less than \a mid
//! @param right_count eigenvalues less than \a right
//! @param precision desired precision for eigenvalues
//! @param compact_second_chunk shared mem flag if second chunk is used and
//! ergo requires compaction
//! @param s_compaction_list_exc helper array for stream compaction,
//! s_compaction_list_exc[tid] = 1 when the
//! thread generated two child intervals
//! @param is_active_second mark if thread has a second non-empty child interval
////////////////////////////////////////////////////////////////////////////////
template<class S, class T>
__device__
void
storeNonEmptyIntervals(unsigned int addr,
const unsigned int num_threads_active,
float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float left, float mid, float right,
const S left_count,
const S mid_count,
const S right_count,
float precision,
unsigned int &compact_second_chunk,
T *s_compaction_list_exc,
unsigned int &is_active_second)
{
// check if both child intervals are valid
if ((left_count != mid_count) && (mid_count != right_count))
{
// store the left interval
storeInterval(addr, s_left, s_right, s_left_count, s_right_count,
left, mid, left_count, mid_count, precision);
// mark that a second interval has been generated, only stored after
// stream compaction of second chunk
is_active_second = 1;
s_compaction_list_exc[threadIdx.x] = 1;
compact_second_chunk = 1;
}
else
{
// only one non-empty child interval
// mark that no second child
is_active_second = 0;
s_compaction_list_exc[threadIdx.x] = 0;
// store the one valid child interval
if (left_count != mid_count)
{
storeInterval(addr, s_left, s_right, s_left_count, s_right_count,
left, mid, left_count, mid_count, precision);
}
else
{
storeInterval(addr, s_left, s_right, s_left_count, s_right_count,
mid, right, mid_count, right_count, precision);
}
}
}
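// [Editor's sketch, not part of the original sample] The branch above reduces
// to a small decision on the three eigenvalue counts; mirroring it on the host
// is useful for unit-testing the interval bookkeeping. Names are illustrative.
struct ChildIntervals
{
    bool store_left;   // keep (left, mid)
    bool store_right;  // keep (mid, right); goes through stream compaction
                       // whenever store_left is also set
};
inline ChildIntervals
classifyChildren(const unsigned int left_count, const unsigned int mid_count,
                 const unsigned int right_count)
{
    ChildIntervals c;
    c.store_left  = (left_count != mid_count);
    c.store_right = (mid_count  != right_count);
    return c;
}
// Example: counts (2, 4, 5) keep both children; counts (2, 2, 5) keep only the
// right child, exactly as storeNonEmptyIntervals() does above.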
////////////////////////////////////////////////////////////////////////////////
//! Create indices for compaction, that is process \a s_compaction_list_exc
//! which is 1 for intervals that generated a second child and 0 otherwise
//! and create for each of the non-zero elements the index where the new
//! interval belongs to in a compact representation of all generated second
//! children
//! @param s_compaction_list_exc list containing the flags which threads
//! generated two children
//! @param num_threads_compaction number of threads to employ for compaction
////////////////////////////////////////////////////////////////////////////////
template<class T>
__device__
void
createIndicesCompaction(T *s_compaction_list_exc,
unsigned int num_threads_compaction)
{
unsigned int offset = 1;
const unsigned int tid = threadIdx.x;
// higher levels of scan tree
for (int d = (num_threads_compaction >> 1); d > 0; d >>= 1)
{
__syncthreads();
if (tid < d)
{
unsigned int ai = offset*(2*tid+1)-1;
unsigned int bi = offset*(2*tid+2)-1;
s_compaction_list_exc[bi] = s_compaction_list_exc[bi]
+ s_compaction_list_exc[ai];
}
offset <<= 1;
}
// traverse down tree: first down to level 2 across
for (int d = 2; d < num_threads_compaction; d <<= 1)
{
offset >>= 1;
__syncthreads();
if (tid < (d-1))
{
unsigned int ai = offset*(tid+1) - 1;
unsigned int bi = ai + (offset >> 1);
s_compaction_list_exc[bi] = s_compaction_list_exc[bi]
+ s_compaction_list_exc[ai];
}
}
__syncthreads();
}
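// [Editor's sketch, not part of the original sample] The two tree passes above
// amount to an in-place inclusive prefix sum over s_compaction_list_exc; the
// caller (outside this excerpt) is expected to alias that pointer one element
// past the start of the real compaction list, so reading the un-shifted list
// yields the exclusive scan used as a write index in compactIntervals().
// A serial reference of the scan itself:
inline void
inclusiveScanHost(unsigned int *flags, const unsigned int n)
{
    for (unsigned int i = 1; i < n; ++i)
    {
        flags[i] += flags[i - 1];  // flags[i] = number of set flags in [0, i]
    }
}
// Example: {1, 0, 1, 1} becomes {1, 1, 2, 3}; shifting the read by one gives
// the exclusive scan {0, 1, 1, 2}, i.e. each flagged thread's target slot.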
///////////////////////////////////////////////////////////////////////////////
//! Perform stream compaction for second child intervals
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param mid midpoint of current interval (left of new interval)
//! @param right upper limit of interval
//! @param mid_count eigenvalues less than \a mid
//! @param right_count eigenvalues less than \a right
//! @param s_compaction_list list containing the indices where the data has
//! to be stored
//! @param num_threads_active number of active threads / intervals
//! @param is_active_second mark if thread has a second non-empty child interval
///////////////////////////////////////////////////////////////////////////////
template<class T>
__device__
void
compactIntervals(float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float mid, float right,
unsigned int mid_count, unsigned int right_count,
T *s_compaction_list,
unsigned int num_threads_active,
unsigned int is_active_second)
{
const unsigned int tid = threadIdx.x;
// perform compaction / copy data for all threads where the second
// child is not dead
if ((tid < num_threads_active) && (1 == is_active_second))
{
unsigned int addr_w = num_threads_active + s_compaction_list[tid];
s_left[addr_w] = mid;
s_right[addr_w] = right;
s_left_count[addr_w] = mid_count;
s_right_count[addr_w] = right_count;
}
}
///////////////////////////////////////////////////////////////////////////////
//! Store intervals that have already converged (w.r.t. the desired precision),
//! duplicating intervals that contain multiple eigenvalues
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param left lower limit of interval
//! @param mid midpoint of interval (updated if split is necessary)
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param mid_count eigenvalues less than \a mid
//! @param right_count eigenvalues less than \a right
//! @param s_compaction_list_exc helper array for stream compaction, updated
//! at tid if split is necessary
//! @param compact_second_chunk shared mem flag if second chunk is used and
//! ergo requires compaction
//! @param num_threads_active number of active threads / intervals
///////////////////////////////////////////////////////////////////////////////
template<class T, class S>
__device__
void
storeIntervalConverged(float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float &left, float &mid, float &right,
S &left_count, S &mid_count, S &right_count,
T *s_compaction_list_exc,
unsigned int &compact_second_chunk,
const unsigned int num_threads_active)
{
const unsigned int tid = threadIdx.x;
const unsigned int multiplicity = right_count - left_count;
// check multiplicity of eigenvalue
if (1 == multiplicity)
{
// just re-store intervals, simple eigenvalue
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = right_count;
// mark that no second child / clear
s_right_count[tid + num_threads_active] = 0;
s_compaction_list_exc[tid] = 0;
}
else
{
// number of eigenvalues after the split less than mid
mid_count = left_count + (multiplicity >> 1);
// store left interval
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = mid_count;
mid = left;
// mark that second child interval exists
s_right_count[tid + num_threads_active] = right_count;
s_compaction_list_exc[tid] = 1;
compact_second_chunk = 1;
}
}
template<class T, class S>
__device__
void
storeIntervalConverged(float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
float &left, float &mid, float &right,
S &left_count, S &mid_count, S &right_count,
T *s_compaction_list_exc,
unsigned int &compact_second_chunk,
const unsigned int num_threads_active,
unsigned int &is_active_second)
{
const unsigned int tid = threadIdx.x;
const unsigned int multiplicity = right_count - left_count;
// check multiplicity of eigenvalue
if (1 == multiplicity)
{
// just re-store intervals, simple eigenvalue
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = right_count;
// mark that no second child / clear
is_active_second = 0;
s_compaction_list_exc[tid] = 0;
}
else
{
// number of eigenvalues after the split less than mid
mid_count = left_count + (multiplicity >> 1);
// store left interval
s_left[tid] = left;
s_right[tid] = right;
s_left_count[tid] = left_count;
s_right_count[tid] = mid_count;
mid = left;
// mark that second child interval exists
is_active_second = 1;
s_compaction_list_exc[tid] = 1;
compact_second_chunk = 1;
}
}
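// [Editor's note, illustrative numbers only] Example of the multiplicity
// handling above: a converged interval with left_count = 3 and right_count = 6
// encloses an eigenvalue of multiplicity 3. It is split into one child keeping
// multiplicity >> 1 = 1 copy and a flagged second child keeping the remaining
// 2; repeated splitting emits the eigenvalue once per copy. A tiny host check
// that the recursion m -> (m/2, m - m/2) always ends in m unit intervals:
inline unsigned int
countConvergedLeaves(const unsigned int multiplicity)
{
    if (multiplicity <= 1)
    {
        return multiplicity;
    }
    const unsigned int half = multiplicity >> 1;
    return countConvergedLeaves(half) + countConvergedLeaves(multiplicity - half);
}
// countConvergedLeaves(3) == 3, countConvergedLeaves(8) == 8, and so on.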
///////////////////////////////////////////////////////////////////////////////
//! Subdivide interval if active and not already converged
//! @param tid id of thread
//! @param s_left shared memory storage for left interval limits
//! @param s_right shared memory storage for right interval limits
//! @param s_left_count shared memory storage for number of eigenvalues less
//! than left interval limits
//! @param s_right_count shared memory storage for number of eigenvalues less
//! than right interval limits
//! @param num_threads_active number of active threads in warp
//! @param left lower limit of interval
//! @param right upper limit of interval
//! @param left_count eigenvalues less than \a left
//! @param right_count eigenvalues less than \a right
//! @param all_threads_converged shared memory flag if all threads are
//! converged
///////////////////////////////////////////////////////////////////////////////
template<class T>
__device__
void
subdivideActiveInterval(const unsigned int tid,
float *s_left, float *s_right,
T *s_left_count, T *s_right_count,
const unsigned int num_threads_active,
float &left, float &right,
unsigned int &left_count, unsigned int &right_count,
float &mid, unsigned int &all_threads_converged)
{
// for all active threads
if (tid < num_threads_active)
{
left = s_left[tid];
right = s_right[tid];
left_count = s_left_count[tid];
right_count = s_right_count[tid];
// check if thread already converged
if (left != right)
{
mid = computeMidpoint(left, right);
all_threads_converged = 0;
}
else if ((right_count - left_count) > 1)
{
// mark as not converged if multiple eigenvalues enclosed
// duplicate interval in storeIntervalConverged()
all_threads_converged = 0;
}
} // end for all active threads
}
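// [Editor's note] computeMidpoint() used above is defined earlier in this
// header (outside this excerpt). A minimal midpoint consistent with its call
// site -- written as left + 0.5f * (right - left) so the intermediate sum
// cannot overflow when left and right are large with equal sign -- is sketched
// here under a different name to avoid clashing with the real definition; the
// actual implementation may differ in details.
__device__
inline float
computeMidpointSketch(const float left, const float right)
{
    return left + (right - left) * 0.5f;
}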
#endif // #ifndef _BISECT_UTIL_H_
|
c3b135cb83a922b4997d440e5d20bd8f0e4abf03.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/linspace_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void LinspaceKernel(T start, T step, int64_t size, T* out) {
CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; }
}
template <typename T>
__global__ void LinspaceSpecialKernel(T start, T* out) {
out[0] = start;
}
template <typename T>
class CUDALinspaceKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* start_t = context.Input<framework::Tensor>("Start");
auto* stop_t = context.Input<framework::Tensor>("Stop");
auto* num_t = context.Input<framework::Tensor>("Num");
auto* out = context.Output<framework::Tensor>("Out");
framework::Tensor n;
framework::TensorCopy(*start_t, platform::CPUPlace(), &n);
T start = n.data<T>()[0];
framework::TensorCopy(*stop_t, platform::CPUPlace(), &n);
T stop = n.data<T>()[0];
framework::TensorCopy(*num_t, platform::CPUPlace(), &n);
int32_t num = n.data<int32_t>()[0];
PADDLE_ENFORCE(num > 0, "The num of linspace op should be larger than 0.");
out->Resize(framework::make_ddim({num}));
T* out_data = out->mutable_data<T>(context.GetPlace());
T step = 0;
if (num != 1) {
step = (stop - start) / (num - 1);
}
auto stream = context.cuda_device_context().stream();
int block = 512;
int grid = (num + block - 1) / block;
hipLaunchKernelGGL(( LinspaceKernel<T>), dim3(grid), dim3(block), 0, stream, start, step, num, out_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(linspace, ops::CUDALinspaceKernel<float>,
ops::CUDALinspaceKernel<double>);
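// [Editor's sketch, standalone illustration -- not part of the Paddle source]
// The kernel above writes out[i] = start + step * i with
// step = (stop - start) / (num - 1), so linspace(0, 1, 5) yields
// {0.0, 0.25, 0.5, 0.75, 1.0}. A small CPU reference for testing:
template <typename T>
static void LinspaceHostReference(T start, T stop, int num, T* out) {
  T step = (num != 1) ? (stop - start) / static_cast<T>(num - 1) : static_cast<T>(0);
  for (int i = 0; i < num; ++i) {
    out[i] = start + step * i;
  }
}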
| c3b135cb83a922b4997d440e5d20bd8f0e4abf03.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/linspace_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void LinspaceKernel(T start, T step, int64_t size, T* out) {
CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; }
}
template <typename T>
__global__ void LinspaceSpecialKernel(T start, T* out) {
out[0] = start;
}
template <typename T>
class CUDALinspaceKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* start_t = context.Input<framework::Tensor>("Start");
auto* stop_t = context.Input<framework::Tensor>("Stop");
auto* num_t = context.Input<framework::Tensor>("Num");
auto* out = context.Output<framework::Tensor>("Out");
framework::Tensor n;
framework::TensorCopy(*start_t, platform::CPUPlace(), &n);
T start = n.data<T>()[0];
framework::TensorCopy(*stop_t, platform::CPUPlace(), &n);
T stop = n.data<T>()[0];
framework::TensorCopy(*num_t, platform::CPUPlace(), &n);
int32_t num = n.data<int32_t>()[0];
PADDLE_ENFORCE(num > 0, "The num of linspace op should be larger than 0.");
out->Resize(framework::make_ddim({num}));
T* out_data = out->mutable_data<T>(context.GetPlace());
T step = 0;
if (num != 1) {
step = (stop - start) / (num - 1);
}
auto stream = context.cuda_device_context().stream();
int block = 512;
int grid = (num + block - 1) / block;
LinspaceKernel<T><<<grid, block, 0, stream>>>(start, step, num, out_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(linspace, ops::CUDALinspaceKernel<float>,
ops::CUDALinspaceKernel<double>);
|
6b34d86a0346c20bd3c42ff72f69aa8966a9e5cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void rgb2gray(unsigned char* d_Pin, unsigned char* d_Pout, int width, int height) {
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
if((Row < height) && (Col < width)) {
d_Pout[Row*width+Col] = d_Pin[(Row*width+Col)*3+BLUE]*0.114 + d_Pin[(Row*width+Col)*3+GREEN]*0.587 + d_Pin[(Row*width+Col)*3+RED]*0.299;
}
} | 6b34d86a0346c20bd3c42ff72f69aa8966a9e5cd.cu | #include "includes.h"
__global__ void rgb2gray(unsigned char* d_Pin, unsigned char* d_Pout, int width, int height) {
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
if((Row < height) && (Col < width)) {
d_Pout[Row*width+Col] = d_Pin[(Row*width+Col)*3+BLUE]*0.114 + d_Pin[(Row*width+Col)*3+GREEN]*0.587 + d_Pin[(Row*width+Col)*3+RED]*0.299;
}
} |
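// [Editor's sketch, not part of the original file] The Row/Col indexing above
// expects a 2D launch that tiles the whole image; a minimal host-side launch
// (device buffer names are assumptions) could look like this. The weights
// 0.299/0.587/0.114 applied to the interleaved RED/GREEN/BLUE bytes are the
// standard BT.601 luma coefficients.
static void launchRgb2Gray(unsigned char* d_in, unsigned char* d_out, int width, int height) {
    dim3 block(16, 16);
    dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
    rgb2gray<<<grid, block>>>(d_in, d_out, width, height);
}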
dba207082cb876753076b3bfba151a3218e0625c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace sd {
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL void execConvertHalfs(half *dx, sd::LongType n, void *dz) {
auto z = reinterpret_cast<T *>(dz);
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (sd::LongType i = tid; i < n; i += blockDim.x * gridDim.x) z[i] = static_cast<T>(__half2float(dx[i]));
}
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_HOST void convertHalfsToGeneric(dim3 &launchDims, hipStream_t *stream, half *dx, sd::LongType n, void *dz) {
hipLaunchKernelGGL(( execConvertHalfs<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, dx, n, dz);
sd::DebugHelper::checkErrorCode(stream, "convertHalfsToGeneric(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void convertHalfsToGeneric,
(dim3 & launchDims, hipStream_t *stream, half *dx, sd::LongType n, void *dz), SD_COMMON_TYPES);
} // namespace sd
| dba207082cb876753076b3bfba151a3218e0625c.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 15.11.2018
//
#include <loops/special_kernels.h>
namespace sd {
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL void execConvertHalfs(half *dx, sd::LongType n, void *dz) {
auto z = reinterpret_cast<T *>(dz);
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (sd::LongType i = tid; i < n; i += blockDim.x * gridDim.x) z[i] = static_cast<T>(__half2float(dx[i]));
}
///////////////////////////////////////////////////////////////////////
template <typename T>
SD_HOST void convertHalfsToGeneric(dim3 &launchDims, cudaStream_t *stream, half *dx, sd::LongType n, void *dz) {
execConvertHalfs<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(dx, n, dz);
sd::DebugHelper::checkErrorCode(stream, "convertHalfsToGeneric(...) failed");
}
BUILD_SINGLE_TEMPLATE(template void convertHalfsToGeneric,
(dim3 & launchDims, cudaStream_t *stream, half *dx, sd::LongType n, void *dz), SD_COMMON_TYPES);
} // namespace sd
|
2ff50972500e244f4d8013557239813f523ff68b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cudaProcessUnsignedChar0(unsigned char *dst, unsigned char *src, int imgW, int imgH)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx * 2;
int y = blockIdx.y*bh + ty * 2;
int px = y * imgW + x;
bool flag = 0 < y && y < (imgH - 2) && 0 < x && x < (imgW - 2);
int sx1 = flag ? px - imgW : 0;
int sx2 = flag ? px - imgW + 1 : 0;
int sx3 = flag ? px - imgW + 2 : 0;
int sx4 = flag ? px - 1 : 0;
int sx5 = flag ? px : 0;
int sx6 = flag ? px + 1 : 0;
int sx7 = flag ? px + 2 : 0;
int sx8 = flag ? px + imgW - 1 : 0;
int sx9 = flag ? px + imgW : 0;
int sxa = flag ? px + imgW + 1 : 0;
int sxb = flag ? px + imgW + 2 : 0;
int sxc = flag ? px + imgW * 2 - 1 : 0;
int sxd = flag ? px + imgW * 2 : 0;
int sxe = flag ? px + imgW * 2 + 1 : 0;
// G0 R0 G1 R1 x0 x1 x2 x3
// B0 G2 B1 G3 x4 x5 x6 x7
// G4 R2 G5 R3 x8 x9 xA xB
// B2 G6 B3 G7 xC xD xE xF
int g1 = (int)src[sx2];
int g2 = (int)src[sx5];
int g3 = (int)src[sx7];
int g4 = (int)src[sx8];
int g5 = (int)src[sxa];
int g6 = (int)src[sxd];
int b0 = (int)src[sx4];
int b1 = (int)src[sx6];
int b2 = (int)src[sxc];
int b3 = (int)src[sxe];
int r0 = (int)src[sx1];
int r1 = (int)src[sx3];
int r2 = (int)src[sx9];
int r3 = (int)src[sxb];
int db0 = (b0 + b1) >> 1;
int dg0 = g2;
int dr0 = (r0 + r1) >> 1;
int db1 = b1;
int dg1 = (g1 + g2 + g3 + g5) >> 2;
int dr1 = (r0 + r1 + r2 + r3) >> 2;
int db2 = (b0 + b1 + b2 + b3) >> 2;
int dg2 = (g2 + g4 + g5 + g6) >> 2;
int dr2 = r2;
int db3 = (b1 + b3) >> 1;
int dg3 = g5;
int dr3 = (r2 + r3) >> 1;
int dx = px * 3;
int dst0 = dx;
int dst1 = dx + 3;
int dst2 = dx + imgW * 3;
int dst3 = dx + (imgW + 1) * 3;
dst[dst0 + 0 < imgW * imgH * 3 ? dst0 + 0 : 0] = (unsigned char)db0;
dst[dst0 + 1 < imgW * imgH * 3 ? dst0 + 1 : 0] = (unsigned char)dg0;
dst[dst0 + 2 < imgW * imgH * 3 ? dst0 + 2 : 0] = (unsigned char)dr0;
dst[dst1 + 0 < imgW * imgH * 3 ? dst1 + 0 : 0] = (unsigned char)db1;
dst[dst1 + 1 < imgW * imgH * 3 ? dst1 + 1 : 0] = (unsigned char)dg1;
dst[dst1 + 2 < imgW * imgH * 3 ? dst1 + 2 : 0] = (unsigned char)dr1;
dst[dst2 + 0 < imgW * imgH * 3 ? dst2 + 0 : 0] = (unsigned char)db2;
dst[dst2 + 1 < imgW * imgH * 3 ? dst2 + 1 : 0] = (unsigned char)dg2;
dst[dst2 + 2 < imgW * imgH * 3 ? dst2 + 2 : 0] = (unsigned char)dr2;
dst[dst3 + 0 < imgW * imgH * 3 ? dst3 + 0 : 0] = (unsigned char)db3;
dst[dst3 + 1 < imgW * imgH * 3 ? dst3 + 1 : 0] = (unsigned char)dg3;
dst[dst3 + 2 < imgW * imgH * 3 ? dst3 + 2 : 0] = (unsigned char)dr3;
} | 2ff50972500e244f4d8013557239813f523ff68b.cu | #include "includes.h"
__global__ void cudaProcessUnsignedChar0(unsigned char *dst, unsigned char *src, int imgW, int imgH)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx * 2;
int y = blockIdx.y*bh + ty * 2;
int px = y * imgW + x;
bool flag = 0 < y && y < (imgH - 2) && 0 < x && x < (imgW - 2);
int sx1 = flag ? px - imgW : 0;
int sx2 = flag ? px - imgW + 1 : 0;
int sx3 = flag ? px - imgW + 2 : 0;
int sx4 = flag ? px - 1 : 0;
int sx5 = flag ? px : 0;
int sx6 = flag ? px + 1 : 0;
int sx7 = flag ? px + 2 : 0;
int sx8 = flag ? px + imgW - 1 : 0;
int sx9 = flag ? px + imgW : 0;
int sxa = flag ? px + imgW + 1 : 0;
int sxb = flag ? px + imgW + 2 : 0;
int sxc = flag ? px + imgW * 2 - 1 : 0;
int sxd = flag ? px + imgW * 2 : 0;
int sxe = flag ? px + imgW * 2 + 1 : 0;
// G0 R0 G1 R1 x0 x1 x2 x3
// B0 G2 B1 G3 x4 x5 x6 x7
// G4 R2 G5 R3 x8 x9 xA xB
// B2 G6 B3 G7 xC xD xE xF
int g1 = (int)src[sx2];
int g2 = (int)src[sx5];
int g3 = (int)src[sx7];
int g4 = (int)src[sx8];
int g5 = (int)src[sxa];
int g6 = (int)src[sxd];
int b0 = (int)src[sx4];
int b1 = (int)src[sx6];
int b2 = (int)src[sxc];
int b3 = (int)src[sxe];
int r0 = (int)src[sx1];
int r1 = (int)src[sx3];
int r2 = (int)src[sx9];
int r3 = (int)src[sxb];
int db0 = (b0 + b1) >> 1;
int dg0 = g2;
int dr0 = (r0 + r1) >> 1;
int db1 = b1;
int dg1 = (g1 + g2 + g3 + g5) >> 2;
int dr1 = (r0 + r1 + r2 + r3) >> 2;
int db2 = (b0 + b1 + b2 + b3) >> 2;
int dg2 = (g2 + g4 + g5 + g6) >> 2;
int dr2 = r2;
int db3 = (b1 + b3) >> 1;
int dg3 = g5;
int dr3 = (r2 + r3) >> 1;
int dx = px * 3;
int dst0 = dx;
int dst1 = dx + 3;
int dst2 = dx + imgW * 3;
int dst3 = dx + (imgW + 1) * 3;
dst[dst0 + 0 < imgW * imgH * 3 ? dst0 + 0 : 0] = (unsigned char)db0;
dst[dst0 + 1 < imgW * imgH * 3 ? dst0 + 1 : 0] = (unsigned char)dg0;
dst[dst0 + 2 < imgW * imgH * 3 ? dst0 + 2 : 0] = (unsigned char)dr0;
dst[dst1 + 0 < imgW * imgH * 3 ? dst1 + 0 : 0] = (unsigned char)db1;
dst[dst1 + 1 < imgW * imgH * 3 ? dst1 + 1 : 0] = (unsigned char)dg1;
dst[dst1 + 2 < imgW * imgH * 3 ? dst1 + 2 : 0] = (unsigned char)dr1;
dst[dst2 + 0 < imgW * imgH * 3 ? dst2 + 0 : 0] = (unsigned char)db2;
dst[dst2 + 1 < imgW * imgH * 3 ? dst2 + 1 : 0] = (unsigned char)dg2;
dst[dst2 + 2 < imgW * imgH * 3 ? dst2 + 2 : 0] = (unsigned char)dr2;
dst[dst3 + 0 < imgW * imgH * 3 ? dst3 + 0 : 0] = (unsigned char)db3;
dst[dst3 + 1 < imgW * imgH * 3 ? dst3 + 1 : 0] = (unsigned char)dg3;
dst[dst3 + 2 < imgW * imgH * 3 ? dst3 + 2 : 0] = (unsigned char)dr3;
} |
2ad9f0e5d0afc8e24f4ffeea934a89c37151ff47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Lab 5, image filters with CUDA.
// Compile with a command-line similar to Lab 4:
// nvcc filter.cu -c -arch=sm_30 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -lcudart -L/usr/local/cuda/lib -lglut -o filter
// or (multicore lab)
// nvcc filter.cu -c -arch=sm_20 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -L/usr/local/cuda/lib64 -lcudart -lglut -o filter
// 2017-11-27: Early pre-release, dubbed "beta".
// 2017-12-03: First official version! Brand new lab 5 based on the old lab 6.
// Better variable names, better prepared for some lab tasks. More changes may come
// but I call this version 1.0b2.
// 2017-12-04: Two fixes: Added command-lines (above), fixed a bug in computeImages
// that allocated too much memory. b3
// 2017-12-04: More fixes: Tightened up the kernel with edge clamping.
// Less code, nicer result (no borders). Cleaned up some messed up X and Y. b4
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#ifdef __APPLE__
#include <GLUT/glut.h>
#include <OpenGL/gl.h>
#else
#include <GL/glut.h>
#endif
#include "readppm.h"
#include "milli.h"
// Use these for setting shared memory size.
#define maxKernelSizeX 10
#define maxKernelSizeY 10
#define SIZE 32
//#define separable
//#define gaussian
#define median
__device__ int pooper(int *histogram, int kernelsize)
{
int pixelCounter = 0;
int value;
int prevValue = 0;
for(int i = 0; i < 256; i++) {
// Check if we have traversed half of the kernel (i.e. where the median should be)
// Since a lot of values will be zero in the histogram, we need to count
// the number of elements actually containing "real" information
pixelCounter += histogram[i];
if(pixelCounter > kernelsize/2) {
value = (prevValue != 0) ? (i + prevValue)/2 : i; // Return the median
break;
}
prevValue = (histogram[i] != 0) ? i : prevValue;
}
return value;
}
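// [Editor's sketch, CPU reference -- not part of the lab code] The same
// median-from-histogram idea on the host, for checking one channel of the
// kernel output: accumulate bin counts and stop once more than half of the
// kernel's pixels have been covered. (The device version above additionally
// averages with the previous occupied bin.)
static int medianFromHistogramHost(const int histogram[256], int kernelsize)
{
    int pixelCounter = 0;
    for (int i = 0; i < 256; i++) {
        pixelCounter += histogram[i];
        if (pixelCounter > kernelsize / 2)
            return i; // first intensity covering more than half the samples
    }
    return 255; // not reached if the histogram holds kernelsize samples
}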
__global__ void filter(unsigned char *image, unsigned char *out, const unsigned int imagesizex, const unsigned int imagesizey, const int kernelsizex, const int kernelsizey)
{
// map from blockIdx to pixel position
// I.e. original image base coordinate
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// ****** Allocate Shared Memory ****** //
// We need a MAXMEMSIZE since kernelsize specifies a filter of size (2*kernelsize + 1)
const int MAXMEMSIZEX = 2 * SIZE + 1;
const int MAXMEMSIZEY = 2 * SIZE + 1;
__shared__ unsigned char smem[MAXMEMSIZEX*MAXMEMSIZEY*3]; // 3 for RGB
// Define our shared memory block
// (Avoid using branching by using max and min)
// ***** THE PROBLEM SEEMS TO BE LOCATED HERE ***** //
int memBlockStartX = max(0, (int)(blockIdx.x*blockDim.x) - kernelsizex);
int memBlockStartY = max(0, (int)(blockIdx.y*blockDim.y) - kernelsizey);
int memBlockEndX = min(imagesizex-1, memBlockStartX + (int)blockDim.x + (2*kernelsizex)); // Using different constants in the last paranthesis seems to alter the result the most
int memBlockEndY = min(imagesizey-1, memBlockStartY + (int)blockDim.y + (2*kernelsizey)); // These values provide a nice result for separable filters though...
// Define thread memory by calculating shared memory block size to actual block size ratio
int memBlockSize = (memBlockEndX - memBlockStartX + 1) * (memBlockEndY - memBlockStartY + 1);
int blocksize = (blockDim.x * blockDim.y)/4;
int threadMem = (int)(memBlockSize/(blocksize));
int memSizeX = memBlockEndX - memBlockStartX + 1;
// Load the amount of pixel memory allowed for each thread into shared memory
for(int i = 0; i <= threadMem; i++) {// TODO: Find corresponding image data to memory index, no RGB?
// (Remember, our Shared Memory is a 1D array)
// Traverse our shared memory block
int memIndex = (threadIdx.x + threadIdx.y * memSizeX + i * blocksize);
int memCurrentX = memIndex % memSizeX;
int memCurrentY = (int)((memIndex - memCurrentX) / memSizeX);
// TODO: Add RGB functionality
memIndex *= 3;
// Map to image index
int imgX = memBlockStartX + memCurrentX;
int imgY = memBlockStartY + memCurrentY;
int imgIndex = 3 * (imgX + imgY * imagesizex);
if( memIndex <= 3 * memBlockSize ) {
smem[memIndex+0] = image[imgIndex];
smem[memIndex+1] = image[imgIndex+1];
smem[memIndex+2] = image[imgIndex+2];
}
}
__syncthreads();
// ****** Actual Filter ****** //
int dy, dx;
#ifndef median
unsigned int sumx, sumy, sumz;
sumx=0;sumy=0;sumz=0;
#endif
// Shared Memory coordinates
int sx = x - memBlockStartX;
int sy = y - memBlockStartY;
#ifdef gaussian
// Define gaussian kernel weights for 5 x 5 filter kernel
int weights[] = {1, 4, 6, 4, 1};
int divby = 16;
#else
int divby = (2*kernelsizex+1) * (2*kernelsizex+1); // Works for box filters only!
#endif
if (x < imagesizex && y < imagesizey) // If inside image
{
#ifdef median
// Median filtering can be done without sorting if we use a histogram instead
int histogramX[256];
int histogramY[256];
int histogramZ[256];
for(int i = 0; i < 256; i++) {
histogramX[i] = 0;
histogramY[i] = 0;
histogramZ[i] = 0;
}
#endif
// Filter kernel
for(dy=-kernelsizey;dy<=kernelsizey;dy++)
for(dx=-kernelsizex;dx<=kernelsizex;dx++)
{
// Use max and min to avoid branching!
int xx = min(max(sx+dx, 0), memBlockEndX);
int yy = min(max(sy+dy, 0), memBlockEndY);
int sharedIndex = 3* (xx + memSizeX*yy);
#ifdef gaussian
// For gaussian filter we use the stencil to filter
int stencil = weights[dx+dy+2];
sumx += stencil * smem[sharedIndex];
sumy += stencil * smem[sharedIndex+1];
sumz += stencil * smem[sharedIndex+2];
#elif defined(median)
histogramX[(int)(smem[sharedIndex])] += 1;
histogramY[(int)(smem[sharedIndex+1])]+= 1;
histogramZ[(int)(smem[sharedIndex+2])]+= 1;
#else
// Instead, collect data from Shared Memory rather than Global Memory
sumx += smem[sharedIndex];
sumy += smem[sharedIndex+1];
sumz += smem[sharedIndex+2];
#endif
}
#ifdef median
out[(y*imagesizex+x)*3+0] = pooper(histogramX, divby);
out[(y*imagesizex+x)*3+1] = pooper(histogramY, divby);
out[(y*imagesizex+x)*3+2] = pooper(histogramZ, divby);
#else
out[(y*imagesizex+x)*3+0] = sumx/divby;
out[(y*imagesizex+x)*3+1] = sumy/divby;
out[(y*imagesizex+x)*3+2] = sumz/divby;
#endif
}
}
// Global variables for image data
unsigned char *image, *pixels, *dev_bitmap, *dev_input, *dev_temp;
unsigned int imagesizey, imagesizex; // Image size
////////////////////////////////////////////////////////////////////////////////
// MAIN COMPUTATION FUNCTION
////////////////////////////////////////////////////////////////////////////////
void computeImages(int kernelsizex, int kernelsizey)
{
if (kernelsizex > maxKernelSizeX || kernelsizey > maxKernelSizeY)
{
printf("Kernel size out of bounds!\n");
return;
}
// ***** OUR BLOCKSIZE VARIABLE IS PROVIDING SOME WEIRD OUTPUTS IF CHANGED AS WELL ****** //
// For boxfilters we cannot use a blocksize >= 10
int blocksize = 4;
pixels = (unsigned char *) malloc(imagesizex*imagesizey*3);
hipMalloc( (void**)&dev_input, imagesizex*imagesizey*3);
hipMemcpy( dev_input, image, imagesizey*imagesizex*3, hipMemcpyHostToDevice );
hipMalloc( (void**)&dev_bitmap, imagesizex*imagesizey*3);
hipMalloc( (void**)&dev_temp, imagesizex * imagesizey * 3);
#if defined(gaussian) || defined(separable)
// If we want to use separable filter kernels, run this code
dim3 grid1(imagesizex/(blocksize), imagesizey);
dim3 blockGrid1(blocksize,1);
dim3 grid2(imagesizex*3, imagesizey/blocksize);
dim3 blockGrid2(3, blocksize);
hipLaunchKernelGGL(( filter), dim3(grid1), dim3(blockGrid1), 0, 0, dev_input, dev_temp, imagesizex, imagesizey, kernelsizex, 0); // Output goes into temp variable, no kernelsizey
hipLaunchKernelGGL(( filter), dim3(grid2), dim3(blockGrid2), 0, 0, dev_temp, dev_bitmap, imagesizex, imagesizey, 0, kernelsizey); // Input is temp variable here, no kernelsizex
#else
// "Normal" box-filter kernel
dim3 grid(imagesizex/ blocksize, imagesizey / blocksize);
dim3 blockGrid(3*blocksize, blocksize);
hipLaunchKernelGGL(( filter), dim3(grid), dim3(blockGrid), 0, 0, dev_input, dev_bitmap, imagesizex, imagesizey, kernelsizex, kernelsizey); // Awful load balance
#endif
hipDeviceSynchronize();
// Check for errors!
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
hipMemcpy( pixels, dev_bitmap, imagesizey*imagesizex*3, hipMemcpyDeviceToHost );
hipFree( dev_bitmap );
hipFree( dev_input );
#ifdef separable
hipFree( dev_temp);
#endif
}
// Display images
void Draw()
{
// Dump the whole picture onto the screen.
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
if (imagesizey >= imagesizex)
{ // Not wide - probably square. Original left, result right.
glRasterPos2f(-1, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
glRasterPos2i(0, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels);
}
else
{ // Wide image! Original on top, result below.
glRasterPos2f(-1, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels );
glRasterPos2i(-1, 0);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
}
glFlush();
}
// Main program, inits
int main( int argc, char** argv)
{
printf("\n*----------- PROGRAM INFO -----------* \n\n");
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA );
if (argc > 1)
image = readppm(argv[1], (int *)&imagesizex, (int *)&imagesizey);
else
#ifdef median
image = readppm((char *)"maskros-noisy.ppm", (int *)&imagesizex, (int *)&imagesizey);
#else
image = readppm((char *)"img1.ppm", (int *)&imagesizex, (int *)&imagesizey);
#endif
if (imagesizey >= imagesizex)
glutInitWindowSize( imagesizex*2, imagesizey );
else
glutInitWindowSize( imagesizex, imagesizey*2 );
glutCreateWindow("Lab 5");
glutDisplayFunc(Draw);
int filterX = 5;
int filterY = 5;
ResetMilli();
computeImages(filterX, filterY);
int time = GetMicroseconds();
printf("\n*----------- BENCHMARKING -----------*");
#ifdef separable
printf("\n\nSeparable filter");
#elif defined gaussian
printf("\n\nGaussian filter");
#elif defined(median)
printf("\n\nMedian filter");
#else
printf("\n\nBox filter\n");
#endif
printf("\n\nKernel size %ix%i", filterX, filterY);
printf("\n\nFiltering took %i microseconds. \n\n", time );
// You can save the result to a file like this:
writeppm("out.ppm", imagesizey, imagesizex, pixels);
glutMainLoop();
return 0;
}
| 2ad9f0e5d0afc8e24f4ffeea934a89c37151ff47.cu | // Lab 5, image filters with CUDA.
// Compile with a command-line similar to Lab 4:
// nvcc filter.cu -c -arch=sm_30 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -lcudart -L/usr/local/cuda/lib -lglut -o filter
// or (multicore lab)
// nvcc filter.cu -c -arch=sm_20 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -L/usr/local/cuda/lib64 -lcudart -lglut -o filter
// 2017-11-27: Early pre-release, dubbed "beta".
// 2017-12-03: First official version! Brand new lab 5 based on the old lab 6.
// Better variable names, better prepared for some lab tasks. More changes may come
// but I call this version 1.0b2.
// 2017-12-04: Two fixes: Added command-lines (above), fixed a bug in computeImages
// that allocated too much memory. b3
// 2017-12-04: More fixes: Tightened up the kernel with edge clamping.
// Less code, nicer result (no borders). Cleaned up some messed up X and Y. b4
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#ifdef __APPLE__
#include <GLUT/glut.h>
#include <OpenGL/gl.h>
#else
#include <GL/glut.h>
#endif
#include "readppm.h"
#include "milli.h"
// Use these for setting shared memory size.
#define maxKernelSizeX 10
#define maxKernelSizeY 10
#define SIZE 32
//#define separable
//#define gaussian
#define median
__device__ int pooper(int *histogram, int kernelsize)
{
int pixelCounter = 0;
int value;
int prevValue = 0;
for(int i = 0; i < 256; i++) {
// Check if we have traversed half of the kernel (i.e. where the median should be)
// Since a lot of values will be zero in the histogram, we need to count
// the number of elements actually containing "real" information
pixelCounter += histogram[i];
if(pixelCounter > kernelsize/2) {
value = (prevValue != 0) ? (i + prevValue)/2 : i; // Return the median
break;
}
prevValue = (histogram[i] != 0) ? i : prevValue;
}
return value;
}
__global__ void filter(unsigned char *image, unsigned char *out, const unsigned int imagesizex, const unsigned int imagesizey, const int kernelsizex, const int kernelsizey)
{
// map from blockIdx to pixel position
// I.e. original image base coordinate
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// ****** Allocate Shared Memory ****** //
// We need a MAXMEMSIZE since kernelsize specifies a filter of size (2*kernelsize + 1)
const int MAXMEMSIZEX = 2 * SIZE + 1;
const int MAXMEMSIZEY = 2 * SIZE + 1;
__shared__ unsigned char smem[MAXMEMSIZEX*MAXMEMSIZEY*3]; // 3 for RGB
// Define our shared memory block
// (Avoid using branching by using max and min)
// ***** THE PROBLEM SEEMS TO BE LOCATED HERE ***** //
int memBlockStartX = max(0, (int)(blockIdx.x*blockDim.x) - kernelsizex);
int memBlockStartY = max(0, (int)(blockIdx.y*blockDim.y) - kernelsizey);
int memBlockEndX = min(imagesizex-1, memBlockStartX + (int)blockDim.x + (2*kernelsizex)); // Using different constants in the last paranthesis seems to alter the result the most
int memBlockEndY = min(imagesizey-1, memBlockStartY + (int)blockDim.y + (2*kernelsizey)); // These values provide a nice result for separable filters though...
// Define thread memory by calculating shared memory block size to actual block size ratio
int memBlockSize = (memBlockEndX - memBlockStartX + 1) * (memBlockEndY - memBlockStartY + 1);
int blocksize = (blockDim.x * blockDim.y)/4;
int threadMem = (int)(memBlockSize/(blocksize));
int memSizeX = memBlockEndX - memBlockStartX + 1;
// Load the amount of pixel memory allowed for each thread into shared memory
for(int i = 0; i <= threadMem; i++) {// TODO: Find corresponding image data to memory index, no RGB?
// (Remember, our Shared Memory is a 1D array)
// Traverse our shared memory block
int memIndex = (threadIdx.x + threadIdx.y * memSizeX + i * blocksize);
int memCurrentX = memIndex % memSizeX;
int memCurrentY = (int)((memIndex - memCurrentX) / memSizeX);
// TODO: Add RGB functionality
memIndex *= 3;
// Map to image index
int imgX = memBlockStartX + memCurrentX;
int imgY = memBlockStartY + memCurrentY;
int imgIndex = 3 * (imgX + imgY * imagesizex);
if( memIndex <= 3 * memBlockSize ) {
smem[memIndex+0] = image[imgIndex];
smem[memIndex+1] = image[imgIndex+1];
smem[memIndex+2] = image[imgIndex+2];
}
}
__syncthreads();
// ****** Actual Filter ****** //
int dy, dx;
#ifndef median
unsigned int sumx, sumy, sumz;
sumx=0;sumy=0;sumz=0;
#endif
// Shared Memory coordinates
int sx = x - memBlockStartX;
int sy = y - memBlockStartY;
#ifdef gaussian
// Define gaussian kernel weights for 5 x 5 filter kernel
int weights[] = {1, 4, 6, 4, 1};
int divby = 16;
#else
int divby = (2*kernelsizex+1) * (2*kernelsizex+1); // Works for box filters only!
#endif
if (x < imagesizex && y < imagesizey) // If inside image
{
#ifdef median
// Median filtering can be done without sorting if we use a histogram instead
int histogramX[256];
int histogramY[256];
int histogramZ[256];
for(int i = 0; i < 256; i++) {
histogramX[i] = 0;
histogramY[i] = 0;
histogramZ[i] = 0;
}
#endif
// Filter kernel
for(dy=-kernelsizey;dy<=kernelsizey;dy++)
for(dx=-kernelsizex;dx<=kernelsizex;dx++)
{
// Use max and min to avoid branching!
int xx = min(max(sx+dx, 0), memBlockEndX);
int yy = min(max(sy+dy, 0), memBlockEndY);
int sharedIndex = 3* (xx + memSizeX*yy);
#ifdef gaussian
// For gaussian filter we use the stencil to filter
int stencil = weights[dx+dy+2];
sumx += stencil * smem[sharedIndex];
sumy += stencil * smem[sharedIndex+1];
sumz += stencil * smem[sharedIndex+2];
#elif defined(median)
histogramX[(int)(smem[sharedIndex])] += 1;
histogramY[(int)(smem[sharedIndex+1])]+= 1;
histogramZ[(int)(smem[sharedIndex+2])]+= 1;
#else
// Instead, collect data from Shared Memory rather than Global Memory
sumx += smem[sharedIndex];
sumy += smem[sharedIndex+1];
sumz += smem[sharedIndex+2];
#endif
}
#ifdef median
out[(y*imagesizex+x)*3+0] = pooper(histogramX, divby);
out[(y*imagesizex+x)*3+1] = pooper(histogramY, divby);
out[(y*imagesizex+x)*3+2] = pooper(histogramZ, divby);
#else
out[(y*imagesizex+x)*3+0] = sumx/divby;
out[(y*imagesizex+x)*3+1] = sumy/divby;
out[(y*imagesizex+x)*3+2] = sumz/divby;
#endif
}
}
// Global variables for image data
unsigned char *image, *pixels, *dev_bitmap, *dev_input, *dev_temp;
unsigned int imagesizey, imagesizex; // Image size
////////////////////////////////////////////////////////////////////////////////
// MAIN COMPUTATION FUNCTION
////////////////////////////////////////////////////////////////////////////////
void computeImages(int kernelsizex, int kernelsizey)
{
if (kernelsizex > maxKernelSizeX || kernelsizey > maxKernelSizeY)
{
printf("Kernel size out of bounds!\n");
return;
}
// ***** OUR BLOCKSIZE VARIABLE IS PROVIDING SOME WEIRD OUTPUTS IF CHANGED AS WELL ****** //
// For boxfilters we cannot use a blocksize >= 10
int blocksize = 4;
pixels = (unsigned char *) malloc(imagesizex*imagesizey*3);
cudaMalloc( (void**)&dev_input, imagesizex*imagesizey*3);
cudaMemcpy( dev_input, image, imagesizey*imagesizex*3, cudaMemcpyHostToDevice );
cudaMalloc( (void**)&dev_bitmap, imagesizex*imagesizey*3);
cudaMalloc( (void**)&dev_temp, imagesizex * imagesizey * 3);
#if defined(gaussian) || defined(separable)
// If we want to use separable filter kernels, run this code
dim3 grid1(imagesizex/(blocksize), imagesizey);
dim3 blockGrid1(blocksize,1);
dim3 grid2(imagesizex*3, imagesizey/blocksize);
dim3 blockGrid2(3, blocksize);
filter<<<grid1, blockGrid1>>>(dev_input, dev_temp, imagesizex, imagesizey, kernelsizex, 0); // Output goes into temp variable, no kernelsizey
filter<<<grid2, blockGrid2>>>(dev_temp, dev_bitmap, imagesizex, imagesizey, 0, kernelsizey); // Input is temp variable here, no kernelsizex
#else
// "Normal" box-filter kernel
dim3 grid(imagesizex/ blocksize, imagesizey / blocksize);
dim3 blockGrid(3*blocksize, blocksize);
filter<<<grid, blockGrid>>>(dev_input, dev_bitmap, imagesizex, imagesizey, kernelsizex, kernelsizey); // Awful load balance
#endif
cudaThreadSynchronize();
// Check for errors!
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
cudaMemcpy( pixels, dev_bitmap, imagesizey*imagesizex*3, cudaMemcpyDeviceToHost );
cudaFree( dev_bitmap );
cudaFree( dev_input );
#ifdef separable
cudaFree( dev_temp);
#endif
}
// Display images
void Draw()
{
// Dump the whole picture onto the screen.
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
if (imagesizey >= imagesizex)
{ // Not wide - probably square. Original left, result right.
glRasterPos2f(-1, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
glRasterPos2i(0, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels);
}
else
{ // Wide image! Original on top, result below.
glRasterPos2f(-1, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels );
glRasterPos2i(-1, 0);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
}
glFlush();
}
// Main program, inits
int main( int argc, char** argv)
{
printf("\n*----------- PROGRAM INFO -----------* \n\n");
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA );
if (argc > 1)
image = readppm(argv[1], (int *)&imagesizex, (int *)&imagesizey);
else
#ifdef median
image = readppm((char *)"maskros-noisy.ppm", (int *)&imagesizex, (int *)&imagesizey);
#else
image = readppm((char *)"img1.ppm", (int *)&imagesizex, (int *)&imagesizey);
#endif
if (imagesizey >= imagesizex)
glutInitWindowSize( imagesizex*2, imagesizey );
else
glutInitWindowSize( imagesizex, imagesizey*2 );
glutCreateWindow("Lab 5");
glutDisplayFunc(Draw);
int filterX = 5;
int filterY = 5;
ResetMilli();
computeImages(filterX, filterY);
int time = GetMicroseconds();
printf("\n*----------- BENCHMARKING -----------*");
#ifdef separable
printf("\n\nSeparable filter");
#elif defined gaussian
printf("\n\nGaussian filter");
#elif defined(median)
printf("\n\nMedian filter");
#else
printf("\n\nBox filter\n");
#endif
printf("\n\nKernel size %ix%i", filterX, filterY);
printf("\n\nFiltering took %i microseconds. \n\n", time );
// You can save the result to a file like this:
writeppm("out.ppm", imagesizey, imagesizex, pixels);
glutMainLoop();
return 0;
}
|
6cd96dfb9d1a8c6c0be44488d3c1941b86bc65bc.hip | // !!! This is a file automatically generated by hipify!!!
// nvcc bugs: cannot import json.hpp without errors:
// https://github.com/nlohmann/json/issues/1347
#define RLS_IGNORE_JSON
/*
* Copyright 2010-2017 NVIDIA Corporation. All rights reserved
*
* Sample app to demonstrate use of CUPTI library to obtain device timestamps
* using callbacks for CUDA runtime APIs
*
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cupti.h>
#include "range_sampling.h"
#include "common_util.h"
using rlscope::MyStatus;
//using rlscope::error;
//#define CHECK_CU_ERROR(err, cufunc)
// if (err != hipSuccess)
// {
// printf ("%s:%d: error %d for CUDA Driver API function '%s'n",
// __FILE__, __LINE__, err, cufunc);
// exit(-1);
// }
//
//#define CHECK_CUPTI_ERROR(err, cuptifunc)
// if (err != CUPTI_SUCCESS)
// {
// const char *errstr;
// cuptiGetResultString(err, &errstr);
// printf ("%s:%d:Error %s for CUPTI API function '%s'.n",
// __FILE__, __LINE__, errstr, cuptifunc);
// exit(-1);
// }
// Structure to hold data collected by callback
typedef struct RuntimeApiTrace_st {
const char *functionName;
uint64_t startTimestamp;
uint64_t endTimestamp;
size_t memcpy_bytes;
enum hipMemcpyKind memcpy_kind;
} RuntimeApiTrace_t;
enum launchOrder{ MEMCPY_H2D1, MEMCPY_H2D2, MEMCPY_D2H, KERNEL, THREAD_SYNC, LAUNCH_LAST};
// Vector addition kernel
__global__ void
VecAdd(const int* A, const int* B, int* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
// Initialize a vector
static void
initVec(int *vec, int n)
{
for (int i = 0; i < n; i++)
vec[i] = i;
}
void CUPTIAPI
getTimestampCallback(void *userdata, CUpti_CallbackDomain domain,
CUpti_CallbackId cbid, const CUpti_CallbackData *cbInfo)
{
static int memTransCount = 0;
uint64_t startTimestamp;
uint64_t endTimestamp;
RuntimeApiTrace_t *traceData = (RuntimeApiTrace_t*)userdata;
CUptiResult cuptiErr;
// Data is collected only for the following API
if ((cbid == CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020) ||
(cbid == CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000) ||
(cbid == CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSynchronize_v3020) ||
(cbid == CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020)) {
// Set pointer depending on API
if ((cbid == CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020) ||
(cbid == CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000))
{
traceData = traceData + KERNEL;
}
else if (cbid == CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSynchronize_v3020)
traceData = traceData + THREAD_SYNC;
else if (cbid == CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020)
traceData = traceData + MEMCPY_H2D1 + memTransCount;
if (cbInfo->callbackSite == CUPTI_API_ENTER) {
// for a kernel launch report the kernel name, otherwise use the API
// function name.
if (cbid == CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020 ||
cbid == CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000)
{
traceData->functionName = cbInfo->symbolName;
}
else {
traceData->functionName = cbInfo->functionName;
}
// Store parameters passed to hipMemcpy
if (cbid == CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020) {
traceData->memcpy_bytes = ((cudaMemcpy_v3020_params *)(cbInfo->functionParams))->count;
traceData->memcpy_kind = ((cudaMemcpy_v3020_params *)(cbInfo->functionParams))->kind;
}
// Collect timestamp for API start
CUPTI_API_CALL_MAYBE_EXIT(cuptiDeviceGetTimestamp(cbInfo->context, &startTimestamp));
traceData->startTimestamp = startTimestamp;
}
if (cbInfo->callbackSite == CUPTI_API_EXIT) {
// Collect timestamp for API exit
CUPTI_API_CALL_MAYBE_EXIT(cuptiDeviceGetTimestamp(cbInfo->context, &endTimestamp));
traceData->endTimestamp = endTimestamp;
// Advance to the next memory transfer operation
if (cbid == CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020) {
memTransCount++;
}
}
}
}
static const char *
memcpyKindStr(enum hipMemcpyKind kind)
{
switch (kind) {
case hipMemcpyHostToDevice:
return "HostToDevice";
case hipMemcpyDeviceToHost:
return "DeviceToHost";
default:
break;
}
return "<unknown>";
}
static void
displayTimestamps(RuntimeApiTrace_t *trace)
{
// Calculate timestamp of kernel based on timestamp from
// hipDeviceSynchronize() call
trace[KERNEL].endTimestamp = trace[THREAD_SYNC].endTimestamp;
printf("startTimeStamp/gpuTime reported in nano-seconds\n\n");
printf("Name\t\tStart Time\t\tGPU Time\tBytes\tKind\n");
printf("%s\t%llu\t%llu\t\t%llu\t%s\n", trace[MEMCPY_H2D1].functionName,
(unsigned long long)trace[MEMCPY_H2D1].startTimestamp,
(unsigned long long)trace[MEMCPY_H2D1].endTimestamp - trace[MEMCPY_H2D1].startTimestamp,
(unsigned long long)trace[MEMCPY_H2D1].memcpy_bytes,
memcpyKindStr(trace[MEMCPY_H2D1].memcpy_kind));
printf("%s\t%llu\t%llu\t\t%llu\t%s\n", trace[MEMCPY_H2D2].functionName,
(unsigned long long)trace[MEMCPY_H2D2].startTimestamp,
(unsigned long long)trace[MEMCPY_H2D2].endTimestamp - trace[MEMCPY_H2D2].startTimestamp,
(unsigned long long)trace[MEMCPY_H2D2].memcpy_bytes,
memcpyKindStr(trace[MEMCPY_H2D2].memcpy_kind));
printf("%s\t%llu\t%llu\t\tNA\tNA\n", trace[KERNEL].functionName,
(unsigned long long)trace[KERNEL].startTimestamp,
(unsigned long long)trace[KERNEL].endTimestamp - trace[KERNEL].startTimestamp);
printf("%s\t%llu\t%llu\t\t%llu\t%s\n", trace[MEMCPY_D2H].functionName,
(unsigned long long)trace[MEMCPY_D2H].startTimestamp,
(unsigned long long)trace[MEMCPY_D2H].endTimestamp - trace[MEMCPY_D2H].startTimestamp,
(unsigned long long)trace[MEMCPY_D2H].memcpy_bytes,
memcpyKindStr(trace[MEMCPY_D2H].memcpy_kind));
}
static void
cleanUp(int *h_A, int *h_B, int *h_C, int *d_A, int *d_B, int *d_C)
{
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
int main(int argc, char* argv[]) {
backward::SignalHandling sh;
// gflags::ParseCommandLineFlags(&argc, &argv, true);
// NOTE: If you only define SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_DEBUG, this doesn't enable debug logging.
// It just ensures that the SPDLOG_DEBUG statements are **compiled in**!
// We still need to turn them on though!
spdlog::set_level(static_cast<spdlog::level::level_enum>(SPDLOG_ACTIVE_LEVEL));
// RLS_LOG("CB", "HELLO WORLD", "");
// std::cout << "HELLO WORLD" << std::endl;
hipCtx_t context = 0;
hipDevice_t device = 0;
hipError_t cuerr;
CUptiResult cuptierr;
int N = 50000;
size_t size = N * sizeof(int);
int threadsPerBlock = 0;
int blocksPerGrid = 0;
int sum;
int *h_A, *h_B, *h_C;
int *d_A, *d_B, *d_C;
CUpti_SubscriberHandle subscriber;
RuntimeApiTrace_t trace[LAUNCH_LAST];
DRIVER_API_CALL_MAYBE_EXIT(hipInit(0));
DRIVER_API_CALL_MAYBE_EXIT(hipCtxCreate(&context, 0, device));
CUPTI_API_CALL_MAYBE_EXIT(cuptiSubscribe(&subscriber, (CUpti_CallbackFunc) getTimestampCallback, &trace));
CUPTI_API_CALL_MAYBE_EXIT(cuptiEnableDomain(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API));
// Allocate input vectors h_A and h_B in host memory
h_A = (int *) malloc(size);
h_B = (int *) malloc(size);
h_C = (int *) malloc(size);
// Initialize input vectors
initVec(h_A, N);
initVec(h_B, N);
memset(h_C, 0, size);
// Allocate vectors in device memory
hipMalloc((void **) &d_A, size);
hipMalloc((void **) &d_B, size);
hipMalloc((void **) &d_C, size);
auto run_pass = [&](rlscope::GPUHwCounterSampler &sampler) {
MyStatus ret;
ret = sampler.StartPass();
IF_BAD_STATUS_RETURN(ret);
// IF_BAD_STATUS_EXIT("Failed to start configuration pass for GPU hw counter profiler", ret);
ret = sampler.Push("VecAdd");
IF_BAD_STATUS_RETURN(ret);
// Copy vectors from host memory to device memory
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// Invoke kernel
threadsPerBlock = 256;
blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N);
hipDeviceSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
// Verify result
for (int i = 0; i < N; ++i) {
sum = h_A[i] + h_B[i];
if (h_C[i] != sum) {
std::stringstream ss;
ss << "kernel execution FAILED";
MyStatus status(rlscope::error::INVALID_ARGUMENT, ss.str());
return status;
}
}
displayTimestamps(trace);
ret = sampler.Pop();
IF_BAD_STATUS_RETURN(ret);
ret = sampler.EndPass();
IF_BAD_STATUS_RETURN(ret);
// IF_BAD_STATUS_EXIT("Failed to end configuration pass for GPU hw counter profiler", ret);
return MyStatus::OK();
};
{
MyStatus ret = MyStatus::OK();
rlscope::GPUHwCounterSampler sampler(device, ".", "");
ret = sampler.Init();
IF_BAD_STATUS_EXIT("Failed to initialize GPU hw counter profiler", ret);
// Get the names of the metrics to collect
std::vector<std::string> metricNames;
metricNames = rlscope::StringSplit(rlscope::get_DEFAULT_METRICS_STR(), ",");
ret = sampler.StartConfig(metricNames);
IF_BAD_STATUS_EXIT("Failed to configure GPU hw counter profiler", ret);
int64_t config_passes = 1;
for (int64_t i = 0; i < config_passes; i++) {
ret = run_pass(sampler);
if (ret.code() != rlscope::error::OK) {
std::stringstream ss;
ss << "Failed to run configuration pass " << i << " with GPU hw counter profiler enabled";
IF_BAD_STATUS_EXIT(ss.str(), ret);
}
}
ret = sampler.StartProfiling();
IF_BAD_STATUS_EXIT("Failed to start GPU hw counter profiler", ret);
// for (int64_t i = 0; i < FLAGS_samples; i++) {
while (sampler.HasNextPass()) {
DBG_LOG("Pass {}", sampler._pass_idx + 1);
ret = run_pass(sampler);
IF_BAD_STATUS_EXIT("Failed to run pass with GPU hw counter profiler enabled", ret);
}
if (sampler.CanRecord()) {
ret = sampler.RecordSample();
IF_BAD_STATUS_EXIT("Failed to record GPU hw counter sample", ret);
}
// }
ret = sampler.StopProfiling();
IF_BAD_STATUS_EXIT("Failed to stop GPU hw counter profiler", ret);
} // Destruct sampler.
CUPTI_API_CALL_MAYBE_EXIT(cuptiUnsubscribe(subscriber));
cleanUp(h_A, h_B, h_C, d_A, d_B, d_C);
hipDeviceSynchronize();
return 0;
}
| 6cd96dfb9d1a8c6c0be44488d3c1941b86bc65bc.cu | // nvcc bugs: cannot import json.hpp without errors:
// https://github.com/nlohmann/json/issues/1347
#define RLS_IGNORE_JSON
/*
* Copyright 2010-2017 NVIDIA Corporation. All rights reserved
*
* Sample app to demonstrate use of CUPTI library to obtain device timestamps
* using callbacks for CUDA runtime APIs
*
*/
#include <stdio.h>
#include <cuda.h>
#include <cupti.h>
#include "range_sampling.h"
#include "common_util.h"
using rlscope::MyStatus;
//using rlscope::error;
//#define CHECK_CU_ERROR(err, cufunc)
// if (err != CUDA_SUCCESS)
// {
// printf ("%s:%d: error %d for CUDA Driver API function '%s'\n",
// __FILE__, __LINE__, err, cufunc);
// exit(-1);
// }
//
//#define CHECK_CUPTI_ERROR(err, cuptifunc)
// if (err != CUPTI_SUCCESS)
// {
// const char *errstr;
// cuptiGetResultString(err, &errstr);
// printf ("%s:%d:Error %s for CUPTI API function '%s'.\n",
// __FILE__, __LINE__, errstr, cuptifunc);
// exit(-1);
// }
// Structure to hold data collected by callback
typedef struct RuntimeApiTrace_st {
const char *functionName;
uint64_t startTimestamp;
uint64_t endTimestamp;
size_t memcpy_bytes;
enum cudaMemcpyKind memcpy_kind;
} RuntimeApiTrace_t;
enum launchOrder{ MEMCPY_H2D1, MEMCPY_H2D2, MEMCPY_D2H, KERNEL, THREAD_SYNC, LAUNCH_LAST};
// Vector addition kernel
__global__ void
VecAdd(const int* A, const int* B, int* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
// Initialize a vector
static void
initVec(int *vec, int n)
{
for (int i = 0; i < n; i++)
vec[i] = i;
}
void CUPTIAPI
getTimestampCallback(void *userdata, CUpti_CallbackDomain domain,
CUpti_CallbackId cbid, const CUpti_CallbackData *cbInfo)
{
static int memTransCount = 0;
uint64_t startTimestamp;
uint64_t endTimestamp;
RuntimeApiTrace_t *traceData = (RuntimeApiTrace_t*)userdata;
CUptiResult cuptiErr;
// Data is collected only for the following API
if ((cbid == CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020) ||
(cbid == CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000) ||
(cbid == CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSynchronize_v3020) ||
(cbid == CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020)) {
// Set pointer depending on API
if ((cbid == CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020) ||
(cbid == CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000))
{
traceData = traceData + KERNEL;
}
else if (cbid == CUPTI_RUNTIME_TRACE_CBID_cudaDeviceSynchronize_v3020)
traceData = traceData + THREAD_SYNC;
else if (cbid == CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020)
traceData = traceData + MEMCPY_H2D1 + memTransCount;
if (cbInfo->callbackSite == CUPTI_API_ENTER) {
// for a kernel launch report the kernel name, otherwise use the API
// function name.
if (cbid == CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020 ||
cbid == CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000)
{
traceData->functionName = cbInfo->symbolName;
}
else {
traceData->functionName = cbInfo->functionName;
}
// Store parameters passed to cudaMemcpy
if (cbid == CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020) {
traceData->memcpy_bytes = ((cudaMemcpy_v3020_params *)(cbInfo->functionParams))->count;
traceData->memcpy_kind = ((cudaMemcpy_v3020_params *)(cbInfo->functionParams))->kind;
}
// Collect timestamp for API start
CUPTI_API_CALL_MAYBE_EXIT(cuptiDeviceGetTimestamp(cbInfo->context, &startTimestamp));
traceData->startTimestamp = startTimestamp;
}
if (cbInfo->callbackSite == CUPTI_API_EXIT) {
// Collect timestamp for API exit
CUPTI_API_CALL_MAYBE_EXIT(cuptiDeviceGetTimestamp(cbInfo->context, &endTimestamp));
traceData->endTimestamp = endTimestamp;
// Advance to the next memory transfer operation
if (cbid == CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020) {
memTransCount++;
}
}
}
}
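// Bookkeeping note for the callback above: the first two cudaMemcpy calls are
// recorded in trace[MEMCPY_H2D1] and trace[MEMCPY_H2D2], the third in
// trace[MEMCPY_D2H] (memTransCount advances on each memcpy exit), the kernel
// launch in trace[KERNEL], and cudaDeviceSynchronize in trace[THREAD_SYNC].
// Because the launch returns asynchronously, displayTimestamps() below
// substitutes trace[THREAD_SYNC].endTimestamp as the kernel's end time.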
static const char *
memcpyKindStr(enum cudaMemcpyKind kind)
{
switch (kind) {
case cudaMemcpyHostToDevice:
return "HostToDevice";
case cudaMemcpyDeviceToHost:
return "DeviceToHost";
default:
break;
}
return "<unknown>";
}
static void
displayTimestamps(RuntimeApiTrace_t *trace)
{
// Calculate timestamp of kernel based on timestamp from
// cudaDeviceSynchronize() call
trace[KERNEL].endTimestamp = trace[THREAD_SYNC].endTimestamp;
printf("startTimeStamp/gpuTime reported in nano-seconds\n\n");
printf("Name\t\tStart Time\t\tGPU Time\tBytes\tKind\n");
printf("%s\t%llu\t%llu\t\t%llu\t%s\n", trace[MEMCPY_H2D1].functionName,
(unsigned long long)trace[MEMCPY_H2D1].startTimestamp,
(unsigned long long)trace[MEMCPY_H2D1].endTimestamp - trace[MEMCPY_H2D1].startTimestamp,
(unsigned long long)trace[MEMCPY_H2D1].memcpy_bytes,
memcpyKindStr(trace[MEMCPY_H2D1].memcpy_kind));
printf("%s\t%llu\t%llu\t\t%llu\t%s\n", trace[MEMCPY_H2D2].functionName,
(unsigned long long)trace[MEMCPY_H2D2].startTimestamp,
(unsigned long long)trace[MEMCPY_H2D2].endTimestamp - trace[MEMCPY_H2D2].startTimestamp,
(unsigned long long)trace[MEMCPY_H2D2].memcpy_bytes,
memcpyKindStr(trace[MEMCPY_H2D2].memcpy_kind));
printf("%s\t%llu\t%llu\t\tNA\tNA\n", trace[KERNEL].functionName,
(unsigned long long)trace[KERNEL].startTimestamp,
(unsigned long long)trace[KERNEL].endTimestamp - trace[KERNEL].startTimestamp);
printf("%s\t%llu\t%llu\t\t%llu\t%s\n", trace[MEMCPY_D2H].functionName,
(unsigned long long)trace[MEMCPY_D2H].startTimestamp,
(unsigned long long)trace[MEMCPY_D2H].endTimestamp - trace[MEMCPY_D2H].startTimestamp,
(unsigned long long)trace[MEMCPY_D2H].memcpy_bytes,
memcpyKindStr(trace[MEMCPY_D2H].memcpy_kind));
}
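// Rough shape of the table printed above (numbers are illustrative, not real
// measurements; the kernel row shows the mangled symbol name reported by
// CUPTI, and 200000 bytes follows from N = 50000 ints in main()):
//
//   Name         Start Time      GPU Time   Bytes    Kind
//   cudaMemcpy   ...             ...        200000   HostToDevice
//   cudaMemcpy   ...             ...        200000   HostToDevice
//   <VecAdd>     ...             ...        NA       NA
//   cudaMemcpy   ...             ...        200000   DeviceToHost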
static void
cleanUp(int *h_A, int *h_B, int *h_C, int *d_A, int *d_B, int *d_C)
{
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
int main(int argc, char* argv[]) {
backward::SignalHandling sh;
// gflags::ParseCommandLineFlags(&argc, &argv, true);
// NOTE: If you only define SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_DEBUG, this doesn't enable debug logging.
// It just ensures that the SPDLOG_DEBUG statements are **compiled in**!
// We still need to turn them on though!
spdlog::set_level(static_cast<spdlog::level::level_enum>(SPDLOG_ACTIVE_LEVEL));
// RLS_LOG("CB", "HELLO WORLD", "");
// std::cout << "HELLO WORLD" << std::endl;
CUcontext context = 0;
CUdevice device = 0;
CUresult cuerr;
CUptiResult cuptierr;
int N = 50000;
size_t size = N * sizeof(int);
int threadsPerBlock = 0;
int blocksPerGrid = 0;
int sum;
int *h_A, *h_B, *h_C;
int *d_A, *d_B, *d_C;
CUpti_SubscriberHandle subscriber;
RuntimeApiTrace_t trace[LAUNCH_LAST];
DRIVER_API_CALL_MAYBE_EXIT(cuInit(0));
DRIVER_API_CALL_MAYBE_EXIT(cuCtxCreate(&context, 0, device));
CUPTI_API_CALL_MAYBE_EXIT(cuptiSubscribe(&subscriber, (CUpti_CallbackFunc) getTimestampCallback, &trace));
CUPTI_API_CALL_MAYBE_EXIT(cuptiEnableDomain(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API));
// Allocate input vectors h_A and h_B in host memory
h_A = (int *) malloc(size);
h_B = (int *) malloc(size);
h_C = (int *) malloc(size);
// Initialize input vectors
initVec(h_A, N);
initVec(h_B, N);
memset(h_C, 0, size);
// Allocate vectors in device memory
cudaMalloc((void **) &d_A, size);
cudaMalloc((void **) &d_B, size);
cudaMalloc((void **) &d_C, size);
auto run_pass = [&](rlscope::GPUHwCounterSampler &sampler) {
MyStatus ret;
ret = sampler.StartPass();
IF_BAD_STATUS_RETURN(ret);
// IF_BAD_STATUS_EXIT("Failed to start configuration pass for GPU hw counter profiler", ret);
ret = sampler.Push("VecAdd");
IF_BAD_STATUS_RETURN(ret);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Invoke kernel
threadsPerBlock = 256;
blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
cudaDeviceSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// Verify result
for (int i = 0; i < N; ++i) {
sum = h_A[i] + h_B[i];
if (h_C[i] != sum) {
std::stringstream ss;
ss << "kernel execution FAILED";
MyStatus status(rlscope::error::INVALID_ARGUMENT, ss.str());
return status;
}
}
displayTimestamps(trace);
ret = sampler.Pop();
IF_BAD_STATUS_RETURN(ret);
ret = sampler.EndPass();
IF_BAD_STATUS_RETURN(ret);
// IF_BAD_STATUS_EXIT("Failed to end configuration pass for GPU hw counter profiler", ret);
return MyStatus::OK();
};
{
MyStatus ret = MyStatus::OK();
rlscope::GPUHwCounterSampler sampler(device, ".", "");
ret = sampler.Init();
IF_BAD_STATUS_EXIT("Failed to initialize GPU hw counter profiler", ret);
// Get the names of the metrics to collect
std::vector<std::string> metricNames;
metricNames = rlscope::StringSplit(rlscope::get_DEFAULT_METRICS_STR(), ",");
ret = sampler.StartConfig(metricNames);
IF_BAD_STATUS_EXIT("Failed to configure GPU hw counter profiler", ret);
int64_t config_passes = 1;
for (int64_t i = 0; i < config_passes; i++) {
ret = run_pass(sampler);
if (ret.code() != rlscope::error::OK) {
std::stringstream ss;
ss << "Failed to run configuration pass " << i << " with GPU hw counter profiler enabled";
IF_BAD_STATUS_EXIT(ss.str(), ret);
}
}
ret = sampler.StartProfiling();
IF_BAD_STATUS_EXIT("Failed to start GPU hw counter profiler", ret);
// for (int64_t i = 0; i < FLAGS_samples; i++) {
while (sampler.HasNextPass()) {
DBG_LOG("Pass {}", sampler._pass_idx + 1);
ret = run_pass(sampler);
IF_BAD_STATUS_EXIT("Failed to run pass with GPU hw counter profiler enabled", ret);
}
if (sampler.CanRecord()) {
ret = sampler.RecordSample();
IF_BAD_STATUS_EXIT("Failed to record GPU hw counter sample", ret);
}
// }
ret = sampler.StopProfiling();
IF_BAD_STATUS_EXIT("Failed to stop GPU hw counter profiler", ret);
} // Destruct sampler.
CUPTI_API_CALL_MAYBE_EXIT(cuptiUnsubscribe(subscriber));
cleanUp(h_A, h_B, h_C, d_A, d_B, d_C);
cudaDeviceSynchronize();
return 0;
}
|
0b6f4b70b69079e38c0cef68ceb920c93f2f3e68.hip | // !!! This is a file automatically generated by hipify!!!
/**********************************************************************************
Copyright 2021 Charles W. Johnson
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**********************************************************************************/
//
// Filename: BF_kernels.cu
// Author: Charles W Johnson
// Description: Kernels for MPI GPU-based Bellman-Ford algorithm
//
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include "BF_kernels.cuh"
using namespace std;
/* ---- CPU/Wrapper Functions ---- */
// Name: initialize_dp_array_Wrapper
//
// Description: CPU wrapper function to execute the kernel to initialize the dp array
//
//
void initialize_dp_array_Wrapper(dim3 grid, dim3 blocks, distPred* d_dp, int infinity, int source, int num_vertices)
{
hipGetLastError(); // reset the runtime error variable to hipSuccess
hipLaunchKernelGGL(( initialize_dp_array), dim3(grid), dim3(blocks), 0, 0, d_dp, infinity, source, num_vertices);
}
// Name: BellmanFord_GPU_Wrapper
//
// Description: CPU wrapper function to execute the B-F kernel
//
//
void BellmanFord_GPU_Wrapper(int proc_start_edge, int proc_num_edges, uint32_t num_vertices, uint32_t num_edges,
Edge* d_edgeList, distPred* d_dp, bool BF_short, bool& finished,
dim3 grid, dim3 blocks)
{
// since CUDA is whining about using bools and I can't find a fix,
// I'm going to use ints - 1 is true, 0 is false
int h_change = 0;
int *d_change = 0;
hipMalloc((void**) &d_change, sizeof(int));
finished = true;
for (int i=1; i < (num_vertices-1); i++)
{
// we make the local change false
h_change = 0;
// we copy the local value to the device
hipMemcpy(d_change, &h_change, sizeof(int), hipMemcpyHostToDevice);
// we then run the kernel
hipLaunchKernelGGL(( cudaBellmanFord), dim3(grid), dim3(blocks), 0, 0, proc_start_edge, proc_num_edges, d_edgeList,
d_dp, d_change);
hipDeviceSynchronize();
// we now copy the value from the device back to the local variable
hipMemcpy(&h_change, d_change, sizeof(int), hipMemcpyDeviceToHost);
// if the device is reporting a change, then we are not finished
//
if (h_change == 1) {
finished = false;
}
if (BF_short == true) {
if (!h_change) {
break;
}
}
}
hipFree(d_change);
}
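// Illustrative call sequence for the two wrappers above (hypothetical sizes;
// d_dp and d_edgeList are assumed to be device allocations made elsewhere):
//
//   dim3 blocks(256);
//   dim3 vertex_grid((num_vertices + blocks.x - 1) / blocks.x);
//   initialize_dp_array_Wrapper(vertex_grid, blocks, d_dp,
//                               1000000000 /* "infinity" */, source, num_vertices);
//   dim3 edge_grid((proc_num_edges + blocks.x - 1) / blocks.x);
//   bool finished = false;
//   BellmanFord_GPU_Wrapper(proc_start_edge, proc_num_edges, num_vertices,
//                           num_edges, d_edgeList, d_dp, /*BF_short=*/true,
//                           finished, edge_grid, blocks);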
/* ---- Kernels ---- */
// Name: initialize_dp_array
//
// Description: Initializes the d_dp array
//
//
__global__ void initialize_dp_array(distPred* d_dp, int infinity, int source, int num_vertices)
{
uint32_t tid = (blockDim.x * blockIdx.x) + threadIdx.x;
if (tid < num_vertices)
{
d_dp[tid].dist = infinity;
d_dp[tid].pred = (int)NULL;
if (tid == source) {
d_dp[tid].dist = 0;
}
}
}
// Name: cudaBellmanFord
//
// Description: Executes the Bellman-Ford algorithm on the specified edges
//
//
__global__ void cudaBellmanFord(int proc_start_edge, int proc_num_edges, Edge* d_edgeList,
distPred* d_dp, int* d_change)
{
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
int edge = proc_start_edge + tid;
int u, v, w;
if (edge < (proc_start_edge + proc_num_edges))
{
u = d_edgeList[edge].u;
v = d_edgeList[edge].v;
w = d_edgeList[edge].w;
if ((d_dp[u].dist + w) < d_dp[v].dist)
{
d_dp[v].dist = (d_dp[u].dist + w);
d_dp[v].pred = u;
*d_change = 1;
}
}
}
| 0b6f4b70b69079e38c0cef68ceb920c93f2f3e68.cu |
/**********************************************************************************
Copyright 2021 Charles W. Johnson
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**********************************************************************************/
//
// Filename: BF_kernels.cu
// Author: Charles W Johnson
// Description: Kernels for MPI GPU-based Bellman-Ford algorithm
//
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include "BF_kernels.cuh"
using namespace std;
/* ---- CPU/Wrapper Functions ---- */
// Name: initialize_dp_array_Wrapper
//
// Description: CPU wrapper function to execute the kernel to initialize the dp array
//
//
void initialize_dp_array_Wrapper(dim3 grid, dim3 blocks, distPred* d_dp, int infinity, int source, int num_vertices)
{
cudaGetLastError(); // reset the runtime error variable to cudaSuccess
initialize_dp_array<<<grid, blocks>>>(d_dp, infinity, source, num_vertices);
}
// Name: BellmanFord_GPU_Wrapper
//
// Description: CPU wrapper function to execute the B-F kernel
//
//
void BellmanFord_GPU_Wrapper(int proc_start_edge, int proc_num_edges, uint32_t num_vertices, uint32_t num_edges,
Edge* d_edgeList, distPred* d_dp, bool BF_short, bool& finished,
dim3 grid, dim3 blocks)
{
// since CUDA is whining about using bools and I can't find a fix,
// I'm going to use ints - 1 is true, 0 is false
int h_change = 0;
int *d_change = 0;
cudaMalloc((void**) &d_change, sizeof(int));
finished = true;
for (int i=1; i < (num_vertices-1); i++)
{
// we make the local change false
h_change = 0;
// we copy the local value to the device
cudaMemcpy(d_change, &h_change, sizeof(int), cudaMemcpyHostToDevice);
// we then run the kernel
cudaBellmanFord<<<grid, blocks>>>(proc_start_edge, proc_num_edges, d_edgeList,
d_dp, d_change);
cudaDeviceSynchronize();
// we now copy the value from the device back to the local variable
cudaMemcpy(&h_change, d_change, sizeof(int), cudaMemcpyDeviceToHost);
// if the device is reporting a change, then we are not finished
//
if (h_change == 1) {
finished = false;
}
if (BF_short == true) {
if (!h_change) {
break;
}
}
}
cudaFree(d_change);
}
/* ---- Kernels ---- */
// Name: initialize_dp_array
//
// Description: Initializes the d_dp array
//
//
__global__ void initialize_dp_array(distPred* d_dp, int infinity, int source, int num_vertices)
{
uint32_t tid = (blockDim.x * blockIdx.x) + threadIdx.x;
if (tid < num_vertices)
{
d_dp[tid].dist = infinity;
d_dp[tid].pred = (int)NULL;
if (tid == source) {
d_dp[tid].dist = 0;
}
}
}
// Name: cudaBellmanFord
//
// Description: Executes the Bellman-Ford algorithm on the specified edges
//
//
__global__ void cudaBellmanFord(int proc_start_edge, int proc_num_edges, Edge* d_edgeList,
distPred* d_dp, int* d_change)
{
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
int edge = proc_start_edge + tid;
int u, v, w;
if (edge < (proc_start_edge + proc_num_edges))
{
u = d_edgeList[edge].u;
v = d_edgeList[edge].v;
w = d_edgeList[edge].w;
if ((d_dp[u].dist + w) < d_dp[v].dist)
{
d_dp[v].dist = (d_dp[u].dist + w);
d_dp[v].pred = u;
*d_change = 1;
}
}
}
|
5bf1fa91e9ba8c62d75f47ea6743071d05f2087e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <cuml/fil/fil.h>
#include <gtest/gtest.h>
#include <test_utils.h>
#include <treelite/c_api.h>
#include <treelite/frontend.h>
#include <treelite/tree.h>
#include <cmath>
#include <cstdio>
#include <cuda_utils.cuh>
#include <limits>
#include <memory>
#include <random/rng.cuh>
#include <utility>
#define TL_CPP_CHECK(call) ASSERT(int(call) >= 0, "treelite call error")
namespace ML {
using namespace MLCommon;
namespace tl = treelite;
namespace tlf = treelite::frontend;
struct FilTestParams {
// input data parameters
int num_rows;
int num_cols;
float nan_prob;
// forest parameters
int depth;
int num_trees;
float leaf_prob;
// output parameters
fil::output_t output;
float threshold;
float global_bias;
// runtime parameters
fil::algo_t algo;
int seed;
float tolerance;
// treelite parameters, only used for treelite tests
tl::Operator op;
fil::leaf_value_t leaf_payload_type;
// num_classes must be 1 or 2 when FLOAT_SCALAR == leaf_payload_type
// (1 if it's regression)
// num_classes must be >1 when INT_CLASS_LABEL == leaf_payload_type
// it's used in treelite ModelBuilder initialization
int num_classes;
size_t num_proba_outputs() { return num_rows * ::max(num_classes, 2); }
size_t num_preds_outputs() { return num_rows; }
};
std::string output2str(fil::output_t output) {
if (output == fil::RAW) return "RAW";
std::string s = "";
if (output & fil::AVG) s += "| AVG";
if (output & fil::CLASS) s += "| CLASS";
if (output & fil::SIGMOID) s += "| SIGMOID";
return s;
}
std::ostream& operator<<(std::ostream& os, const FilTestParams& ps) {
os << "num_rows = " << ps.num_rows << ", num_cols = " << ps.num_cols
<< ", nan_prob = " << ps.nan_prob << ", depth = " << ps.depth
<< ", num_trees = " << ps.num_trees << ", leaf_prob = " << ps.leaf_prob
<< ", output = " << output2str(ps.output)
<< ", threshold = " << ps.threshold << ", algo = " << ps.algo
<< ", seed = " << ps.seed << ", tolerance = " << ps.tolerance
<< ", op = " << tl::OpName(ps.op) << ", global_bias = " << ps.global_bias
<< ", leaf_payload_type = " << ps.leaf_payload_type
<< ", num_classes = " << ps.num_classes;
return os;
}
__global__ void nan_kernel(float* data, const bool* mask, int len, float nan) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= len) return;
if (!mask[tid]) data[tid] = nan;
}
float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); }
class BaseFilTest : public testing::TestWithParam<FilTestParams> {
protected:
void setup_helper() {
// setup
ps = testing::TestWithParam<FilTestParams>::GetParam();
CUDA_CHECK(hipStreamCreate(&stream));
handle.setStream(stream);
generate_forest();
generate_data();
predict_on_cpu();
predict_on_gpu();
}
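  // Fixture flow in setup_helper() above: build a random forest directly on
  // the GPU and copy it into host-side nodes, generate input rows with the
  // requested NaN probability, compute a reference prediction on the CPU,
  // then run FIL on the GPU; the TEST_P bodies only call compare() afterwards.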
void SetUp() override { setup_helper(); }
void TearDown() override {
CUDA_CHECK(hipFree(preds_d));
CUDA_CHECK(hipFree(want_preds_d));
CUDA_CHECK(hipFree(data_d));
CUDA_CHECK(hipFree(want_proba_d));
CUDA_CHECK(hipFree(proba_d));
}
void generate_forest() {
size_t num_nodes = forest_num_nodes();
// helper data
/// weights, used as float* or int*
int* weights_d = nullptr;
float* thresholds_d = nullptr;
int* fids_d = nullptr;
bool* def_lefts_d = nullptr;
bool* is_leafs_d = nullptr;
bool* def_lefts_h = nullptr;
bool* is_leafs_h = nullptr;
// allocate GPU data
allocate(weights_d, num_nodes);
// sizeof(float) == sizeof(int)
allocate(thresholds_d, num_nodes);
allocate(fids_d, num_nodes);
allocate(def_lefts_d, num_nodes);
allocate(is_leafs_d, num_nodes);
// generate on-GPU random data
Random::Rng r(ps.seed);
if (ps.leaf_payload_type == fil::leaf_value_t::FLOAT_SCALAR) {
r.uniform((float*)weights_d, num_nodes, -1.0f, 1.0f, stream);
} else {
// [0..num_classes)
r.uniformInt((int*)weights_d, num_nodes, 0, ps.num_classes, stream);
}
r.uniform(thresholds_d, num_nodes, -1.0f, 1.0f, stream);
r.uniformInt(fids_d, num_nodes, 0, ps.num_cols, stream);
r.bernoulli(def_lefts_d, num_nodes, 0.5f, stream);
r.bernoulli(is_leafs_d, num_nodes, 1.0f - ps.leaf_prob, stream);
// copy data to host
std::vector<float> thresholds_h(num_nodes);
std::vector<int> weights_h(num_nodes), fids_h(num_nodes);
def_lefts_h = new bool[num_nodes];
is_leafs_h = new bool[num_nodes];
updateHost(weights_h.data(), (int*)weights_d, num_nodes, stream);
updateHost(thresholds_h.data(), thresholds_d, num_nodes, stream);
updateHost(fids_h.data(), fids_d, num_nodes, stream);
updateHost(def_lefts_h, def_lefts_d, num_nodes, stream);
updateHost(is_leafs_h, is_leafs_d, num_nodes, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
// mark leaves
for (size_t i = 0; i < ps.num_trees; ++i) {
int num_tree_nodes = tree_num_nodes();
size_t leaf_start = num_tree_nodes * i + num_tree_nodes / 2;
size_t leaf_end = num_tree_nodes * (i + 1);
for (size_t j = leaf_start; j < leaf_end; ++j) {
is_leafs_h[j] = true;
}
}
// initialize nodes
nodes.resize(num_nodes);
for (size_t i = 0; i < num_nodes; ++i) {
fil::val_t w;
switch (ps.leaf_payload_type) {
case fil::leaf_value_t::INT_CLASS_LABEL:
w.idx = weights_h[i];
break;
case fil::leaf_value_t::FLOAT_SCALAR:
// not relying on fil::val_t internals
// merely that we copied floats into weights_h earlier
std::memcpy(&w.f, &weights_h[i], sizeof w.f);
}
fil::node_init(&nodes[i], w, thresholds_h[i], fids_h[i], def_lefts_h[i],
is_leafs_h[i]);
}
// clean up
delete[] def_lefts_h;
delete[] is_leafs_h;
CUDA_CHECK(hipFree(is_leafs_d));
CUDA_CHECK(hipFree(def_lefts_d));
CUDA_CHECK(hipFree(fids_d));
CUDA_CHECK(hipFree(thresholds_d));
CUDA_CHECK(hipFree(weights_d));
}
void generate_data() {
// allocate arrays
size_t num_data = ps.num_rows * ps.num_cols;
allocate(data_d, num_data);
bool* mask_d = nullptr;
allocate(mask_d, num_data);
// generate random data
Random::Rng r(ps.seed);
r.uniform(data_d, num_data, -1.0f, 1.0f, stream);
r.bernoulli(mask_d, num_data, ps.nan_prob, stream);
int tpb = 256;
hipLaunchKernelGGL(( nan_kernel), dim3(ceildiv(int(num_data), tpb)), dim3(tpb), 0, stream,
data_d, mask_d, num_data, std::numeric_limits<float>::quiet_NaN());
CUDA_CHECK(hipPeekAtLastError());
// copy to host
data_h.resize(num_data);
updateHost(data_h.data(), data_d, num_data, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
// clean up
CUDA_CHECK(hipFree(mask_d));
}
void transform(float f, float& proba, float& output) {
if ((ps.output & fil::output_t::AVG) != 0) {
f *= (1.0f / ps.num_trees);
}
f += ps.global_bias;
if ((ps.output & fil::output_t::SIGMOID) != 0) {
f = sigmoid(f);
}
proba = f;
if ((ps.output & fil::output_t::CLASS) != 0) {
f = f > ps.threshold ? 1.0f : 0.0f;
}
output = f;
}
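  // Worked example for transform() above (illustrative numbers): with
  // num_trees = 50, global_bias = 0, output = AVG | CLASS and threshold = 0.5,
  // a raw per-row sum of 30.0 becomes f = 30 * (1/50) = 0.6, so proba = 0.6
  // and, since 0.6 > 0.5, the thresholded output is 1.0.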
void complement(float* proba) { proba[0] = 1.0f - proba[1]; }
void predict_on_cpu() {
// predict on host
std::vector<float> want_preds_h(ps.num_preds_outputs());
std::vector<float> want_proba_h(ps.num_proba_outputs());
int num_nodes = tree_num_nodes();
switch (ps.leaf_payload_type) {
case fil::leaf_value_t::FLOAT_SCALAR:
for (int i = 0; i < ps.num_rows; ++i) {
float pred = 0.0f;
for (int j = 0; j < ps.num_trees; ++j) {
pred +=
infer_one_tree(&nodes[j * num_nodes], &data_h[i * ps.num_cols]).f;
}
transform(pred, want_proba_h[i * 2 + 1], want_preds_h[i]);
complement(&(want_proba_h[i * 2]));
}
break;
case fil::leaf_value_t::INT_CLASS_LABEL:
std::vector<int> class_votes(ps.num_classes);
for (int r = 0; r < ps.num_rows; ++r) {
std::fill(class_votes.begin(), class_votes.end(), 0);
for (int j = 0; j < ps.num_trees; ++j) {
int class_label =
infer_one_tree(&nodes[j * num_nodes], &data_h[r * ps.num_cols])
.idx;
++class_votes[class_label];
}
for (int c = 0; c < ps.num_classes; ++c) {
float thresholded_proba; // not used; do argmax instead
transform(class_votes[c], want_proba_h[r * ps.num_classes + c],
thresholded_proba);
}
want_preds_h[r] =
std::max_element(class_votes.begin(), class_votes.end()) -
class_votes.begin();
}
break;
}
// copy to GPU
allocate(want_preds_d, ps.num_preds_outputs());
allocate(want_proba_d, ps.num_proba_outputs());
updateDevice(want_preds_d, want_preds_h.data(), ps.num_preds_outputs(),
stream);
updateDevice(want_proba_d, want_proba_h.data(), ps.num_proba_outputs(),
stream);
CUDA_CHECK(hipStreamSynchronize(stream));
}
virtual void init_forest(fil::forest_t* pforest) = 0;
void predict_on_gpu() {
fil::forest_t forest = nullptr;
init_forest(&forest);
// predict
allocate(preds_d, ps.num_preds_outputs());
allocate(proba_d, ps.num_proba_outputs());
fil::predict(handle, forest, preds_d, data_d, ps.num_rows);
fil::predict(handle, forest, proba_d, data_d, ps.num_rows, true);
CUDA_CHECK(hipStreamSynchronize(stream));
// cleanup
fil::free(handle, forest);
}
void compare() {
ASSERT_TRUE(devArrMatch(want_proba_d, proba_d, ps.num_proba_outputs(),
CompareApprox<float>(ps.tolerance), stream));
float tolerance = ps.leaf_payload_type == fil::leaf_value_t::FLOAT_SCALAR
? ps.tolerance
: std::numeric_limits<float>::epsilon();
// in multi-class prediction, floats represent the most likely class
// and would be generated by converting an int to float
ASSERT_TRUE(devArrMatch(want_preds_d, preds_d, ps.num_rows,
CompareApprox<float>(tolerance), stream));
}
fil::val_t infer_one_tree(fil::dense_node_t* root, float* data) {
int curr = 0;
float threshold = 0.0f;
fil::val_t output{.f = 0.0f};
int fid = 0;
bool def_left = false, is_leaf = false;
for (;;) {
fil::node_decode(&root[curr], &output, &threshold, &fid, &def_left,
&is_leaf);
if (is_leaf) break;
float val = data[fid];
bool cond = isnan(val) ? !def_left : val >= threshold;
curr = (curr << 1) + 1 + (cond ? 1 : 0);
}
return output;
}
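  // infer_one_tree() above relies on the implicit dense layout used by this
  // test: node i has children at 2*i+1 (left) and 2*i+2 (right), so
  // (curr << 1) + 1 + cond walks left when cond is false and right when it is
  // true; NaN feature values follow the node's default direction
  // (cond = !def_left, i.e. def_left == true sends them left).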
int tree_num_nodes() { return (1 << (ps.depth + 1)) - 1; }
int forest_num_nodes() { return tree_num_nodes() * ps.num_trees; }
// predictions
float* preds_d = nullptr;
float* proba_d = nullptr;
float* want_preds_d = nullptr;
float* want_proba_d = nullptr;
// input data
float* data_d = nullptr;
std::vector<float> data_h;
// forest data
std::vector<fil::dense_node_t> nodes;
// parameters
hipStream_t stream;
cumlHandle handle;
FilTestParams ps;
};
class PredictDenseFilTest : public BaseFilTest {
protected:
void init_forest(fil::forest_t* pforest) override {
// init FIL model
fil::forest_params_t fil_ps;
fil_ps.depth = ps.depth;
fil_ps.num_trees = ps.num_trees;
fil_ps.num_cols = ps.num_cols;
fil_ps.algo = ps.algo;
fil_ps.output = ps.output;
fil_ps.threshold = ps.threshold;
fil_ps.global_bias = ps.global_bias;
fil_ps.leaf_payload_type = ps.leaf_payload_type;
fil_ps.num_classes = ps.num_classes;
fil::init_dense(handle, pforest, nodes.data(), &fil_ps);
}
};
template <typename fil_node_t>
class BasePredictSparseFilTest : public BaseFilTest {
protected:
void dense2sparse_node(const fil::dense_node_t* dense_root, int i_dense,
int i_sparse_root, int i_sparse) {
float threshold;
fil::val_t output;
int feature;
bool def_left, is_leaf;
fil::node_decode(&dense_root[i_dense], &output, &threshold, &feature,
&def_left, &is_leaf);
if (is_leaf) {
// leaf sparse node
node_init(&sparse_nodes[i_sparse], output, threshold, feature, def_left,
is_leaf, 0);
return;
}
// inner sparse node
// reserve space for children
int left_index = sparse_nodes.size();
sparse_nodes.push_back(fil_node_t());
sparse_nodes.push_back(fil_node_t());
node_init(&sparse_nodes[i_sparse], output, threshold, feature, def_left,
is_leaf, left_index - i_sparse_root);
dense2sparse_node(dense_root, 2 * i_dense + 1, i_sparse_root, left_index);
dense2sparse_node(dense_root, 2 * i_dense + 2, i_sparse_root,
left_index + 1);
}
void dense2sparse_tree(const fil::dense_node_t* dense_root) {
int i_sparse_root = sparse_nodes.size();
sparse_nodes.push_back(fil_node_t());
dense2sparse_node(dense_root, 0, i_sparse_root, i_sparse_root);
trees.push_back(i_sparse_root);
}
void dense2sparse() {
for (int tree = 0; tree < ps.num_trees; ++tree) {
dense2sparse_tree(&nodes[tree * tree_num_nodes()]);
}
}
void init_forest(fil::forest_t* pforest) override {
// init FIL model
fil::forest_params_t fil_params;
fil_params.num_trees = ps.num_trees;
fil_params.num_cols = ps.num_cols;
fil_params.algo = ps.algo;
fil_params.output = ps.output;
fil_params.threshold = ps.threshold;
fil_params.global_bias = ps.global_bias;
fil_params.leaf_payload_type = ps.leaf_payload_type;
fil_params.num_classes = ps.num_classes;
dense2sparse();
fil_params.num_nodes = sparse_nodes.size();
fil::init_sparse(handle, pforest, trees.data(), sparse_nodes.data(),
&fil_params);
}
std::vector<fil_node_t> sparse_nodes;
std::vector<int> trees;
};
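// Note on the dense-to-sparse conversion in the class above: the two children
// of an inner node are always appended as a contiguous pair, and node_init()
// is given the left child's index relative to the tree root
// (left_index - i_sparse_root); each entry of trees[] is the root index of
// one converted tree inside sparse_nodes.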
typedef BasePredictSparseFilTest<fil::sparse_node16_t> PredictSparse16FilTest;
typedef BasePredictSparseFilTest<fil::sparse_node8_t> PredictSparse8FilTest;
class TreeliteFilTest : public BaseFilTest {
protected:
/** adds nodes[node] of tree starting at index root to builder
      at index *pkey, increments *pkey,
and returns the treelite key of the node */
int node_to_treelite(tlf::TreeBuilder* builder, int* pkey, int root,
int node) {
int key = (*pkey)++;
builder->CreateNode(key);
int feature;
float threshold;
fil::val_t output;
bool is_leaf, default_left;
fil::node_decode(&nodes[node], &output, &threshold, &feature, &default_left,
&is_leaf);
if (is_leaf) {
switch (ps.leaf_payload_type) {
case fil::leaf_value_t::FLOAT_SCALAR:
// default is fil::FLOAT_SCALAR
builder->SetLeafNode(key, output.f);
break;
case fil::leaf_value_t::INT_CLASS_LABEL:
std::vector<tl::tl_float> vec(ps.num_classes);
for (int i = 0; i < ps.num_classes; ++i)
vec[i] = i == output.idx ? 1.0f : 0.0f;
builder->SetLeafVectorNode(key, vec);
}
} else {
int left = root + 2 * (node - root) + 1;
int right = root + 2 * (node - root) + 2;
switch (ps.op) {
case tl::Operator::kLT:
break;
case tl::Operator::kLE:
// adjust the threshold
threshold =
std::nextafterf(threshold, -std::numeric_limits<float>::infinity());
break;
case tl::Operator::kGT:
// adjust the threshold; left and right still need to be swapped
threshold =
std::nextafterf(threshold, -std::numeric_limits<float>::infinity());
case tl::Operator::kGE:
// swap left and right
std::swap(left, right);
default_left = !default_left;
break;
default:
ASSERT(false, "comparison operator must be <, >, <= or >=");
}
int left_key = node_to_treelite(builder, pkey, root, left);
int right_key = node_to_treelite(builder, pkey, root, right);
builder->SetNumericalTestNode(key, feature, ps.op, threshold,
default_left, left_key, right_key);
}
return key;
}
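  // Operator handling in node_to_treelite() above: the reference traversal in
  // this test always branches on "val >= threshold", so kLT needs no change,
  // kLE nudges the threshold down by one ulp, kGT nudges the threshold and
  // falls through to kGE, and kGE swaps the children and flips default_left.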
void init_forest_impl(fil::forest_t* pforest,
fil::storage_type_t storage_type) {
bool random_forest_flag = (ps.output & fil::output_t::AVG) != 0;
int treelite_num_classes =
ps.leaf_payload_type == fil::leaf_value_t::FLOAT_SCALAR ? 1
: ps.num_classes;
std::unique_ptr<tlf::ModelBuilder> model_builder(new tlf::ModelBuilder(
ps.num_cols, treelite_num_classes, random_forest_flag));
// prediction transform
if ((ps.output & fil::output_t::SIGMOID) != 0) {
model_builder->SetModelParam("pred_transform", "sigmoid");
} else if (ps.leaf_payload_type == fil::leaf_value_t::INT_CLASS_LABEL &&
ps.num_classes >= 2) {
model_builder->SetModelParam("pred_transform", "max_index");
ps.output = fil::output_t::CLASS;
}
// global bias
char* global_bias_str = nullptr;
ASSERT(asprintf(&global_bias_str, "%f", double(ps.global_bias)) > 0,
"cannot convert global_bias into a string");
model_builder->SetModelParam("global_bias", global_bias_str);
free(global_bias_str);
// build the trees
for (int i_tree = 0; i_tree < ps.num_trees; ++i_tree) {
tlf::TreeBuilder* tree_builder = new tlf::TreeBuilder();
int key_counter = 0;
int root = i_tree * tree_num_nodes();
int root_key = node_to_treelite(tree_builder, &key_counter, root, root);
tree_builder->SetRootNode(root_key);
// InsertTree() consumes tree_builder
TL_CPP_CHECK(model_builder->InsertTree(tree_builder));
}
// commit the model
std::unique_ptr<tl::Model> model(new tl::Model);
model_builder->CommitModel(model.get());
// init FIL forest with the model
fil::treelite_params_t params;
params.algo = ps.algo;
params.threshold = ps.threshold;
params.output_class = (ps.output & fil::output_t::CLASS) != 0;
params.storage_type = storage_type;
fil::from_treelite(handle, pforest, (ModelHandle)model.get(), ¶ms);
CUDA_CHECK(hipStreamSynchronize(stream));
}
};
class TreeliteDenseFilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override {
init_forest_impl(pforest, fil::storage_type_t::DENSE);
}
};
class TreeliteSparse16FilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override {
init_forest_impl(pforest, fil::storage_type_t::SPARSE);
}
};
class TreeliteSparse8FilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override {
init_forest_impl(pforest, fil::storage_type_t::SPARSE8);
}
};
class TreeliteAutoFilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override {
init_forest_impl(pforest, fil::storage_type_t::AUTO);
}
};
// test for failures; currently only supported for sparse8 nodes
class TreeliteThrowSparse8FilTest : public TreeliteSparse8FilTest {
protected:
// model import happens in check(), so this function is empty
void SetUp() override {}
void check() { ASSERT_THROW(setup_helper(), MLCommon::Exception); }
};
// rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold,
// global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes
std::vector<FilTestParams> predict_dense_inputs = {
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::INT_CLASS_LABEL, 5},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 2},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::INT_CLASS_LABEL, 5},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::INT_CLASS_LABEL, 7},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::INT_CLASS_LABEL, 4},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::INT_CLASS_LABEL, 4},
};
TEST_P(PredictDenseFilTest, Predict) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, PredictDenseFilTest,
testing::ValuesIn(predict_dense_inputs));
// rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold,
// global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes
std::vector<FilTestParams> predict_sparse_inputs = {
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0.5,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::INT_CLASS_LABEL, 5000},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::INT_CLASS_LABEL, 6},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::INT_CLASS_LABEL, 3},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::INT_CLASS_LABEL, 3},
};
TEST_P(PredictSparse16FilTest, Predict) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse16FilTest,
testing::ValuesIn(predict_sparse_inputs));
TEST_P(PredictSparse8FilTest, Predict) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse8FilTest,
testing::ValuesIn(predict_sparse_inputs));
// rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold,
// global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes
std::vector<FilTestParams> import_dense_inputs = {
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kLT, fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kGE, fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kGT, fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::INT_CLASS_LABEL, 5},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT,
fil::leaf_value_t::INT_CLASS_LABEL, 6},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::INT_CLASS_LABEL, 3},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::INT_CLASS_LABEL, 5},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::INT_CLASS_LABEL, 5},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::INT_CLASS_LABEL, 7},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kLT, fil::leaf_value_t::INT_CLASS_LABEL, 6},
};
TEST_P(TreeliteDenseFilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteDenseFilTest,
testing::ValuesIn(import_dense_inputs));
// rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold,
// global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes
std::vector<FilTestParams> import_sparse_inputs = {
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kLT, fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kGE, fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kLT, fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kGT, fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::INT_CLASS_LABEL, 10},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::INT_CLASS_LABEL, 4},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kLE, fil::leaf_value_t::INT_CLASS_LABEL, 5},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kLT, fil::leaf_value_t::INT_CLASS_LABEL, 3},
};
TEST_P(TreeliteSparse16FilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse16FilTest,
testing::ValuesIn(import_sparse_inputs));
TEST_P(TreeliteSparse8FilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse8FilTest,
testing::ValuesIn(import_sparse_inputs));
// rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold,
// global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes
std::vector<FilTestParams> import_auto_inputs = {
{20000, 50, 0.05, 10, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 15, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 19, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 19, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 10, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::INT_CLASS_LABEL, 3},
#if 0
{20000, 50, 0.05, 19, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::INT_CLASS_LABEL, 6},
#endif
};
TEST_P(TreeliteAutoFilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteAutoFilTest,
testing::ValuesIn(import_auto_inputs));
// rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold,
// global_bias, algo, seed, tolerance, branch comparison operator,
// FIL implementation, number of classes
// adjust test parameters if the sparse8 format changes
std::vector<FilTestParams> import_throw_sparse8_inputs = {
  // too many features
{100, 20000, 0.05, 10, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kLT, fil::leaf_value_t::FLOAT_SCALAR, 0},
// too many tree nodes
{20000, 50, 0.05, 16, 5, 0, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42,
2e-3f, tl::Operator::kLT, fil::leaf_value_t::FLOAT_SCALAR, 0},
};
TEST_P(TreeliteThrowSparse8FilTest, Import) { check(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteThrowSparse8FilTest,
testing::ValuesIn(import_throw_sparse8_inputs));
} // namespace ML
| 5bf1fa91e9ba8c62d75f47ea6743071d05f2087e.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <cuml/fil/fil.h>
#include <gtest/gtest.h>
#include <test_utils.h>
#include <treelite/c_api.h>
#include <treelite/frontend.h>
#include <treelite/tree.h>
#include <cmath>
#include <cstdio>
#include <cuda_utils.cuh>
#include <limits>
#include <memory>
#include <random/rng.cuh>
#include <utility>
#define TL_CPP_CHECK(call) ASSERT(int(call) >= 0, "treelite call error")
namespace ML {
using namespace MLCommon;
namespace tl = treelite;
namespace tlf = treelite::frontend;
struct FilTestParams {
// input data parameters
int num_rows;
int num_cols;
float nan_prob;
// forest parameters
int depth;
int num_trees;
float leaf_prob;
// output parameters
fil::output_t output;
float threshold;
float global_bias;
// runtime parameters
fil::algo_t algo;
int seed;
float tolerance;
// treelite parameters, only used for treelite tests
tl::Operator op;
fil::leaf_value_t leaf_payload_type;
// num_classes must be 1 or 2 when FLOAT_SCALAR == leaf_payload_type
// (1 if it's regression)
// num_classes must be >1 when INT_CLASS_LABEL == leaf_payload_type
// it's used in treelite ModelBuilder initialization
int num_classes;
size_t num_proba_outputs() { return num_rows * std::max(num_classes, 2); }
size_t num_preds_outputs() { return num_rows; }
};
std::string output2str(fil::output_t output) {
if (output == fil::RAW) return "RAW";
std::string s = "";
if (output & fil::AVG) s += "| AVG";
if (output & fil::CLASS) s += "| CLASS";
if (output & fil::SIGMOID) s += "| SIGMOID";
return s;
}
std::ostream& operator<<(std::ostream& os, const FilTestParams& ps) {
os << "num_rows = " << ps.num_rows << ", num_cols = " << ps.num_cols
<< ", nan_prob = " << ps.nan_prob << ", depth = " << ps.depth
<< ", num_trees = " << ps.num_trees << ", leaf_prob = " << ps.leaf_prob
<< ", output = " << output2str(ps.output)
<< ", threshold = " << ps.threshold << ", algo = " << ps.algo
<< ", seed = " << ps.seed << ", tolerance = " << ps.tolerance
<< ", op = " << tl::OpName(ps.op) << ", global_bias = " << ps.global_bias
<< ", leaf_payload_type = " << ps.leaf_payload_type
<< ", num_classes = " << ps.num_classes;
return os;
}
__global__ void nan_kernel(float* data, const bool* mask, int len, float nan) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= len) return;
if (!mask[tid]) data[tid] = nan;
}
float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); }
class BaseFilTest : public testing::TestWithParam<FilTestParams> {
protected:
void setup_helper() {
// setup
ps = testing::TestWithParam<FilTestParams>::GetParam();
CUDA_CHECK(cudaStreamCreate(&stream));
handle.setStream(stream);
generate_forest();
generate_data();
predict_on_cpu();
predict_on_gpu();
}
void SetUp() override { setup_helper(); }
void TearDown() override {
CUDA_CHECK(cudaFree(preds_d));
CUDA_CHECK(cudaFree(want_preds_d));
CUDA_CHECK(cudaFree(data_d));
CUDA_CHECK(cudaFree(want_proba_d));
CUDA_CHECK(cudaFree(proba_d));
}
void generate_forest() {
size_t num_nodes = forest_num_nodes();
// helper data
/// weights, used as float* or int*
int* weights_d = nullptr;
float* thresholds_d = nullptr;
int* fids_d = nullptr;
bool* def_lefts_d = nullptr;
bool* is_leafs_d = nullptr;
bool* def_lefts_h = nullptr;
bool* is_leafs_h = nullptr;
// allocate GPU data
allocate(weights_d, num_nodes);
// sizeof(float) == sizeof(int)
allocate(thresholds_d, num_nodes);
allocate(fids_d, num_nodes);
allocate(def_lefts_d, num_nodes);
allocate(is_leafs_d, num_nodes);
// generate on-GPU random data
Random::Rng r(ps.seed);
if (ps.leaf_payload_type == fil::leaf_value_t::FLOAT_SCALAR) {
r.uniform((float*)weights_d, num_nodes, -1.0f, 1.0f, stream);
} else {
// [0..num_classes)
r.uniformInt((int*)weights_d, num_nodes, 0, ps.num_classes, stream);
}
r.uniform(thresholds_d, num_nodes, -1.0f, 1.0f, stream);
r.uniformInt(fids_d, num_nodes, 0, ps.num_cols, stream);
r.bernoulli(def_lefts_d, num_nodes, 0.5f, stream);
r.bernoulli(is_leafs_d, num_nodes, 1.0f - ps.leaf_prob, stream);
// copy data to host
std::vector<float> thresholds_h(num_nodes);
std::vector<int> weights_h(num_nodes), fids_h(num_nodes);
def_lefts_h = new bool[num_nodes];
is_leafs_h = new bool[num_nodes];
updateHost(weights_h.data(), (int*)weights_d, num_nodes, stream);
updateHost(thresholds_h.data(), thresholds_d, num_nodes, stream);
updateHost(fids_h.data(), fids_d, num_nodes, stream);
updateHost(def_lefts_h, def_lefts_d, num_nodes, stream);
updateHost(is_leafs_h, is_leafs_d, num_nodes, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
// mark leaves
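// the second half of each complete tree's node array (i.e. its bottom level) is forced to be leaves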
for (size_t i = 0; i < ps.num_trees; ++i) {
int num_tree_nodes = tree_num_nodes();
size_t leaf_start = num_tree_nodes * i + num_tree_nodes / 2;
size_t leaf_end = num_tree_nodes * (i + 1);
for (size_t j = leaf_start; j < leaf_end; ++j) {
is_leafs_h[j] = true;
}
}
// initialize nodes
nodes.resize(num_nodes);
for (size_t i = 0; i < num_nodes; ++i) {
fil::val_t w;
switch (ps.leaf_payload_type) {
case fil::leaf_value_t::INT_CLASS_LABEL:
w.idx = weights_h[i];
break;
case fil::leaf_value_t::FLOAT_SCALAR:
// not relying on fil::val_t internals
// merely that we copied floats into weights_h earlier
std::memcpy(&w.f, &weights_h[i], sizeof w.f);
}
fil::node_init(&nodes[i], w, thresholds_h[i], fids_h[i], def_lefts_h[i],
is_leafs_h[i]);
}
// clean up
delete[] def_lefts_h;
delete[] is_leafs_h;
CUDA_CHECK(cudaFree(is_leafs_d));
CUDA_CHECK(cudaFree(def_lefts_d));
CUDA_CHECK(cudaFree(fids_d));
CUDA_CHECK(cudaFree(thresholds_d));
CUDA_CHECK(cudaFree(weights_d));
}
void generate_data() {
// allocate arrays
size_t num_data = ps.num_rows * ps.num_cols;
allocate(data_d, num_data);
bool* mask_d = nullptr;
allocate(mask_d, num_data);
// generate random data
Random::Rng r(ps.seed);
r.uniform(data_d, num_data, -1.0f, 1.0f, stream);
r.bernoulli(mask_d, num_data, ps.nan_prob, stream);
int tpb = 256;
nan_kernel<<<ceildiv(int(num_data), tpb), tpb, 0, stream>>>(
data_d, mask_d, num_data, std::numeric_limits<float>::quiet_NaN());
CUDA_CHECK(cudaPeekAtLastError());
// copy to host
data_h.resize(num_data);
updateHost(data_h.data(), data_d, num_data, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
// clean up
CUDA_CHECK(cudaFree(mask_d));
}
void transform(float f, float& proba, float& output) {
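// applies the configured output transforms in order: tree averaging, global bias, sigmoid;
// proba keeps the pre-threshold value, output is additionally thresholded when CLASS is set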
if ((ps.output & fil::output_t::AVG) != 0) {
f *= (1.0f / ps.num_trees);
}
f += ps.global_bias;
if ((ps.output & fil::output_t::SIGMOID) != 0) {
f = sigmoid(f);
}
proba = f;
if ((ps.output & fil::output_t::CLASS) != 0) {
f = f > ps.threshold ? 1.0f : 0.0f;
}
output = f;
}
void complement(float* proba) { proba[0] = 1.0f - proba[1]; }
void predict_on_cpu() {
// predict on host
std::vector<float> want_preds_h(ps.num_preds_outputs());
std::vector<float> want_proba_h(ps.num_proba_outputs());
int num_nodes = tree_num_nodes();
switch (ps.leaf_payload_type) {
case fil::leaf_value_t::FLOAT_SCALAR:
for (int i = 0; i < ps.num_rows; ++i) {
float pred = 0.0f;
for (int j = 0; j < ps.num_trees; ++j) {
pred +=
infer_one_tree(&nodes[j * num_nodes], &data_h[i * ps.num_cols]).f;
}
transform(pred, want_proba_h[i * 2 + 1], want_preds_h[i]);
complement(&(want_proba_h[i * 2]));
}
break;
case fil::leaf_value_t::INT_CLASS_LABEL:
std::vector<int> class_votes(ps.num_classes);
for (int r = 0; r < ps.num_rows; ++r) {
std::fill(class_votes.begin(), class_votes.end(), 0);
for (int j = 0; j < ps.num_trees; ++j) {
int class_label =
infer_one_tree(&nodes[j * num_nodes], &data_h[r * ps.num_cols])
.idx;
++class_votes[class_label];
}
for (int c = 0; c < ps.num_classes; ++c) {
float thresholded_proba; // not used; do argmax instead
transform(class_votes[c], want_proba_h[r * ps.num_classes + c],
thresholded_proba);
}
want_preds_h[r] =
std::max_element(class_votes.begin(), class_votes.end()) -
class_votes.begin();
}
break;
}
// copy to GPU
allocate(want_preds_d, ps.num_preds_outputs());
allocate(want_proba_d, ps.num_proba_outputs());
updateDevice(want_preds_d, want_preds_h.data(), ps.num_preds_outputs(),
stream);
updateDevice(want_proba_d, want_proba_h.data(), ps.num_proba_outputs(),
stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
virtual void init_forest(fil::forest_t* pforest) = 0;
void predict_on_gpu() {
fil::forest_t forest = nullptr;
init_forest(&forest);
// predict
allocate(preds_d, ps.num_preds_outputs());
allocate(proba_d, ps.num_proba_outputs());
fil::predict(handle, forest, preds_d, data_d, ps.num_rows);
fil::predict(handle, forest, proba_d, data_d, ps.num_rows, true);
CUDA_CHECK(cudaStreamSynchronize(stream));
// cleanup
fil::free(handle, forest);
}
void compare() {
ASSERT_TRUE(devArrMatch(want_proba_d, proba_d, ps.num_proba_outputs(),
CompareApprox<float>(ps.tolerance), stream));
float tolerance = ps.leaf_payload_type == fil::leaf_value_t::FLOAT_SCALAR
? ps.tolerance
: std::numeric_limits<float>::epsilon();
// in multi-class prediction, floats represent the most likely class
// and would be generated by converting an int to float
ASSERT_TRUE(devArrMatch(want_preds_d, preds_d, ps.num_rows,
CompareApprox<float>(tolerance), stream));
}
fil::val_t infer_one_tree(fil::dense_node_t* root, float* data) {
int curr = 0;
float threshold = 0.0f;
fil::val_t output{.f = 0.0f};
int fid = 0;
bool def_left = false, is_leaf = false;
for (;;) {
fil::node_decode(&root[curr], &output, &threshold, &fid, &def_left,
&is_leaf);
if (is_leaf) break;
float val = data[fid];
bool cond = isnan(val) ? !def_left : val >= threshold;
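// dense trees use implicit indexing: children of node curr live at 2*curr+1 (left) and 2*curr+2 (right)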
curr = (curr << 1) + 1 + (cond ? 1 : 0);
}
return output;
}
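// a complete binary tree of depth d has 2^(d+1) - 1 nodes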
int tree_num_nodes() { return (1 << (ps.depth + 1)) - 1; }
int forest_num_nodes() { return tree_num_nodes() * ps.num_trees; }
// predictions
float* preds_d = nullptr;
float* proba_d = nullptr;
float* want_preds_d = nullptr;
float* want_proba_d = nullptr;
// input data
float* data_d = nullptr;
std::vector<float> data_h;
// forest data
std::vector<fil::dense_node_t> nodes;
// parameters
cudaStream_t stream;
cumlHandle handle;
FilTestParams ps;
};
class PredictDenseFilTest : public BaseFilTest {
protected:
void init_forest(fil::forest_t* pforest) override {
// init FIL model
fil::forest_params_t fil_ps;
fil_ps.depth = ps.depth;
fil_ps.num_trees = ps.num_trees;
fil_ps.num_cols = ps.num_cols;
fil_ps.algo = ps.algo;
fil_ps.output = ps.output;
fil_ps.threshold = ps.threshold;
fil_ps.global_bias = ps.global_bias;
fil_ps.leaf_payload_type = ps.leaf_payload_type;
fil_ps.num_classes = ps.num_classes;
fil::init_dense(handle, pforest, nodes.data(), &fil_ps);
}
};
template <typename fil_node_t>
class BasePredictSparseFilTest : public BaseFilTest {
protected:
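// recursively converts dense node i_dense (implicitly indexed) into sparse node i_sparse;
// child positions are stored as offsets relative to the tree root at i_sparse_root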
void dense2sparse_node(const fil::dense_node_t* dense_root, int i_dense,
int i_sparse_root, int i_sparse) {
float threshold;
fil::val_t output;
int feature;
bool def_left, is_leaf;
fil::node_decode(&dense_root[i_dense], &output, &threshold, &feature,
&def_left, &is_leaf);
if (is_leaf) {
// leaf sparse node
node_init(&sparse_nodes[i_sparse], output, threshold, feature, def_left,
is_leaf, 0);
return;
}
// inner sparse node
// reserve space for children
int left_index = sparse_nodes.size();
sparse_nodes.push_back(fil_node_t());
sparse_nodes.push_back(fil_node_t());
node_init(&sparse_nodes[i_sparse], output, threshold, feature, def_left,
is_leaf, left_index - i_sparse_root);
dense2sparse_node(dense_root, 2 * i_dense + 1, i_sparse_root, left_index);
dense2sparse_node(dense_root, 2 * i_dense + 2, i_sparse_root,
left_index + 1);
}
void dense2sparse_tree(const fil::dense_node_t* dense_root) {
int i_sparse_root = sparse_nodes.size();
sparse_nodes.push_back(fil_node_t());
dense2sparse_node(dense_root, 0, i_sparse_root, i_sparse_root);
trees.push_back(i_sparse_root);
}
void dense2sparse() {
for (int tree = 0; tree < ps.num_trees; ++tree) {
dense2sparse_tree(&nodes[tree * tree_num_nodes()]);
}
}
void init_forest(fil::forest_t* pforest) override {
// init FIL model
fil::forest_params_t fil_params;
fil_params.num_trees = ps.num_trees;
fil_params.num_cols = ps.num_cols;
fil_params.algo = ps.algo;
fil_params.output = ps.output;
fil_params.threshold = ps.threshold;
fil_params.global_bias = ps.global_bias;
fil_params.leaf_payload_type = ps.leaf_payload_type;
fil_params.num_classes = ps.num_classes;
dense2sparse();
fil_params.num_nodes = sparse_nodes.size();
fil::init_sparse(handle, pforest, trees.data(), sparse_nodes.data(),
&fil_params);
}
std::vector<fil_node_t> sparse_nodes;
std::vector<int> trees;
};
typedef BasePredictSparseFilTest<fil::sparse_node16_t> PredictSparse16FilTest;
typedef BasePredictSparseFilTest<fil::sparse_node8_t> PredictSparse8FilTest;
class TreeliteFilTest : public BaseFilTest {
protected:
/** adds nodes[node] of tree starting at index root to builder
at index at *pkey, increments *pkey,
and returns the treelite key of the node */
int node_to_treelite(tlf::TreeBuilder* builder, int* pkey, int root,
int node) {
int key = (*pkey)++;
builder->CreateNode(key);
int feature;
float threshold;
fil::val_t output;
bool is_leaf, default_left;
fil::node_decode(&nodes[node], &output, &threshold, &feature, &default_left,
&is_leaf);
if (is_leaf) {
switch (ps.leaf_payload_type) {
case fil::leaf_value_t::FLOAT_SCALAR:
// default is fil::FLOAT_SCALAR
builder->SetLeafNode(key, output.f);
break;
case fil::leaf_value_t::INT_CLASS_LABEL:
std::vector<tl::tl_float> vec(ps.num_classes);
for (int i = 0; i < ps.num_classes; ++i)
vec[i] = i == output.idx ? 1.0f : 0.0f;
builder->SetLeafVectorNode(key, vec);
}
} else {
int left = root + 2 * (node - root) + 1;
int right = root + 2 * (node - root) + 2;
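// FIL branches right when val >= threshold, so the other treelite operators are emulated
// by nudging the threshold and/or swapping the children below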
switch (ps.op) {
case tl::Operator::kLT:
break;
case tl::Operator::kLE:
// adjust the threshold
threshold =
std::nextafterf(threshold, -std::numeric_limits<float>::infinity());
break;
case tl::Operator::kGT:
// adjust the threshold; left and right still need to be swapped
threshold =
std::nextafterf(threshold, -std::numeric_limits<float>::infinity());
case tl::Operator::kGE:
// swap left and right
std::swap(left, right);
default_left = !default_left;
break;
default:
ASSERT(false, "comparison operator must be <, >, <= or >=");
}
int left_key = node_to_treelite(builder, pkey, root, left);
int right_key = node_to_treelite(builder, pkey, root, right);
builder->SetNumericalTestNode(key, feature, ps.op, threshold,
default_left, left_key, right_key);
}
return key;
}
void init_forest_impl(fil::forest_t* pforest,
fil::storage_type_t storage_type) {
bool random_forest_flag = (ps.output & fil::output_t::AVG) != 0;
int treelite_num_classes =
ps.leaf_payload_type == fil::leaf_value_t::FLOAT_SCALAR ? 1
: ps.num_classes;
std::unique_ptr<tlf::ModelBuilder> model_builder(new tlf::ModelBuilder(
ps.num_cols, treelite_num_classes, random_forest_flag));
// prediction transform
if ((ps.output & fil::output_t::SIGMOID) != 0) {
model_builder->SetModelParam("pred_transform", "sigmoid");
} else if (ps.leaf_payload_type == fil::leaf_value_t::INT_CLASS_LABEL &&
ps.num_classes >= 2) {
model_builder->SetModelParam("pred_transform", "max_index");
ps.output = fil::output_t::CLASS;
}
// global bias
char* global_bias_str = nullptr;
ASSERT(asprintf(&global_bias_str, "%f", double(ps.global_bias)) > 0,
"cannot convert global_bias into a string");
model_builder->SetModelParam("global_bias", global_bias_str);
free(global_bias_str);
// build the trees
for (int i_tree = 0; i_tree < ps.num_trees; ++i_tree) {
tlf::TreeBuilder* tree_builder = new tlf::TreeBuilder();
int key_counter = 0;
int root = i_tree * tree_num_nodes();
int root_key = node_to_treelite(tree_builder, &key_counter, root, root);
tree_builder->SetRootNode(root_key);
// InsertTree() consumes tree_builder
TL_CPP_CHECK(model_builder->InsertTree(tree_builder));
}
// commit the model
std::unique_ptr<tl::Model> model(new tl::Model);
model_builder->CommitModel(model.get());
// init FIL forest with the model
fil::treelite_params_t params;
params.algo = ps.algo;
params.threshold = ps.threshold;
params.output_class = (ps.output & fil::output_t::CLASS) != 0;
params.storage_type = storage_type;
fil::from_treelite(handle, pforest, (ModelHandle)model.get(), ¶ms);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
};
class TreeliteDenseFilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override {
init_forest_impl(pforest, fil::storage_type_t::DENSE);
}
};
class TreeliteSparse16FilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override {
init_forest_impl(pforest, fil::storage_type_t::SPARSE);
}
};
class TreeliteSparse8FilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override {
init_forest_impl(pforest, fil::storage_type_t::SPARSE8);
}
};
class TreeliteAutoFilTest : public TreeliteFilTest {
protected:
void init_forest(fil::forest_t* pforest) override {
init_forest_impl(pforest, fil::storage_type_t::AUTO);
}
};
// test for failures; currently only supported for sparse8 nodes
class TreeliteThrowSparse8FilTest : public TreeliteSparse8FilTest {
protected:
// model import happens in check(), so this function is empty
void SetUp() override {}
void check() { ASSERT_THROW(setup_helper(), MLCommon::Exception); }
};
// rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold,
// global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes
std::vector<FilTestParams> predict_dense_inputs = {
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::INT_CLASS_LABEL, 5},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 2},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::INT_CLASS_LABEL, 5},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::INT_CLASS_LABEL, 7},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::INT_CLASS_LABEL, 4},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::INT_CLASS_LABEL, 4},
};
TEST_P(PredictDenseFilTest, Predict) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, PredictDenseFilTest,
testing::ValuesIn(predict_dense_inputs));
// rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold,
// global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes
std::vector<FilTestParams> predict_sparse_inputs = {
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0.5,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator(0),
fil::leaf_value_t::INT_CLASS_LABEL, 5000},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::INT_CLASS_LABEL, 6},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::CLASS, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::INT_CLASS_LABEL, 3},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator(0), fil::leaf_value_t::INT_CLASS_LABEL, 3},
};
TEST_P(PredictSparse16FilTest, Predict) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse16FilTest,
testing::ValuesIn(predict_sparse_inputs));
TEST_P(PredictSparse8FilTest, Predict) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, PredictSparse8FilTest,
testing::ValuesIn(predict_sparse_inputs));
// rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold,
// global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes
std::vector<FilTestParams> import_dense_inputs = {
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kLT, fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kGE, fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kGT, fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::INT_CLASS_LABEL, 5},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT,
fil::leaf_value_t::INT_CLASS_LABEL, 6},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::INT_CLASS_LABEL, 3},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::INT_CLASS_LABEL, 5},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::INT_CLASS_LABEL, 5},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::INT_CLASS_LABEL, 7},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kLT, fil::leaf_value_t::INT_CLASS_LABEL, 6},
};
TEST_P(TreeliteDenseFilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteDenseFilTest,
testing::ValuesIn(import_dense_inputs));
// rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold,
// global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes
std::vector<FilTestParams> import_sparse_inputs = {
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kLT, fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::SIGMOID | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kGE, fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 0, 0,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kLT, fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 0.5,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kGT, fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 8, 50, 0.05,
fil::output_t(fil::output_t::AVG | fil::output_t::CLASS), 1.0, 0.5,
fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE,
fil::leaf_value_t::INT_CLASS_LABEL, 10},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::INT_CLASS_LABEL, 4},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kLE, fil::leaf_value_t::INT_CLASS_LABEL, 5},
{20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, 0.5, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kLT, fil::leaf_value_t::INT_CLASS_LABEL, 3},
};
TEST_P(TreeliteSparse16FilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse16FilTest,
testing::ValuesIn(import_sparse_inputs));
TEST_P(TreeliteSparse8FilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteSparse8FilTest,
testing::ValuesIn(import_sparse_inputs));
// rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold,
// global_bias, algo, seed, tolerance, branch comparison operator, FIL implementation, number of classes
std::vector<FilTestParams> import_auto_inputs = {
{20000, 50, 0.05, 10, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 15, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 19, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 19, 50, 0.05, fil::output_t::RAW, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::FLOAT_SCALAR, 0},
{20000, 50, 0.05, 10, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::ALGO_AUTO, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::INT_CLASS_LABEL, 3},
#if 0
{20000, 50, 0.05, 19, 50, 0.05, fil::output_t::AVG, 0, 0,
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT,
fil::leaf_value_t::INT_CLASS_LABEL, 6},
#endif
};
TEST_P(TreeliteAutoFilTest, Import) { compare(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteAutoFilTest,
testing::ValuesIn(import_auto_inputs));
// rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold,
// global_bias, algo, seed, tolerance, branch comparison operator,
// FIL implementation, number of classes
// adjust test parameters if the sparse8 format changes
std::vector<FilTestParams> import_throw_sparse8_inputs = {
// too many features
{100, 20000, 0.05, 10, 50, 0.05, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE,
42, 2e-3f, tl::Operator::kLT, fil::leaf_value_t::FLOAT_SCALAR, 0},
// too many tree nodes
{20000, 50, 0.05, 16, 5, 0, fil::output_t::RAW, 0, 0, fil::algo_t::NAIVE, 42,
2e-3f, tl::Operator::kLT, fil::leaf_value_t::FLOAT_SCALAR, 0},
};
TEST_P(TreeliteThrowSparse8FilTest, Import) { check(); }
INSTANTIATE_TEST_CASE_P(FilTests, TreeliteThrowSparse8FilTest,
testing::ValuesIn(import_throw_sparse8_inputs));
} // namespace ML
|
4bafcfa5c0b9244293b8cdedc7bb3f7858baa223.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"
#include "helper_math.h"
#include <iostream> // for cout
#include "deproject_point.h"
#include "CudaPipeline.h"
#include "cuda_hash_params.h"
#include "hash_functions.h"
#include "assert.h"
#include "icp_kernels.h"
#ifndef MINF
#define MINF __int_as_float(0xff800000)
#endif
#ifndef PINF
#define PINF __int_as_float(0x7f800000)
#endif
#ifndef INF
#define INF __int_as_float(0x7f800000)
#endif
__global__ void renderKernel(HashData hashData, RayCastData rayCastData, const struct rs2_intrinsics * dev_intrin) {
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
const RayCastParams& rayCastParams = c_rayCastParams;
if (x < rayCastParams.m_width && y < rayCastParams.m_height) {
rayCastData.d_depth[y*rayCastParams.m_width+x] = MINF;
rayCastData.d_depth3[y*rayCastParams.m_width+x] = make_float3(MINF,MINF,MINF);
rayCastData.d_normals[y*rayCastParams.m_width+x] = make_float3(MINF,MINF,MINF);
float3 point;
// reusing librealsense cuda kernel to calc point cloud
deproject_pixel_to_point_cuda(&point, dev_intrin, make_float2((float)x, (float)y), 1.0f * (DEPTH_WORLD_MAX - DEPTH_WORLD_MIN) + DEPTH_WORLD_MIN);
float3 camDir = normalize(point);
float3 worldCamPos = rayCastParams.m_viewMatrixInverse * make_float3(0.0f, 0.0f, 0.0f);
float4 w = rayCastParams.m_viewMatrixInverse * make_float4(camDir, 0.0f);
float3 worldDir = normalize(make_float3(w.x, w.y, w.z));
//don't use ray interval splatting
float minInterval = rayCastParams.m_minDepth;
float maxInterval = rayCastParams.m_maxDepth;
// shouldn't this return in the case no interval is found?
if (minInterval == 0 || minInterval == MINF) return;
if (maxInterval == 0 || maxInterval == MINF) return;
rayCastData.traverseCoarseGridSimpleSampleAll(hashData, worldCamPos, worldDir, camDir, make_int3(x,y,1),
minInterval, maxInterval, dev_intrin);
}
}
void CudaPipeline::render() {
// rayIntervalSplatting(hashData, hashParams, cameraData, lastRigidTransform);
if (hashParams.m_numOccupiedBlocks == 0) return;
// 6 verts (2 triangles) per block
assert(m_rayCastParams.m_maxNumVertices > 6*hashParams.m_numOccupiedBlocks);
m_rayCastParams.m_numOccupiedSDFBlocks = hashParams.m_numOccupiedBlocks;
m_rayCastParams.m_viewMatrix = lastRigidTransform.getInverse();
m_rayCastParams.m_viewMatrixInverse = lastRigidTransform;
m_data.updateParams(m_rayCastParams); // !!! debugging
// -----
// renderCS(hashData, m_data, cameraData, m_rayCastParams);
const dim3 gridSize((m_rayCastParams.m_width + T_PER_BLOCK - 1)/T_PER_BLOCK, (m_rayCastParams.m_height + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
hipLaunchKernelGGL(( renderKernel), dim3(gridSize), dim3(blockSize), 0, 0, hashData, m_rayCastData, dev_intrin);
getLastCudaError("Failed: renderKernel");
hipLaunchKernelGGL(( computeNormalsDevice), dim3(gridSize), dim3(blockSize), 0, 0, m_rayCastData.d_normals, m_rayCastData.d_depth3, dev_intrin->width, dev_intrin->height);
getLastCudaError("Failed: computeNormalsDevice");
} | 4bafcfa5c0b9244293b8cdedc7bb3f7858baa223.cu | // CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"
#include "helper_math.h"
#include <iostream> // for cout
#include "deproject_point.h"
#include "CudaPipeline.h"
#include "cuda_hash_params.h"
#include "hash_functions.h"
#include "assert.h"
#include "icp_kernels.h"
#ifndef MINF
#define MINF __int_as_float(0xff800000)
#endif
#ifndef PINF
#define PINF __int_as_float(0x7f800000)
#endif
#ifndef INF
#define INF __int_as_float(0x7f800000)
#endif
__global__ void renderKernel(HashData hashData, RayCastData rayCastData, const struct rs2_intrinsics * dev_intrin) {
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
const RayCastParams& rayCastParams = c_rayCastParams;
if (x < rayCastParams.m_width && y < rayCastParams.m_height) {
rayCastData.d_depth[y*rayCastParams.m_width+x] = MINF;
rayCastData.d_depth3[y*rayCastParams.m_width+x] = make_float3(MINF,MINF,MINF,MINF);
rayCastData.d_normals[y*rayCastParams.m_width+x] = make_float3(MINF,MINF,MINF,MINF);
float3 point;
// reusing librealsense cuda kernel to calc point cloud
deproject_pixel_to_point_cuda(&point, dev_intrin, make_float2((float)x, (float)y), 1.0f * (DEPTH_WORLD_MAX - DEPTH_WORLD_MIN) + DEPTH_WORLD_MIN);
float3 camDir = normalize(point);
float3 worldCamPos = rayCastParams.m_viewMatrixInverse * make_float3(0.0f, 0.0f, 0.0f);
float4 w = rayCastParams.m_viewMatrixInverse * make_float4(camDir, 0.0f);
float3 worldDir = normalize(make_float3(w.x, w.y, w.z));
//don't use ray interval splatting
float minInterval = rayCastParams.m_minDepth;
float maxInterval = rayCastParams.m_maxDepth;
// shouldn't this return in the case no interval is found?
if (minInterval == 0 || minInterval == MINF) return;
if (maxInterval == 0 || maxInterval == MINF) return;
rayCastData.traverseCoarseGridSimpleSampleAll(hashData, worldCamPos, worldDir, camDir, make_int3(x,y,1),
minInterval, maxInterval, dev_intrin);
}
}
void CudaPipeline::render() {
// rayIntervalSplatting(hashData, hashParams, cameraData, lastRigidTransform);
if (hashParams.m_numOccupiedBlocks == 0) return;
// 6 verts (2 triangles) per block
assert(m_rayCastParams.m_maxNumVertices > 6*hashParams.m_numOccupiedBlocks);
m_rayCastParams.m_numOccupiedSDFBlocks = hashParams.m_numOccupiedBlocks;
m_rayCastParams.m_viewMatrix = lastRigidTransform.getInverse();
m_rayCastParams.m_viewMatrixInverse = lastRigidTransform;
m_data.updateParams(m_rayCastParams); // !!! debugging
// -----
// renderCS(hashData, m_data, cameraData, m_rayCastParams);
const dim3 gridSize((m_rayCastParams.m_width + T_PER_BLOCK - 1)/T_PER_BLOCK, (m_rayCastParams.m_height + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
renderKernel<<<gridSize, blockSize>>>(hashData, m_rayCastData, dev_intrin);
getLastCudaError("Failed: renderKernel");
computeNormalsDevice<<<gridSize, blockSize>>>(m_rayCastData.d_normals, m_rayCastData.d_depth3, dev_intrin->width, dev_intrin->height);
getLastCudaError("Failed: computeNormalsDevice");
} |
6309b1a9bae74ce7e12546ddc80f31c5fb1f3e0e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdbool.h>
#include <hip/hip_fp16.h>
#define StartAxis(i,axis) int i = blockIdx.axis * blockDim.axis + threadIdx.axis;
#define CUDA_GRID_LOOP_X(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define CUDA_GRID_AXIS_LOOP(i, n, axis) \
for (int i = blockIdx.axis * blockDim.axis + threadIdx.axis; i < n; \
i += blockDim.axis * gridDim.axis)
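// per-lane select helpers for __half2: compare a and b element-wise and return the first
// provided value where the comparison holds, the second value otherwise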
__device__ __half2 h2agtb(__half2 a, __half2 b, __half gtval, __half leval ){
if (__hbgt2(a,b)){
return __halves2half2(gtval,gtval);
}
return __halves2half2(__hgt(__low2half(a),__low2half(b)) ? gtval : leval,
__hgt(__high2half(a),__high2half(b)) ? gtval : leval);
}
__device__ __half2 h2ageb(__half2 a, __half2 b, __half geval, __half ltval ){
if (__hbge2(a,b)){
return __halves2half2(geval,geval);
}
return __halves2half2(__hge(__low2half(a),__low2half(b)) ? geval : ltval,
__hge(__high2half(a),__high2half(b)) ? geval : ltval);
}
__device__ __half2 h2altb(__half2 a, __half2 b, __half geval, __half ltval ){
if (__hblt2(a,b)){
return __halves2half2(ltval,ltval);
}
return __halves2half2(__hlt(__low2half(a),__low2half(b)) ?ltval: geval,
__hlt(__high2half(a),__high2half(b)) ?ltval: geval);
}
__device__ __half2 h2aleb(__half2 a, __half2 b, __half gtval, __half leval ){
if (__hble2(a,b)){
return __halves2half2(leval,leval);
}
return __halves2half2(__hle(__low2half(a),__low2half(b)) ?leval: gtval,
__hle(__high2half(a),__high2half(b)) ?leval: gtval);
}
extern "C" __global__ void Transpose(int numthreads,
const float *src,
const int *buf,
const int ndims,
float *dest)
{
const int *src_strides = buf;
const int *dest_strides = &buf[ndims];
const int *perm = &buf[ndims * 2];
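// walk the destination strides to recover the multi-dimensional coordinates,
// then map them through perm onto the source strides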
CUDA_GRID_LOOP_X(destIdx, numthreads)
{
int srcIdx = 0;
int t = destIdx;
for (int i = 0; i < ndims; ++i)
{
const int ratio = t / dest_strides[i];
t -= ratio * dest_strides[i];
srcIdx += (ratio * src_strides[perm[i]]);
}
dest[destIdx] = src[srcIdx];
}
}
/*SwapEveryOther will swap the batches between 2 tensors.
It swaps either the even or the odd batches, selected by the start offset (stride is typically 2).
Both tensors have to be equal in size and dims.
Make sure labels are swapped on the host end.
*/
extern "C" __global__ void SwapEveryOther(
const int xThreads, //total batches
const int totalbatches,
float *t1,
float *t2,
const int start,
const int stride)
{
const int BVol = xThreads;
for (int i =start;i<totalbatches;i+=stride)
{
CUDA_GRID_LOOP_X(xIdx, xThreads)
{
const float swapper = t1[(i*BVol)+(xIdx)];
t1[(i*BVol) +xIdx]=t2[(i*BVol)+xIdx];
t2[(i*BVol)+xIdx]=swapper;
}
__syncthreads();
}
}
//SwapUpperLower will swap either the upper or lower batches
//Right Now inverse doesn't do anything
extern "C" __global__ void SwapUpperLower(
const int xThreads, //batchsize
const int yThreads, //batchvol
float *t1,
float *t2,
const int t1upper,
const int t2upper,
const int inverse)
{
const int BVol = yThreads;
if (t1upper>0)
{
CUDA_GRID_AXIS_LOOP(xIdx, xThreads/2,x)
{
int t2Idx;
if (t2upper>0){
t2Idx=xIdx;
}else{
t2Idx=xThreads/2 +xIdx;
}
if (xIdx < xThreads && t2Idx<xThreads)
{
CUDA_GRID_AXIS_LOOP(yIdx, yThreads,y)
{
const float swapper = t1[(xIdx*BVol)+(yIdx)];
t1[(xIdx*BVol) +yIdx]=t2[(t2Idx*BVol)+yIdx];
t2[(xIdx*BVol)+yIdx]=swapper;
}
}
}
}
else
{
CUDA_GRID_AXIS_LOOP(xIdx, xThreads/2,x)
{
const int halfIdx=(xThreads/2)+xIdx;
int t2Idx;
if (t2upper>0){
t2Idx=xIdx;
}else{
t2Idx=halfIdx;
}
if (halfIdx < xThreads)
{
CUDA_GRID_AXIS_LOOP(yIdx, yThreads,y)
{
const float swapper = t1[(halfIdx*BVol)+(yIdx)];
t1[(halfIdx*BVol) +yIdx]=t2[(t2Idx*BVol)+yIdx];
t2[(halfIdx*BVol)+yIdx]=swapper;
}
}
}
}
}
//ShapetoBatch4DNHWC Does a stride shape to batch. Make sure values on receiving end are set to zero when s2b is 0
extern "C" __global__ void ShapetoBatch4DNHWC(
const int xThreads,
const int yThreads,
const int zThreads,
const int hSize,
const int wSize,
const int num_original_batches,
const int BatchVolume,
const int OriginalVol,
const int N1,
const int N2,
const int hstride,
const int wstride,
float *shape,
float *batch,
const int h_over_scan,
const int w_over_scan,
const bool S2B)
{
int batch0 = N2 * xThreads * yThreads * zThreads;
int batch1 = xThreads * yThreads * zThreads;
int batch2 = yThreads * zThreads;
int batch3 = zThreads;
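// batch0..batch3 are the strides of the flattened batch tensor indexed as [b][i][j][x][y][z]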
for (int b = 0;b<num_original_batches;b++)
{
const int ShapeOffset = OriginalVol*b;
const int BatchOffset=BatchVolume*b;
for (int i = 0; i < N1; i++)
{
for (int j = 0; j < N2; j++)
{
CUDA_GRID_AXIS_LOOP(xIdx, xThreads, x)
{
CUDA_GRID_AXIS_LOOP(yIdx, yThreads, y)
{
CUDA_GRID_AXIS_LOOP(zIdx, zThreads, z)
{
int oh = (hstride * i) + xIdx;
int ow = (wstride * j) + yIdx;
if (S2B)
{
if (oh < hSize && ow < wSize)
{
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] =
shape[ShapeOffset + (oh * hSize * zThreads) + (ow * zThreads) + zIdx];
}
else
{
if (h_over_scan>0 && ow<wSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
if (w_over_scan>0 && oh<hSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
}
}
else
{
shape[ShapeOffset + (oh * hSize * zThreads) + (ow * zThreads) + zIdx] +=
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx];
}
}
}
}
}
}
}
}
//ShapetoBatch4DNCHW Does a stride shape to batch. Make sure values on receiving end are set to zero when s2b is 0
extern "C" __global__ void ShapetoBatch4DNCHW(
const int xThreads,
const int yThreads,
const int zThreads,
const int hSize,
const int wSize,
const int num_original_batches,
const int BatchVolume,
const int OriginalVol,
const int N1,
const int N2,
const int hstride,
const int wstride,
float *shape,
float *batch,
const int h_over_scan,
const int w_over_scan,
const bool S2B)
{
int batch0 = N2 * xThreads * yThreads * zThreads;
int batch1 = xThreads * yThreads * zThreads;
int batch2 = xThreads * yThreads;
int batch3 = yThreads;
for (int b = 0;b<num_original_batches;b++)
{
const int ShapeOffset = OriginalVol*b;
const int BatchOffset=BatchVolume*b;
for (int i = 0; i < N1; i++)
{
for (int j = 0; j < N2; j++)
{
CUDA_GRID_AXIS_LOOP(xIdx, xThreads, x)
{
CUDA_GRID_AXIS_LOOP(yIdx, yThreads, y)
{
CUDA_GRID_AXIS_LOOP(zIdx, zThreads, z)
{
int oh = (hstride * i) + yIdx;
int ow = (wstride * j) + zIdx;
if (S2B )
{
if (oh < hSize && ow < wSize)
{
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] =
shape[ShapeOffset + (xIdx * wSize * hSize) + (oh * wSize) + ow];
}
else
{
if (h_over_scan>0 && ow<wSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
if (w_over_scan>0 && oh<hSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
}
}
else
{
shape[ShapeOffset + (xIdx * wSize * hSize) + (oh * wSize) + ow] +=
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx];
}
}
}
}
}
}
}
}
extern "C" __global__ void NearestNeighborNHWC(
const int aligncorners,
const int threads,
const float *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
float *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int c = n % channels;
n /= channels;
int dest_x = n % dest_width;
n /= dest_width;
int dest_y = n % dest_height;
n /= dest_height;
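// decompose the flat index into (n, dest_y, dest_x, c) in NHWC order, then read the nearest source pixel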
const float *src_data_n = &src[n * channels * src_height * src_width];
const int src_y = fminf((aligncorners) ? (roundf(dest_y * height_scale))
: (floorf(dest_y * height_scale)),
src_height - 1);
const int src_x = fminf((aligncorners) ? (roundf(dest_x * width_scale))
: (floorf(dest_x * width_scale)),
src_width - 1);
const int idx = (src_y * src_width + src_x) * channels + c;
dest[i] = src_data_n[idx];
}
}
extern "C" __global__ void NearestNeighborNCHW(
const int aligncorners,
const int threads,
const float *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
float *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int dest_x = n % dest_width;
n /= dest_width;
int dest_y = n % dest_height;
n /= dest_height;
int c = n % channels;
n /= channels;
const float *src_data_n = &src[n * channels * src_height * src_width];
const int src_y = fminf((aligncorners) ? (roundf(dest_y * height_scale))
: (floorf(dest_y * height_scale)),
src_height - 1);
const int src_x = fminf((aligncorners) ? (roundf(dest_x * width_scale))
: (floorf(dest_x * width_scale)),
src_width - 1);
const int idx = (c * src_height * src_width) + (src_y * src_width) + src_x;
dest[i] = src_data_n[idx];
}
}
extern "C" __global__ void NearestNeighborNCHWBack(
const int aligncorners,
const int threads,
float *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
float *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
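// gradients from several grid points can map to the same output location, so they are accumulated with atomicAdd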
int n = i;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
int c = n % channels;
n /= channels;
float *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (c * dest_width * dest_height) + (dest_y * dest_width) + dest_x;
atomicAdd(&src_data_n[idx], dest[i]);
}
}
extern "C" __global__ void NearestNeighborNHWCBack(
const int aligncorners,
const int threads,
float *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
float *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int c = n % channels;
n /= channels;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
float *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (dest_y * dest_width + dest_x) * channels + c;
atomicAdd(&src_data_n[idx], dest[i]);
}
}
extern "C" __global__ void AdaGrad(const int length,
float *weights, //weights input and output
float *dw, //input and will have to set to zero
float *gsum, //storage
const float rate, //input
const float eps,
const float dwalpha)
{ //input
CUDA_GRID_LOOP_X(cell, length)
{
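// accumulate squared gradients and scale the step by 1/(sqrt(gsum)+eps); dw is then decayed by dwalpha as a smoothing factor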
gsum[cell] = gsum[cell] + (dw[cell] * dw[cell]);
weights[cell] += -(rate * dw[cell]) / (sqrtf(gsum[cell]) + eps);
dw[cell] = dw[cell]*dwalpha; //smoothing factor.
}
}
extern "C" __global__ void Adam(const int n,
float *w,
float *gsum,
float *xsum,
float *dw,
const float rate,
const float beta1,
const float beta2,
const float eps,
const float denombeta1,
const float denombeta2,
const float dwalpha)
{
CUDA_GRID_LOOP_X(i, n)
{
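// gsum and xsum hold the running 1st and 2nd moment estimates;
// denombeta1/denombeta2 are the precomputed bias-correction denominators (1 - beta^t)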
gsum[i] = (beta1 * gsum[i]) + ((1.0 - beta1) * dw[i]);
float gsumt = gsum[i] /denombeta1;
xsum[i] = (beta2 * xsum[i]) + ((1.0 - beta2) * (dw[i] * dw[i]));
float xsumt = xsum[i] / denombeta2;
w[i] += -(rate * gsumt) / (sqrtf(xsumt) + eps);
dw[i]= dwalpha*dw[i]; //smoothing factor
}
}
extern "C" __global__ void AdaDelta(const int length,
float *weights, //weights input and output
float *gsum, //storage
float *xsum, //storage
float *dw, //input and will have to set to zero
const float rate, //input
const float eps,
const float ro,
const float dwalpha)
{
CUDA_GRID_LOOP_X(i, length)
{
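// keep decaying averages of the squared gradients (gsum) and squared updates (xsum);
// their ratio gives a per-parameter step size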
gsum[i] = (ro * gsum[i]) + ((1.0-ro)*dw[i] * dw[i]);
const float dx = sqrtf((xsum[i]+eps)/(gsum[i]+eps))*dw[i];
xsum[i]=(ro*xsum[i])+((1-ro)*dx*dx);
weights[i] -= dx;
dw[i] = dw[i]*dwalpha;
}
}
/*
//This is paired with the host
extern "C" __global__ void Segment1stDim(const int start_index, const float *src, float *dst, const int size)
{
int i = (blockIdx.y * gridDim.x * blockDim.x) + (blockIdx.x * blockDim.x) + threadIdx.x;
int start_location = start_index * size;
if (i < size)
{
dst[i] = src[start_location + i];
}
}
//This is paired with the host
extern "C" __global__ void Segment1stDimhalf(const int start_index, const __half *src, __half *dst, const int size)
{
int i = (blockIdx.y * gridDim.x * blockDim.x) + (blockIdx.x * blockDim.x) + threadIdx.x;
int start_location = start_index * size;
if (i < size)
{
dst[i] = src[start_location + i];
}
}
*/
extern "C" __global__ void L1L2(
const int length,
float *dw, //input and output
const float *w, //input needs to ba an array
float *l1, //output set to zero
float *l2, //output set to zero
const float batch, // should be an int but just send it as a float
const float decay1, //input
const float decay2)
{ //input
CUDA_GRID_LOOP_X(i, length)
{
atomicAdd(l1, abs(w[i]) * decay1);
atomicAdd(l2, (w[i] * w[i] * decay2) / 2.0);
const float gradl1 = decay1 * (w[i] > 0 ? 1 : -1);
const float gradl2 = w[i] * decay2;
dw[i] = (dw[i] + gradl2 + gradl1) / batch;
}
}
//ThreshForward is kind of memory expensive, mostly because it is experimental.
//To test, start the positive coefficients at random uniform numbers between .9 and 1.1
//and the negcoefs between .01 and .2, or something along those lines.
//The threshold could be drawn uniformly between -.3 and .3.
extern "C" __global__ void ThreshForward(const int XThreads,
const int batchsize,
const float *x,
float *y,
const float *negcoefs,
const float *threshhold,
const float *poscoefs)
{
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (x[stride+xIdx]>threshhold[xIdx])
{
y[stride+xIdx]= x[stride+xIdx]*poscoefs[xIdx];
}
else
{
y[stride+xIdx]= negcoefs[xIdx]*x[stride+xIdx];
}
}
}
}
//Backward
// Max(x,thresh)
extern "C" __global__ void ThreshBackward(const int XThreads,
const int batchsize,
const float *x,
float *dx,
const float *dy,
const float *negcoefs,
float *dnegcoefs,
const float *threshhold,
float *dthreshhold,
const float *poscoefs,
float *dposcoefs)
{
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (x[stride+xIdx]>threshhold[xIdx])
{
dx[stride+xIdx]= poscoefs[xIdx]*dy[stride+xIdx];
dposcoefs[xIdx]+=dy[stride+xIdx]*x[stride+xIdx];
}
else
{
dx[stride+xIdx]= negcoefs[xIdx]*dy[stride+xIdx];
dnegcoefs[xIdx]+=dy[stride+xIdx]*x[stride+xIdx];
}
dthreshhold[xIdx]+=dy[stride+xIdx];
}
}
}
//PreluForward does the forward pass of the parametric ReLU (PReLU)
extern "C" __global__ void PreluForward(const int XThreads,
const int batchsize,
const float *x,
float *y,
const float *coefs)
{
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (x[stride+xIdx]>0)
{
y[stride+xIdx]= x[stride+xIdx];
}
else
{
y[stride+xIdx]= coefs[xIdx]*x[stride+xIdx];
}
}
}
}
//PreluBackward does the backprop of the parametric ReLU input and its coefficients
extern "C" __global__ void PreluBackward(const int XThreads,
const int batchsize,
float *dx,
const float *x,
const float *dy,
const float *coefs,
float *dcoefs)
{
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (x[stride+xIdx]>0)
{
dx[stride+xIdx]= dy[stride+xIdx];
}
else
{
dx[stride+xIdx]= coefs[xIdx]*dy[stride+xIdx];
dcoefs[xIdx]+=dy[stride+xIdx]*x[stride+xIdx];
}
}
}
}
/*
Leaky functions
*/
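// the AlphaBeta/Alpha variants blend the result into the existing buffer:
// out = beta*out_old + alpha*f(in) (beta omitted in the Alpha-only versions)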
extern "C" __global__ void LeakyForwardAlphaBeta(const int length,
const float *x,
float *y,
const float coef,
const float alpha,
const float beta)
{
CUDA_GRID_LOOP_X(i, length)
{
const float previous = y[i];
if (x[i] > 0.0)
{
const float current = x[i];
y[i] = (beta*previous) + (alpha *current) ;
}
else
{
const float current = x[i]*coef;
y[i] = (beta*previous) + (alpha *current) ;
}
__syncthreads();
}
}
extern "C" __global__ void LeakyBackwardAlphaBeta(const int length,
const float *x,
float *dx,
const float *dy,
const float coef,
const float alpha,
const float beta)
{
CUDA_GRID_LOOP_X(i, length)
{
const float previous = dx[i];
if (x[i] > 0.0)
{
const float current= dy[i];
dx[i] =(beta *previous) + (current * alpha);
}
else
{
const float current= dy[i]*coef;
dx[i] = (beta *previous) + (current * alpha);
}
__syncthreads();
}
}
extern "C" __global__ void LeakyForwardAlpha(const int length,
const float *x,
float *y,
const float coef,
const float alpha)
{
CUDA_GRID_LOOP_X(i, length)
{
if (x[i] > 0.0)
{
y[i] = alpha *x[i];
}
else
{
const float current=x[i]*coef;
y[i] =current * alpha;
}
__syncthreads();
}
}
extern "C" __global__ void LeakyBackwardAlpha(const int length,
const float *x,
float *dx,
const float *dy,
const float coef,
const float alpha)
{
CUDA_GRID_LOOP_X(i, length)
{
if (x[i] > 0.0)
{
dx[i] = dy[i]*alpha;
}
else
{
const float current=dy[i]*coef;
dx[i] = current *alpha;
}
__syncthreads();
}
}
extern "C" __global__ void LeakyForward(const int length,
const float *x,
float *y,
const float coef)
{
CUDA_GRID_LOOP_X(i, length)
{
if (x[i] > 0.0)
{
y[i] = x[i];
}
else
{
y[i] = x[i] * coef;
}
}
}
extern "C" __global__ void LeakyBackward(const int length,
const float *x,
float *dx,
const float *dy,
const float coef)
{
CUDA_GRID_LOOP_X(i, length)
{
if (x[i] > 0.0)
{
dx[i] = dy[i];
}
else
{
dx[i] = dy[i] * coef;
}
}
}
extern "C" __global__ void MSELoss(const int length,
float *errors,
const float *target,
const float *networkout,
float *loss,
const float alpha,
const float beta)
{
loss[0]=0;
CUDA_GRID_LOOP_X(i, length)
{
const float y = networkout[i] - target[i];
errors[i] = y;
atomicAdd(loss, (y * y) / 2);
}
}
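//Math note (a sketch of what the kernel accumulates): loss = sum_i (networkout[i]-target[i])^2 / 2,
//and errors[i] stores d(loss)/d(networkout[i]) = networkout[i]-target[i], so errors can be fed
//straight into backprop. alpha and beta are accepted but not used here.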
extern "C" __global__ void MSELossbyBatches(const int xthreads,const int ythreads, float *errors, const float *target, const float *networkout, float *loss)
{
CUDA_GRID_AXIS_LOOP(xIdx,xthreads,x)
{
const int offset=ythreads*xIdx;
CUDA_GRID_AXIS_LOOP(yIdx, ythreads,y)
{
const float y = networkout[offset+yIdx] - target[offset+yIdx];
errors[offset+yIdx] = y;
atomicAdd(&loss[xIdx], (y * y) / 2);
}
}
}
extern "C" __global__ void ConcatNHWCEX(const int XThreads,
const int YThreads,
const int ZThreads,
const int Batches,
const int DestBatchVol,
const int TotalDestChannels,
const int DestChannelOffset,
float *src,
const int SrcBatchVol,
float *dest,
bool forward)
{
for (int i=0;i<Batches;i++){
CUDA_GRID_AXIS_LOOP(idX,XThreads,x)
{
CUDA_GRID_AXIS_LOOP(idY,YThreads,y)
{
CUDA_GRID_AXIS_LOOP(idZ,ZThreads,z)
{
int deststride = (i*DestBatchVol)+(idX*YThreads*TotalDestChannels)+(idY*TotalDestChannels)+DestChannelOffset+idZ;
int srcstride = (i*SrcBatchVol)+(idX*YThreads*ZThreads)+(idY*ZThreads)+idZ;
if (forward){
dest[deststride]=src[srcstride];
}else{
src[srcstride]=dest[deststride];
}
}
}
}
}
}
extern "C" __global__ void ConcatNCHWEX(const int XThreads,
const int Batches,
const int DestBatchVol,
const int DestChannelOffset,
float *src,
const int SrcBatchVol,
float *dest,
bool forward)
{
for (int i=0;i<Batches;i++){
CUDA_GRID_AXIS_LOOP(idX,XThreads,x)
{
int deststride = (i*DestBatchVol)+(DestChannelOffset+idX);
int srcstride = (i*SrcBatchVol)+(idX);
if (forward){
dest[deststride]=src[srcstride];
}else{
src[srcstride]=dest[deststride];
}
}
}
}
extern "C" __global__ void ConcatNHWCEXHalf(const int XThreads,
const int YThreads,
const int ZThreads,
const int Batches,
const int DestBatchVol,
const int TotalDestChannels,
const int DestChannelOffset,
float *src,
const int SrcBatchVol,
float *dest,
bool forward)
{
for (int i=0;i<Batches;i++){
CUDA_GRID_AXIS_LOOP(idX,XThreads,x)
{
CUDA_GRID_AXIS_LOOP(idY,YThreads,y)
{
CUDA_GRID_AXIS_LOOP(idZ,ZThreads,z)
{
int deststride = (i*DestBatchVol)+(idX*YThreads*TotalDestChannels)+(idY*TotalDestChannels)+DestChannelOffset+idZ;
int srcstride = (i*SrcBatchVol)+(idX*YThreads*ZThreads)+(idY*ZThreads)+idZ;
if (forward){
dest[deststride]=src[srcstride];
}else{
src[srcstride]=dest[deststride];
}
}
}
}
}
}
extern "C" __global__ void ConcatNCHWEXHalf(const int XThreads,
const int Batches,
const int DestBatchVol,
const int DestChannelOffset,
float *src,
const int SrcBatchVol,
float *dest,
bool forward)
{
for (int i=0;i<Batches;i++){
CUDA_GRID_AXIS_LOOP(idX,XThreads,x)
{
int deststride = (i*DestBatchVol)+(DestChannelOffset+idX);
int srcstride = (i*SrcBatchVol)+(idX);
if (forward){
dest[deststride]=src[srcstride];
}else{
src[srcstride]=dest[deststride];
}
}
}
}
extern "C" __global__ void ConcatForwardNCHW( const int XThreads,
const int Batches,
const int Channels1,
const int src1vol,
const float *Src1,
const int Channels2,
const int src2vol,
const float *Src2,
float *dest)
{
for (int i = 0;i<Batches;i++)
{
        const int Stride= (src1vol+src2vol)*i; //offset of batch i in dest
const int src1batchstride=src1vol*i;
const int src2batchstride=src2vol*i;
for (int j=0;j<Channels1;j++)
{
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
dest[Stride+(j*XThreads)+xIdx] = Src1[src1batchstride+(j*XThreads)+xIdx];
}
}
for (int j=0;j<Channels2;j++){
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
dest[Stride+(j*XThreads)+src1vol+xIdx] = Src2[src2batchstride+(j*XThreads)+xIdx];
}
}
}
}
extern "C" __global__ void ConcatBackwardNCHW( const int XThreads,
const int Batches,
const int Channels1,
const int src1vol,
float *Src1,
const int Channels2,
const int src2vol,
float *Src2,
const float *dest)
{
for (int i = 0;i<Batches;i++)
{
        const int Stride= (src1vol+src2vol)*i; //offset of batch i in dest
const int src1batchstride=src1vol*i;
const int src2batchstride=src2vol*i;
for (int j=0;j<Channels1;j++)
{
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
Src1[src1batchstride+(j*XThreads)+xIdx]= dest[Stride+(j*XThreads)+xIdx];
}
}
for (int j=0;j<Channels2;j++){
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
Src2[src2batchstride+(j*XThreads)+xIdx] = dest[Stride+(j*XThreads)+src1vol+xIdx];
}
}
}
}
extern "C" __global__ void ConcatForwardNCHWhalf( const int XThreads,
const int Batches,
const int Channels1,
const int src1vol,
const __half *Src1,
const int Channels2,
const int src2vol,
const __half *Src2,
__half *dest)
{
for (int i = 0;i<Batches;i++)
{
        const int Stride= (src1vol+src2vol)*i; //offset of batch i in dest
const int src1batchstride=src1vol*i;
const int src2batchstride=src2vol*i;
for (int j=0;j<Channels1;j++)
{
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
dest[Stride+(j*XThreads)+xIdx] = Src1[src1batchstride+(j*XThreads)+xIdx];
}
}
for (int j=0;j<Channels2;j++){
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
dest[Stride+(j*XThreads)+src1vol+xIdx] = Src2[src2batchstride+(j*XThreads)+xIdx];
}
}
}
}
extern "C" __global__ void ConcatBackwardNCHWhalf( const int XThreads,
const int Batches,
const int Channels1,
const int src1vol,
__half *Src1,
const int Channels2,
const int src2vol,
__half *Src2,
const __half *dest)
{
for (int i = 0;i<Batches;i++)
{
        const int Stride= (src1vol+src2vol)*i; //offset of batch i in dest
const int src1batchstride=src1vol*i;
const int src2batchstride=src2vol*i;
for (int j=0;j<Channels1;j++)
{
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
Src1[src1batchstride+(j*XThreads)+xIdx]= dest[Stride+(j*XThreads)+xIdx];
}
}
for (int j=0;j<Channels2;j++){
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
Src2[src2batchstride+(j*XThreads)+xIdx] = dest[Stride+(j*XThreads)+src1vol+xIdx];
}
}
}
}
//MakePlanarImageBatchesUint8 - for this to work, each batch should have the same number of channels
//and all the channels need to be the same size
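//Illustration of the assumed layout: with Batches=2, channelsperbatch=3 and XThreads=H*W,
//Srcs holds the 3 planar channels once (3*H*W floats) and the kernel fills
//  dest[b*3*H*W + c*H*W + x] = Srcs[c*H*W + x]  for b in {0,1}, c in {0,1,2},
//i.e. the same planar image is replicated into every batch slot.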
extern "C" __global__ void MakePlanarImageBatchesUint8(const int XThreads, //Should be channel size
const int Batches,
const int channelsperbatch,
const float *Srcs, //all the channels for everything.
float *dest)
{
const int batchsize = XThreads*channelsperbatch;
for (int i = 0;i<Batches;i++)
{
for (int j = 0;j<channelsperbatch;j++)
{
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
dest[(i*batchsize)+(j*XThreads)+xIdx]=Srcs[(j*XThreads)+xIdx];
}
}
}
}
extern "C" __global__ void TransposeFP16(int numthreads,
const __half *src,
const int *buf,
const int ndims,
__half *dest)
{
const int *src_strides = buf;
const int *dest_strides = &buf[ndims];
const int *perm = &buf[ndims * 2];
CUDA_GRID_LOOP_X(destIdx, numthreads)
{
int srcIdx = 0;
int t = destIdx;
for (int i = 0; i < ndims; ++i)
{
const int ratio = t / dest_strides[i];
t -= ratio * dest_strides[i];
srcIdx += (ratio * src_strides[perm[i]]);
}
dest[destIdx] = src[srcIdx];
}
}
extern "C" __global__ void SwapEveryOtherFP16(
    const int n, //elements per batch (batch volume)
const int totalbatches,
__half *t1,
__half *t2,
const int start,
const int stride)
{
StartAxis(stx,x)
const int BVol = n/2;
__half2 *t1h=(half2 *)t1;
__half2 *t2h=(half2 *)t2;
for (int i =start;i<totalbatches;i+=stride)
{
CUDA_GRID_LOOP_X(xIdx, BVol)
{
const __half2 swapper = t1h[(i*BVol)+(xIdx)];
t1h[(i*BVol) +xIdx]=t2h[(i*BVol)+xIdx];
t2h[(i*BVol)+xIdx]=swapper;
}
if (stx==0 && (n%2)){
const int xIdx=n-1;
const __half swapper = t1[(i*n)+(xIdx)];
            t1[(i*n) +(xIdx)]=t2[(i*n)+(xIdx)];
t2[(i*n)+(xIdx)]=swapper;
}
__syncthreads();
}
}
extern "C" __global__ void SwapUpperLowerFP16(
const int xThreads, //batchsize
const int yThreads, //batchvol
__half *t1,
__half *t2,
const int t1upper,
const int t2upper,
const int inverse)
{
const int BVol = yThreads;
if (t1upper>0)
{
CUDA_GRID_AXIS_LOOP(xIdx,xThreads/2,x)
{
int t2Idx;
if (t2upper>0){
t2Idx=xIdx;
}else{
t2Idx=xThreads/2 +xIdx;
}
if (xIdx < xThreads && t2Idx<xThreads)
{
CUDA_GRID_AXIS_LOOP(yIdx, BVol,y)
{
const __half swapper = t1[(xIdx*BVol)+(yIdx)];
t1[(xIdx*BVol) +yIdx]=t2[(t2Idx*BVol)+yIdx];
                    t2[(t2Idx*BVol)+yIdx]=swapper;
}
}
}
}
else
{
CUDA_GRID_AXIS_LOOP(xIdx, xThreads/2,x)
{
const int halfIdx=(xThreads/2)+xIdx;
int t2Idx;
if (t2upper>0){
t2Idx=xIdx;
}else{
t2Idx=halfIdx;
}
if (halfIdx < xThreads)
{
CUDA_GRID_AXIS_LOOP(yIdx, yThreads,y)
{
const __half swapper = t1[(halfIdx*BVol)+(yIdx)];
t1[(halfIdx*BVol) +yIdx]=t2[(t2Idx*BVol)+yIdx];
                    t2[(t2Idx*BVol)+yIdx]=swapper;
}
}
}
}
}
//ShapetoBatch4DNHWC Does a stride shape to batch. Make sure values on receiving end are set to zero when s2b is 0
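//Worked example (assuming non-overlapping patches, i.e. hstride==xThreads and wstride==yThreads):
//patch (i,j) of each original batch covers rows hstride*i..hstride*i+xThreads-1 and columns
//wstride*j..wstride*j+yThreads-1 of the shape tensor. With S2B true every in-range pixel is copied
//into its own slot of batch; with S2B false the batch values are accumulated (+=) back into shape,
//and out-of-range positions are only written (as zero) when the over-scan flags are set.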
extern "C" __global__ void ShapetoBatch4DNHWCFP16(
const int xThreads,
const int yThreads,
const int zThreads,
const int hSize,
const int wSize,
const int num_original_batches,
const int BatchVolume,
const int OriginalVol,
const int N1,
const int N2,
const int hstride,
const int wstride,
__half *shape,
__half *batch,
const int h_over_scan,
const int w_over_scan,
const bool S2B)
{
int batch0 = N2 * xThreads * yThreads * zThreads;
int batch1 = xThreads * yThreads * zThreads;
int batch2 = yThreads * zThreads;
int batch3 = zThreads;
for (int b = 0;b<num_original_batches;b++)
{
const int ShapeOffset = OriginalVol*b;
const int BatchOffset=BatchVolume*b;
for (int i = 0; i < N1; i++)
{
for (int j = 0; j < N2; j++)
{
CUDA_GRID_AXIS_LOOP(xIdx, xThreads, x)
{
CUDA_GRID_AXIS_LOOP(yIdx, yThreads, y)
{
CUDA_GRID_AXIS_LOOP(zIdx, zThreads, z)
{
int oh = (hstride * i) + xIdx;
int ow = (wstride * j) + yIdx;
if (S2B)
{
if (oh < hSize && ow < wSize)
{
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] =
shape[ShapeOffset + (oh * hSize * zThreads) + (ow * zThreads) + zIdx];
}
else
{
if (h_over_scan>0 && ow<wSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
if (w_over_scan>0 && oh<hSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
}
}
else
{
shape[ShapeOffset + (oh * hSize * zThreads) + (ow * zThreads) + zIdx] +=
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx];
}
}
}
}
}
}
}
}
extern "C" __global__ void ShapetoBatch4DNCHWFP16(
const int xThreads,
const int yThreads,
const int zThreads,
const int hSize,
const int wSize,
const int num_original_batches,
const int BatchVolume,
const int OriginalVol,
const int N1,
const int N2,
const int hstride,
const int wstride,
__half *shape,
__half *batch,
const int h_over_scan,
const int w_over_scan,
const bool S2B)
{
int batch0 = N2 * xThreads * yThreads * zThreads;
int batch1 = xThreads * yThreads * zThreads;
int batch2 = xThreads * yThreads;
int batch3 = yThreads;
for (int b = 0;b<num_original_batches;b++)
{
const int ShapeOffset = OriginalVol*b;
const int BatchOffset=BatchVolume*b;
for (int i = 0; i < N1; i++)
{
for (int j = 0; j < N2; j++)
{
CUDA_GRID_AXIS_LOOP(xIdx, xThreads, x)
{
CUDA_GRID_AXIS_LOOP(yIdx, yThreads, y)
{
CUDA_GRID_AXIS_LOOP(zIdx, zThreads, z)
{
int oh = (hstride * i) + yIdx;
int ow = (wstride * j) + zIdx;
if (S2B )
{
if (oh < hSize && ow < wSize)
{
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] =
shape[ShapeOffset + (xIdx * wSize * hSize) + (oh * wSize) + ow];
}
else
{
if (h_over_scan>0 && ow<wSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
if (w_over_scan>0 && oh<hSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
}
}
else
{
shape[ShapeOffset + (xIdx * wSize * hSize) + (oh * wSize) + ow] +=
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx];
}
}
}
}
}
}
}
}
extern "C" __global__ void NearestNeighborNCHWFP16(
const int aligncorners,
const int threads,
const __half *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
__half *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int dest_x = n % dest_width;
n /= dest_width;
int dest_y = n % dest_height;
n /= dest_height;
int c = n % channels;
n /= channels;
const __half *src_data_n = &src[n * channels * src_height * src_width];
const int src_y = fminf((aligncorners) ? (roundf(dest_y * height_scale))
: (floorf(dest_y * height_scale)),
src_height - 1);
const int src_x = fminf((aligncorners) ? (roundf(dest_x * width_scale))
: (floorf(dest_x * width_scale)),
src_width - 1);
const int idx = (c * src_height * src_width) + (src_y * src_width) + src_x;
dest[i] = src_data_n[idx];
}
}
#if __CUDA_ARCH__ >= 750 //might not work on other architectures. will probably work best with even tensors.
extern "C" __global__ void NearestNeighborNHWCBackFP16(
const int aligncorners,
const int threads,
__half *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
__half *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int c = n % channels;
n /= channels;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
__half *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (dest_y * dest_width + dest_x) * channels + c;
atomicAdd(&src_data_n[idx], dest[i]);
}
}
#else
extern "C" __global__ void NearestNeighborNHWCBackFP16(
const int aligncorners,
const int threads,
__half *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
__half *dest)
{
const __half zer0= __float2half(0.0);
    CUDA_GRID_LOOP_X(i, threads-1) //minus one because I do a conversion to half2 which is 32 bits to do the atomic add and don't want to run into space outside of the array
{
int n = i;
int c = n % channels;
n /= channels;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
__half *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (dest_y * dest_width + dest_x) * channels + c;
const __half2 dsth2 = __halves2half2(dest[i],zer0); // This should give us the value half2[dest,0]
void *vdptr=(void*)(&src_data_n[idx]); //I don't know if I need to do this, but I work with go a lot and wanted to make sure it was going to step correctly
        __half2 *srch2hack = (__half2*)(vdptr); //Here we cast the void pointer into srch2hack
atomicAdd(srch2hack,dsth2); // this should be (src_data_n[idx]+dest[i], src_data_n[idx+1]+0) //had to do threads -1 so in the last part we don't overstep the bounds
}
//This last part is to do the last value in dest.
int n = threads-1;
int c = n % channels;
n /= channels;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
__half *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (dest_y * dest_width + dest_x) * channels + c;
src_data_n[idx] = __hadd(src_data_n[idx], dest[threads-1]);
}
#endif
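//Caveat on the fallback above (and the NCHW fallback further down): casting &src_data_n[idx] to a
//__half2* assumes that address is 4-byte aligned, i.e. idx is even; for odd idx the paired-half
//atomicAdd is misaligned, so treat this path as a best-effort approximation.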
extern "C" __global__ void NearestNeighborNHWCFP16(
const int aligncorners,
const int threads,
const __half *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
__half *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int c = n % channels;
n /= channels;
int dest_x = n % dest_width;
n /= dest_width;
int dest_y = n % dest_height;
n /= dest_height;
const __half *src_data_n = &src[n * channels * src_height * src_width];
const int src_y = fminf((aligncorners) ? (roundf(dest_y * height_scale))
: (floorf(dest_y * height_scale)),
src_height - 1);
const int src_x = fminf((aligncorners) ? (roundf(dest_x * width_scale))
: (floorf(dest_x * width_scale)),
src_width - 1);
const int idx = (src_y * src_width + src_x) * channels + c;
dest[i] = src_data_n[idx];
}
}
#if __CUDA_ARCH__ >= 750 //might not work on other architectures. will probably work best with even tensors.
extern "C" __global__ void NearestNeighborNCHWBackFP16(
const int aligncorners,
const int threads,
__half *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
__half *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
int c = n % channels;
n /= channels;
__half *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (c * dest_width * dest_height) + (dest_y * dest_width) + dest_x;
atomicAdd(&src_data_n[idx], dest[i]);
}
}
#else
//Might not work with archs that are not 7.5.. but might work best with even tensors.
extern "C" __global__ void NearestNeighborNCHWBackFP16(
const int aligncorners,
const int threads,
__half *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
__half *dest)
{
const __half zer0= __float2half(0.0);
    CUDA_GRID_LOOP_X(i, threads-1) //minus one because I do a conversion to half2 which is 32 bits to do the atomic add and don't want to run into space outside of the array
{
int n = i;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
int c = n % channels;
n /= channels;
__half *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (c * dest_width * dest_height) + (dest_y * dest_width) + dest_x;
const __half2 dsth2 = __halves2half2(dest[i],zer0); // This should give us the value half2[dest,0]
void *vdptr=(void*)(&src_data_n[idx]); //I don't know if I need to do this, but I work with go a lot and wanted to make sure it was going to step correctly
        __half2 *srch2hack = (__half2*)(vdptr); //Here we cast the void pointer into srch2hack
atomicAdd(srch2hack,dsth2); // this should be (src_data_n[idx]+dest[i], src_data_n[idx+1]+0) //had to do threads -1 so in the last part we don't overstep the bounds
}
//This last part is to do the last value in dest.
int n = threads-1;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
int c = n % channels;
n /= channels;
__half *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (c * dest_width * dest_height) + (dest_y * dest_width) + dest_x;
src_data_n[idx] = __hadd(src_data_n[idx], dest[threads-1]);
}
#endif
extern "C" __global__ void AdaGradFP16(const int n,
__half *w, //w input and output
__half *dw, //input and will have to set to zero
__half *gsum, //storage
const __half rate, //input
const __half eps,
const __half dwalpha)
{ //input
StartAxis(stx,x)
int n2=n/2;
__half2 *w2=(__half2*)w,*dw2=(__half2*)dw,*gsum2=(__half2*)gsum;
const __half2 rate2=__halves2half2(rate,rate);
const __half2 eps2=__halves2half2(eps,eps);
const __half2 dwalpha2=__halves2half2(dwalpha,dwalpha);
CUDA_GRID_LOOP_X(i, n2)
{
__half2 holder = gsum2[i];
gsum2[i] = __hfma2(dw2[i],dw2[i],holder);
w2[i] = __hadd2(-__h2div((__hmul2(rate2,dw2[i])) , (__hadd2(h2sqrt(gsum2[i]), eps2))),w2[i]);
dw2[i] =__hmul2(dw2[i],dwalpha2);
}
if (stx==0 && (n%2)){
__half holder = gsum[n-1];
gsum[n-1] = __hfma(dw[n-1],dw[n-1],holder);
        w[n-1] = __hsub(w[n-1],__hdiv((__hmul(rate,dw[n-1])) , (__hadd(hsqrt(gsum[n-1]), eps))));
dw[n-1] =__hmul(dw[n-1],dwalpha);
}
}
//Need to fix this.
extern "C" __global__ void AdamFP16(const int n,
__half *w,
__half *gsum,
__half *xsum,
__half *dw,
const __half rate,
const __half beta1,
const __half beta2,
const __half eps,
const __half denombeta1,
const __half denombeta2,
const __half dwalpha)
{
int n2=n/2;
__half2 *w2=(__half2*)w,*dw2=(__half2*)dw,*gsum2=(__half2*)gsum,*xsum2=(__half2*)xsum;
const __half2 rate2=__halves2half2(rate,rate);
const __half2 eps2=__halves2half2(eps,eps);
const __half2 dwalpha2=__halves2half2(dwalpha,dwalpha);
const __half2 beta12=__halves2half2(beta1,beta1);
const __half2 beta22=__halves2half2(beta2,beta2);
const __half one1 = __float2half(1.0);
const __half2 one2=__halves2half2(one1,one1);
StartAxis(stx,x)
CUDA_GRID_LOOP_X(i, n2)
{
gsum2[i] =__hfma2(__hsub2(one2,beta12),dw2[i],__hmul2(beta12,gsum2[i]));
__half2 gsumt = __h2div(gsum2[i] ,__halves2half2(denombeta1,denombeta1));
xsum2[i] = __hfma2(beta22 , xsum2[i], __hmul2(__hsub2(one2, beta22), __hmul2(dw2[i] , dw2[i])));
__half2 xsumt = __h2div(xsum2[i] , __halves2half2(denombeta2,denombeta2));
w2[i]=__hsub2(w2[i],__h2div(__hmul2(rate2,gsumt),__hadd2(h2sqrt(xsumt),eps2)));
dw2[i]= __hmul2(dwalpha2,dw2[i]);
}
if (stx==0 && (n%2)){
const int i = n-1;
gsum[i] =__hfma(__hsub(one1,beta1),dw[i],__hmul(beta1,gsum[i]));
__half gsumt = __hdiv(gsum[i] ,denombeta1);
xsum[i] = __hfma(beta2 , xsum[i], __hmul(__hsub(one1, beta2), __hmul(dw[i] , dw[i])));
__half xsumt = __hdiv(xsum[i] , denombeta2);
w[i]=__hsub(w[i],__hdiv(__hmul(rate,gsumt),__hadd(hsqrt(xsumt),eps)));
dw[i]= __hmul(dwalpha,dw[i]);
}
}
extern "C" __global__ void AdaDeltaFP16(const int n,
__half *w, //weights input and output
__half *gsum, //storage
__half *xsum, //storage
__half *dw, //input and will have to set to zero
const __half rate, //input
const __half eps,
const __half ro,
const __half dwalpha)
{
StartAxis(stx,x)
int n2=n/2;
__half2 *w2=(__half2*)w,*dw2=(__half2*)dw,*gsum2=(__half2*)gsum,*xsum2=(__half2*)xsum;
const __half2 rate2=__halves2half2(rate,rate);
const __half2 eps2=__halves2half2(eps,eps);
const __half2 ro2=__halves2half2(ro,ro);
const __half one1 = __float2half(1.0);
const __half2 one2=__halves2half2(one1,one1);
const __half2 dwalpha2=__halves2half2(dwalpha,dwalpha);
CUDA_GRID_LOOP_X(i, n2)
{
gsum2[i]= __hfma2(__hsub2(one2,ro2),__hmul2(dw2[i],dw2[i]),__hmul2(ro2,gsum2[i]));
const __half2 dx2= __hmul2(h2sqrt(__h2div(__hadd2(xsum2[i],eps2),__hadd2(gsum2[i],eps2))),dw2[i]);
xsum2[i]= __hfma2(__hsub2(one2,ro2),__hmul2(dx2,dx2),__hmul2(ro2,xsum2[i]));
w2[i] =__hsub2(w2[i],dx2);
dw2[i] = __hmul2(dw2[i],dwalpha2);
}
if (stx ==0 &&(n%2)){
int i = n-1;
gsum[i]= __hfma(__hsub(one1,ro),__hmul(dw[i],dw[i]),__hmul(ro,gsum[i]));
const __half dx= __hmul(hsqrt(__hdiv(__hadd(xsum[i],eps),__hadd(gsum[i],eps))),dw[i]);
xsum[i]= __hfma(__hsub(one1,ro),__hmul(dx,dx),__hmul(ro,xsum[i]));
w[i] =__hsub(w[i],dx);
dw[i] = __hmul(dw[i],dwalpha);
}
}
#if __CUDA_ARCH__ >= 750
extern "C" __global__ void L1L2FP16(
const int length,
__half *dw, //input and output
    const __half *w, //input needs to be an array
__half *l1, //output set to zero
__half *l2, //output set to zero
    const __half batch, // should be an int but just send it as a half
const __half decay1, //input
const __half decay2)
{ //input
const __half one1 = __float2half(1.0);
const __half zero0 = __float2half(0);
CUDA_GRID_LOOP_X(i, length)
{
__half abs = w[i];
if (__hlt(abs,zero0)){
abs=-abs;
}
//atomicAdd(l1, abs(w[i]) * decay1);
atomicAdd(l1,__hmul(abs,decay1));
//atomicAdd(l2, (w[i] * w[i] * decay2) / 2.0);
atomicAdd(l2, __hdiv(__hmul(__hmul(w[i] , w[i]) , decay2) , 2.0));
//const float gradl1 = decay1 * (w[i] > 0 ? 1 : -1);
const __half gradl1 = __hmul(decay1, (__hgt(w[i],zero0) ? one1 : -one1));
//const float gradl2 = w[i] * decay2;
const __half gradl2 = __hmul(w[i] ,decay2);
//dw[i] = (dw[i] + gradl2 + gradl1) / batch;
dw[i] = __hdiv(__hadd(__hadd(dw[i], gradl2) , gradl1) , batch);
}
}
#else
extern "C" __global__ void L1L2FP16(
const int length,
__half *dw, //input and output
    const __half *w, //input needs to be an array
__half *l1, //output set to zero
__half *l2, //output set to zero
    const __half batch, // should be an int but just send it as a half
const __half decay1, //input
const __half decay2)
{ //input
const __half one1 = __float2half(1.0);
const __half zero0 = __float2half(0);
    //Block-level accumulators packed as __half2 so atomicAdd(__half2*) can be used below arch 7.5;
    //they need real shared storage and a zero init before the loop.
    __shared__ __half2 l1l2h2[2];
    __half2 *l1h2=&l1l2h2[0];
    __half2 *l2h2=&l1l2h2[1];
    if (threadIdx.x==0){
        l1l2h2[0]=__halves2half2(zero0,zero0);
        l1l2h2[1]=__halves2half2(zero0,zero0);
    }
    __syncthreads();
CUDA_GRID_LOOP_X(i, length)
{
__half abs = w[i];
if (__hlt(abs,zero0)){
abs=-abs;
}
//atomicAdd(l1, abs(w[i]) * decay1);
const __half2 result= __halves2half2( __hmul(abs,decay1),zero0);
atomicAdd(l1h2,result);
//atomicAdd(l2, (w[i] * w[i] * decay2) / 2.0);
const __half2 result2= __halves2half2(__hdiv(__hmul(__hmul(w[i] , w[i]) , decay2) , 2.0),zero0);
atomicAdd(l2h2,result2 );
//const float gradl1 = decay1 * (w[i] > 0 ? 1 : -1);
const __half gradl1 = __hmul(decay1, (__hgt(w[i],zero0) ? one1 : -one1));
//const float gradl2 = w[i] * decay2;
const __half gradl2 = __hmul(w[i] ,decay2);
//dw[i] = (dw[i] + gradl2 + gradl1) / batch;
dw[i] = __hdiv(__hadd(__hadd(dw[i], gradl2) , gradl1) , batch);
}
    __syncthreads();
    if (threadIdx.x==0){ //write the block's totals; assumes a single-block launch for an exact sum
        l1[0]=__low2half(l1h2[0]);
        l2[0]=__low2half(l2h2[0]);
    }
}
#endif
extern "C" __global__ void ThreshForwardFP16(const int XThreads,
const int batchsize,
const __half *x,
__half *y,
const __half *negcoefs,
const __half *threshhold,
const __half *poscoefs)
{
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (__hgt(x[stride+xIdx],threshhold[xIdx]))
{
y[stride+xIdx]= __hmul(x[stride+xIdx],poscoefs[xIdx]);
}
else
{
y[stride+xIdx]= __hmul(negcoefs[xIdx],x[stride+xIdx]);
}
}
}
}
extern "C" __global__ void ThreshBackwardFP16(const int XThreads,
const int batchsize,
const __half *x,
__half *dx,
const __half *dy,
const __half *negcoefs,
__half *dnegcoefs,
const __half *threshhold,
const __half *poscoefs,
__half *dposcoefs)
{
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (__hgt(x[stride+xIdx],threshhold[xIdx]))
{
// dx[stride+xIdx]= poscoefs[xIdx]*dy[stride+xIdx];
dx[stride+xIdx]=__hmul(dy[stride+xIdx],poscoefs[xIdx]);
               // dposcoefs[xIdx]+=dy[stride+xIdx]*x[stride+xIdx];
               dposcoefs[xIdx]=__hfma(dy[stride+xIdx],x[stride+xIdx],dposcoefs[xIdx]);
}
else
{
// dx[stride+xIdx]= negcoefs[xIdx]*dy[stride+xIdx];
dx[stride+xIdx]= __hmul(dy[stride+xIdx],negcoefs[xIdx]);
                // dnegcoefs[xIdx]+=dy[stride+xIdx]*x[stride+xIdx];
               dnegcoefs[xIdx]=__hfma(dy[stride+xIdx],x[stride+xIdx],dnegcoefs[xIdx]);
}
}
}
}
extern "C" __global__ void PreluForwardFP16(const int XThreads,
const int batchsize,
const __half *x,
__half *y,
const __half *coefs)
{
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (__hgt(x[stride+xIdx],0))
{
y[stride+xIdx]= x[stride+xIdx];
}
else
{
y[stride+xIdx]= __hmul(coefs[xIdx],x[stride+xIdx]);
}
}
}
}
extern "C" __global__ void PreluBackwardFP16(const int XThreads,
const int batchsize,
__half *dx,
const __half *x,
const __half *dy,
const __half *coefs,
__half *dcoefs)
{
const __half zero0 = __float2half(0);
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (__hgt(x[stride+xIdx],zero0))
{
dx[stride+xIdx]= dy[stride+xIdx];
}
else
{
// dx[stride+xIdx]= coefs[xIdx]*dy[stride+xIdx];
dx[stride+xIdx]= __hmul(coefs[xIdx],dy[stride+xIdx]);
             //   dcoefs[xIdx]+=dy[stride+xIdx]*x[stride+xIdx];
              dcoefs[xIdx]=__hfma(dy[stride+xIdx],x[stride+xIdx],dcoefs[xIdx]);
}
}
}
}
extern "C" __global__ void LeakyForwardAlphaBetaFP16(const int length,
const __half *x,
__half *y,
const __half coef,
const __half alpha,
const __half beta)
{
const __half zero0 = __float2half(0);
CUDA_GRID_LOOP_X(i, length)
{
if (__hgt(x[i],zero0))
{
// y[i] = (beta*y[i]) + (alpha *x[i]) ;
y[i]=__hadd(__hmul(beta,y[i]),__hmul(alpha,x[i]));
}
else
{
//y[i] = (beta*previous) + (alpha *x[i]*coef);
y[i]=__hadd(__hmul(beta,y[i]),__hmul(alpha,__hmul(x[i],coef)));
}
__syncthreads();
}
}
extern "C" __global__ void LeakyBackwardAlphaBetaFP16(const int length,
const __half *x,
__half *dx,
const __half *dy,
const __half coef,
const __half alpha,
const __half beta)
{
const __half zero0 = __float2half(0);
CUDA_GRID_LOOP_X(i, length)
{
if (__hgt(x[i],zero0))
{
// dx[i] =(beta *dx[i]) + (dy[i] * alpha);
            dx[i]=__hadd(__hmul(beta,dx[i]),__hmul(alpha,dy[i]));
}
else
{
// dx[i] = (beta *dx[i]) + (dy[i]*coef * alpha);
dx[i]=__hadd(__hmul(beta,dx[i]),__hmul(alpha,__hmul(dy[i],coef)));
}
__syncthreads();
}
}
extern "C" __global__ void LeakyForwardAlphaFP16(const int length,
const __half *x,
__half *y,
const __half coef,
const __half alpha)
{
const __half zero0 = __float2half(0);
CUDA_GRID_LOOP_X(i, length)
{
if (__hgt(x[i],zero0))
{
y[i] = __hmul(alpha ,x[i]);
}
else
{
y[i] =__hmul(__hmul(x[i],coef) , alpha);
}
__syncthreads();
}
}
extern "C" __global__ void LeakyBackwardAlphaFP16(const int length,
const __half *x,
__half *dx,
const __half *dy,
const __half coef,
const __half alpha)
{
const __half zero0 = __float2half(0);
CUDA_GRID_LOOP_X(i, length)
{
if (__hgt(x[i],zero0))
{
// dx[i] = dy[i]*alpha;
dx[i] = __hmul(alpha ,dy[i]);
}
else
{
// dx[i] = dy[i]*coef *alpha;
dx[i] =__hmul(__hmul(dy[i],coef) , alpha);
}
__syncthreads();
}
}
extern "C" __global__ void LeakyForwardFP16(const int length,
const __half *x,
__half *y,
const __half coef)
{
const __half zero0 = __float2half(0);
CUDA_GRID_LOOP_X(i, length)
{
if (__hgt(x[i],zero0))
{
y[i] = x[i];
}
else
{
// y[i] = x[i] * coef;
y[i]= __hmul( x[i] , coef);
}
}
}
extern "C" __global__ void LeakyBackwardFP16(const int length,
const __half *x,
__half *dx,
const __half *dy,
const __half coef)
{
const __half zero0 = __float2half(0);
CUDA_GRID_LOOP_X(i, length)
{
if (__hgt(x[i],zero0))
{
dx[i] = dy[i];
}
else
{
// dx[i] = dy[i] * coef;
dx[i]= __hmul( dy[i] , coef);
}
}
}
#if __CUDA_ARCH__ >= 750
extern "C" __global__ void MSELossbyBatchesFP16(const int xthreads,
const int ythreads,
__half *errors,
const __half *target,
const __half *networkout,
__half *loss)
{
const __half htwo= __float2half(2.0);
CUDA_GRID_AXIS_LOOP(xIdx,xthreads,x)
{
const int i=ythreads*xIdx;
CUDA_GRID_AXIS_LOOP(yIdx, ythreads,y)
{
        const __half y = __hsub(networkout[i+yIdx] , target[i+yIdx]);
        errors[i+yIdx] = y;
atomicAdd(&loss[xIdx], __hdiv(__hmul(y , y) , htwo));
}
}
}
extern "C" __global__ void MSELossFP16(const int n,
__half *errors,
const __half *target,
const __half *networkout,
__half *loss,
const __half alpha,
const __half beta)
{
StartAxis(stx,x)
int n2=n/2;
__half2 *errors2=(__half2*)errors, *target2=(__half2*)target, *networkout2=(__half2*)networkout, *loss2=(__half2*)loss;
// const __half2 alpha2=__halves2half2(alpha), beta2=__halves2half2(beta);
const __half2 htwo2=__halves2half2(__float2half(2.0),__float2half(2.0));
const __half htwo= __float2half(2.0);
loss[0]=0;
CUDA_GRID_LOOP_X(i, n2)
{
const __half2 y = __hsub2(networkout2[i] , target2[i]);
errors2[i] = y;
atomicAdd(loss2, __h2div(__hmul2(y , y) ,htwo2));
}
if (stx==0 && (n%2)){
const int i=n-1;
const __half y = __hsub(networkout[i] , target[i]);
errors[i] = y;
atomicAdd(loss, __hdiv(__hmul(y , y) , htwo));
}
}
#else
extern "C" __global__ void MSELossbyBatchesFP16(
const int xthreads,
const int batches,
__half2 *errors,
const __half2 *target,
const __half2 *networkout,
__half *loss)
{
const __half htwo= __float2half(2.0);
const __half2 htwo2 =__halves2half2(htwo,htwo);
const int n=xthreads/2;
    //Per-batch block accumulators; assumes the launch supplies at least
    //batches*sizeof(__half2) of dynamic shared memory for loss2.
    extern __shared__ __half2 loss2[];
    for (int i=0; i<batches;i++){
        if (threadIdx.x==0){
            loss2[i]=__floats2half2_rn(0.0,0.0);
        }
        __syncthreads();
        CUDA_GRID_AXIS_LOOP(xIdx,n,x)
    {
        const __half2 y = __hsub2(networkout[i*n+xIdx] , target[i*n+xIdx]);
        errors[i*n+xIdx] = y;
        atomicAdd(&loss2[i], __h2div(__hmul2(y , y) , htwo2));
    }
     __syncthreads();
     loss[i]=__hadd(__low2half(loss2[i]),__high2half(loss2[i]));
}
}
extern "C" __global__ void MSELossFP16(const int n,
__half2 *errors,
const __half2 *target,
const __half2 *networkout,
__half *loss,
const __half alpha,
const __half beta)
{
// StartAxis(stx,x)
int n2=n/2;
// const __half2 alpha2=__halves2half2(alpha), beta2=__halves2half2(beta);
const __half2 htwo2=__halves2half2(__float2half(2.0),__float2half(2.0));
// const __half htwo= __float2half(2.0);
    //Block accumulator; needs real shared storage and a single-thread zero init.
    __shared__ __half2 loss2[1];
    if (threadIdx.x==0){
        loss2[0]= __halves2half2(__float2half(0.0),__float2half(0.0));
    }
    __syncthreads();
CUDA_GRID_LOOP_X(i, n2)
{
const __half2 y = __hsub2(networkout[i] , target[i]);
errors[i] = y;
atomicAdd(loss2, __h2div(__hmul2(y , y) ,htwo2));
}
    __syncthreads();
    loss[0]=__hadd(__low2half(loss2[0]),__high2half(loss2[0]));
}
#endif | 6309b1a9bae74ce7e12546ddc80f31c5fb1f3e0e.cu | #include <cuda.h>
#include <stdbool.h>
#include <cuda_fp16.h>
#define StartAxis(i,axis) int i = blockIdx.axis * blockDim.axis + threadIdx.axis;
#define CUDA_GRID_LOOP_X(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define CUDA_GRID_AXIS_LOOP(i, n, axis) \
for (int i = blockIdx.axis * blockDim.axis + threadIdx.axis; i < n; \
i += blockDim.axis * gridDim.axis)
__device__ __half2 h2agtb(__half2 a, __half2 b, __half gtval, __half leval ){
if (__hbgt2(a,b)){
return __halves2half2(gtval,gtval);
}
return __halves2half2(__hgt(__low2half(a),__low2half(b)) ? gtval : leval,
__hgt(__high2half(a),__high2half(b)) ? gtval : leval);
}
__device__ __half2 h2ageb(__half2 a, __half2 b, __half geval, __half ltval ){
if (__hbge2(a,b)){
return __halves2half2(geval,geval);
}
return __halves2half2(__hge(__low2half(a),__low2half(b)) ? geval : ltval,
__hge(__high2half(a),__high2half(b)) ? geval : ltval);
}
__device__ __half2 h2altb(__half2 a, __half2 b, __half geval, __half ltval ){
if (__hblt2(a,b)){
return __halves2half2(ltval,ltval);
}
return __halves2half2(__hlt(__low2half(a),__low2half(b)) ?ltval: geval,
__hlt(__high2half(a),__high2half(b)) ?ltval: geval);
}
__device__ __half2 h2aleb(__half2 a, __half2 b, __half gtval, __half leval ){
if (__hble2(a,b)){
return __halves2half2(leval,leval);
}
return __halves2half2(__hle(__low2half(a),__low2half(b)) ?leval: gtval,
__hle(__high2half(a),__high2half(b)) ?leval: gtval);
}
extern "C" __global__ void Transpose(int numthreads,
const float *src,
const int *buf,
const int ndims,
float *dest)
{
const int *src_strides = buf;
const int *dest_strides = &buf[ndims];
const int *perm = &buf[ndims * 2];
CUDA_GRID_LOOP_X(destIdx, numthreads)
{
int srcIdx = 0;
int t = destIdx;
for (int i = 0; i < ndims; ++i)
{
const int ratio = t / dest_strides[i];
t -= ratio * dest_strides[i];
srcIdx += (ratio * src_strides[perm[i]]);
}
dest[destIdx] = src[srcIdx];
}
}
/*SwapEveryOther will swap the batches between 2 tensors.
It will swap either the even or the odd batches.
Both tensors have to be equal in size and dims.
With start=0 and stride=2 it swaps the even batches; start=1 swaps the odd ones.
Make sure labels are swapped on host end.
*/
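/*Illustrative host-side launches (a sketch only; the grid/block shapes and device pointers are
placeholders, and in practice these kernels may be launched through the driver API instead):
    //swap the even batches of two equally sized tensors
    SwapEveryOther<<<grid, block>>>(batchVol, totalbatches, d_t1, d_t2, 0, 2);
    //swap the odd batches
    SwapEveryOther<<<grid, block>>>(batchVol, totalbatches, d_t1, d_t2, 1, 2);
*/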
extern "C" __global__ void SwapEveryOther(
    const int xThreads, //batch volume (elements per batch)
const int totalbatches,
float *t1,
float *t2,
const int start,
const int stride)
{
const int BVol = xThreads;
for (int i =start;i<totalbatches;i+=stride)
{
CUDA_GRID_LOOP_X(xIdx, xThreads)
{
const float swapper = t1[(i*BVol)+(xIdx)];
t1[(i*BVol) +xIdx]=t2[(i*BVol)+xIdx];
t2[(i*BVol)+xIdx]=swapper;
}
__syncthreads();
}
}
//SwapUpperLower will swap either the upper or lower batches
//Right Now inverse doesn't do anything
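//Example of the flags: t1upper>0 pairs the first xThreads/2 batches of t1 with either the first
//(t2upper>0) or the second half of t2; t1upper<=0 uses the second half of t1 instead. Each pair
//exchanges a full batch volume of yThreads elements.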
extern "C" __global__ void SwapUpperLower(
const int xThreads, //batchsize
const int yThreads, //batchvol
float *t1,
float *t2,
const int t1upper,
const int t2upper,
const int inverse)
{
const int BVol = yThreads;
if (t1upper>0)
{
CUDA_GRID_AXIS_LOOP(xIdx, xThreads/2,x)
{
int t2Idx;
if (t2upper>0){
t2Idx=xIdx;
}else{
t2Idx=xThreads/2 +xIdx;
}
if (xIdx < xThreads && t2Idx<xThreads)
{
CUDA_GRID_AXIS_LOOP(yIdx, yThreads,y)
{
const float swapper = t1[(xIdx*BVol)+(yIdx)];
t1[(xIdx*BVol) +yIdx]=t2[(t2Idx*BVol)+yIdx];
                    t2[(t2Idx*BVol)+yIdx]=swapper;
}
}
}
}
else
{
CUDA_GRID_AXIS_LOOP(xIdx, xThreads/2,x)
{
const int halfIdx=(xThreads/2)+xIdx;
int t2Idx;
if (t2upper>0){
t2Idx=xIdx;
}else{
t2Idx=halfIdx;
}
if (halfIdx < xThreads)
{
CUDA_GRID_AXIS_LOOP(yIdx, yThreads,y)
{
const float swapper = t1[(halfIdx*BVol)+(yIdx)];
t1[(halfIdx*BVol) +yIdx]=t2[(t2Idx*BVol)+yIdx];
                    t2[(t2Idx*BVol)+yIdx]=swapper;
}
}
}
}
}
//ShapetoBatch4DNHWC Does a stride shape to batch. Make sure values on receiving end are set to zero when s2b is 0
extern "C" __global__ void ShapetoBatch4DNHWC(
const int xThreads,
const int yThreads,
const int zThreads,
const int hSize,
const int wSize,
const int num_original_batches,
const int BatchVolume,
const int OriginalVol,
const int N1,
const int N2,
const int hstride,
const int wstride,
float *shape,
float *batch,
const int h_over_scan,
const int w_over_scan,
const bool S2B)
{
int batch0 = N2 * xThreads * yThreads * zThreads;
int batch1 = xThreads * yThreads * zThreads;
int batch2 = yThreads * zThreads;
int batch3 = zThreads;
for (int b = 0;b<num_original_batches;b++)
{
const int ShapeOffset = OriginalVol*b;
const int BatchOffset=BatchVolume*b;
for (int i = 0; i < N1; i++)
{
for (int j = 0; j < N2; j++)
{
CUDA_GRID_AXIS_LOOP(xIdx, xThreads, x)
{
CUDA_GRID_AXIS_LOOP(yIdx, yThreads, y)
{
CUDA_GRID_AXIS_LOOP(zIdx, zThreads, z)
{
int oh = (hstride * i) + xIdx;
int ow = (wstride * j) + yIdx;
if (S2B)
{
if (oh < hSize && ow < wSize)
{
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] =
shape[ShapeOffset + (oh * hSize * zThreads) + (ow * zThreads) + zIdx];
}
else
{
if (h_over_scan>0 && ow<wSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
if (w_over_scan>0 && oh<hSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
}
}
else
{
shape[ShapeOffset + (oh * hSize * zThreads) + (ow * zThreads) + zIdx] +=
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx];
}
}
}
}
}
}
}
}
//ShapetoBatch4DNCHW Does a stride shape to batch. Make sure values on receiving end are set to zero when s2b is 0
extern "C" __global__ void ShapetoBatch4DNCHW(
const int xThreads,
const int yThreads,
const int zThreads,
const int hSize,
const int wSize,
const int num_original_batches,
const int BatchVolume,
const int OriginalVol,
const int N1,
const int N2,
const int hstride,
const int wstride,
float *shape,
float *batch,
const int h_over_scan,
const int w_over_scan,
const bool S2B)
{
int batch0 = N2 * xThreads * yThreads * zThreads;
int batch1 = xThreads * yThreads * zThreads;
int batch2 = xThreads * yThreads;
int batch3 = yThreads;
for (int b = 0;b<num_original_batches;b++)
{
const int ShapeOffset = OriginalVol*b;
const int BatchOffset=BatchVolume*b;
for (int i = 0; i < N1; i++)
{
for (int j = 0; j < N2; j++)
{
CUDA_GRID_AXIS_LOOP(xIdx, xThreads, x)
{
CUDA_GRID_AXIS_LOOP(yIdx, yThreads, y)
{
CUDA_GRID_AXIS_LOOP(zIdx, zThreads, z)
{
int oh = (hstride * i) + yIdx;
int ow = (wstride * j) + zIdx;
if (S2B )
{
if (oh < hSize && ow < wSize)
{
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] =
shape[ShapeOffset + (xIdx * wSize * hSize) + (oh * wSize) + ow];
}
else
{
if (h_over_scan>0 && ow<wSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
if (w_over_scan>0 && oh<hSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
}
}
else
{
shape[ShapeOffset + (xIdx * wSize * hSize) + (oh * wSize) + ow] +=
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx];
}
}
}
}
}
}
}
}
extern "C" __global__ void NearestNeighborNHWC(
const int aligncorners,
const int threads,
const float *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
float *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int c = n % channels;
n /= channels;
int dest_x = n % dest_width;
n /= dest_width;
int dest_y = n % dest_height;
n /= dest_height;
const float *src_data_n = &src[n * channels * src_height * src_width];
const int src_y = fminf((aligncorners) ? (roundf(dest_y * height_scale))
: (floorf(dest_y * height_scale)),
src_height - 1);
const int src_x = fminf((aligncorners) ? (roundf(dest_x * width_scale))
: (floorf(dest_x * width_scale)),
src_width - 1);
const int idx = (src_y * src_width + src_x) * channels + c;
dest[i] = src_data_n[idx];
}
}
extern "C" __global__ void NearestNeighborNCHW(
const int aligncorners,
const int threads,
const float *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
float *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int dest_x = n % dest_width;
n /= dest_width;
int dest_y = n % dest_height;
n /= dest_height;
int c = n % channels;
n /= channels;
const float *src_data_n = &src[n * channels * src_height * src_width];
const int src_y = fminf((aligncorners) ? (roundf(dest_y * height_scale))
: (floorf(dest_y * height_scale)),
src_height - 1);
const int src_x = fminf((aligncorners) ? (roundf(dest_x * width_scale))
: (floorf(dest_x * width_scale)),
src_width - 1);
const int idx = (c * src_height * src_width) + (src_y * src_width) + src_x;
dest[i] = src_data_n[idx];
}
}
extern "C" __global__ void NearestNeighborNCHWBack(
const int aligncorners,
const int threads,
float *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
float *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
int c = n % channels;
n /= channels;
float *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (c * dest_width * dest_height) + (dest_y * dest_width) + dest_x;
atomicAdd(&src_data_n[idx], dest[i]);
}
}
extern "C" __global__ void NearestNeighborNHWCBack(
const int aligncorners,
const int threads,
float *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
float *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int c = n % channels;
n /= channels;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
float *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (dest_y * dest_width + dest_x) * channels + c;
atomicAdd(&src_data_n[idx], dest[i]);
}
}
extern "C" __global__ void AdaGrad(const int length,
float *weights, //weights input and output
float *dw, //input and will have to set to zero
float *gsum, //storage
const float rate, //input
const float eps,
const float dwalpha)
{ //input
CUDA_GRID_LOOP_X(cell, length)
{
gsum[cell] = gsum[cell] + (dw[cell] * dw[cell]);
weights[cell] += -(rate * dw[cell]) / (sqrtf(gsum[cell]) + eps);
dw[cell] = dw[cell]*dwalpha; //smoothing factor.
}
}
extern "C" __global__ void Adam(const int n,
float *w,
float *gsum,
float *xsum,
float *dw,
const float rate,
const float beta1,
const float beta2,
const float eps,
const float denombeta1,
const float denombeta2,
const float dwalpha)
{
CUDA_GRID_LOOP_X(i, n)
{
gsum[i] = (beta1 * gsum[i]) + ((1.0 - beta1) * dw[i]);
float gsumt = gsum[i] /denombeta1;
xsum[i] = (beta2 * xsum[i]) + ((1.0 - beta2) * (dw[i] * dw[i]));
float xsumt = xsum[i] / denombeta2;
w[i] += -(rate * gsumt) / (sqrtf(xsumt) + eps);
dw[i]= dwalpha*dw[i]; //smoothing factor
}
}
extern "C" __global__ void AdaDelta(const int length,
float *weights, //weights input and output
float *gsum, //storage
float *xsum, //storage
float *dw, //input and will have to set to zero
const float rate, //input
const float eps,
const float ro,
const float dwalpha)
{
CUDA_GRID_LOOP_X(i, length)
{
gsum[i] = (ro * gsum[i]) + ((1.0-ro)*dw[i] * dw[i]);
const float dx = sqrtf((xsum[i]+eps)/(gsum[i]+eps))*dw[i];
xsum[i]=(ro*xsum[i])+((1-ro)*dx*dx);
weights[i] -= dx;
dw[i] = dw[i]*dwalpha;
}
}
/*
//This is paired with the host
extern "C" __global__ void Segment1stDim(const int start_index, const float *src, float *dst, const int size)
{
int i = (blockIdx.y * gridDim.x * blockDim.x) + (blockIdx.x * blockDim.x) + threadIdx.x;
int start_location = start_index * size;
if (i < size)
{
dst[i] = src[start_location + i];
}
}
//This is paired with the host
extern "C" __global__ void Segment1stDimhalf(const int start_index, const __half *src, __half *dst, const int size)
{
int i = (blockIdx.y * gridDim.x * blockDim.x) + (blockIdx.x * blockDim.x) + threadIdx.x;
int start_location = start_index * size;
if (i < size)
{
dst[i] = src[start_location + i];
}
}
*/
extern "C" __global__ void L1L2(
const int length,
float *dw, //input and output
    const float *w, //input needs to be an array
float *l1, //output set to zero
float *l2, //output set to zero
const float batch, // should be an int but just send it as a float
const float decay1, //input
const float decay2)
{ //input
CUDA_GRID_LOOP_X(i, length)
{
atomicAdd(l1, abs(w[i]) * decay1);
atomicAdd(l2, (w[i] * w[i] * decay2) / 2.0);
const float gradl1 = decay1 * (w[i] > 0 ? 1 : -1);
const float gradl2 = w[i] * decay2;
dw[i] = (dw[i] + gradl2 + gradl1) / batch;
}
}
//ThreshForward is kind of memory expensive, mostly because it is experimental.
//To test, start the positive coefficients at random uniform numbers between .9 and 1.1,
//and the negcoefs between .01 and .2, or something along those lines.
//Maybe the threshold should be a uniform number between -.3 and .3.
extern "C" __global__ void ThreshForward(const int XThreads,
const int batchsize,
const float *x,
float *y,
const float *negcoefs,
const float *threshhold,
const float *poscoefs)
{
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (x[stride+xIdx]>threshhold[xIdx])
{
y[stride+xIdx]= x[stride+xIdx]*poscoefs[xIdx];
}
else
{
y[stride+xIdx]= negcoefs[xIdx]*x[stride+xIdx];
}
}
}
}
//Backward pass for ThreshForward (branches on x > threshhold per unit).
extern "C" __global__ void ThreshBackward(const int XThreads,
const int batchsize,
const float *x,
float *dx,
const float *dy,
const float *negcoefs,
float *dnegcoefs,
const float *threshhold,
float *dthreshhold,
const float *poscoefs,
float *dposcoefs)
{
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (x[stride+xIdx]>threshhold[xIdx])
{
dx[stride+xIdx]= poscoefs[xIdx]*dy[stride+xIdx];
                dposcoefs[xIdx]+=dy[stride+xIdx]*x[stride+xIdx];
}
else
{
dx[stride+xIdx]= negcoefs[xIdx]*dy[stride+xIdx];
                dnegcoefs[xIdx]+=dy[stride+xIdx]*x[stride+xIdx];
}
           dthreshhold[xIdx]+=dy[stride+xIdx];
}
}
}
//forwardPrelu does the forward Prelu
extern "C" __global__ void PreluForward(const int XThreads,
const int batchsize,
const float *x,
float *y,
const float *coefs)
{
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (x[stride+xIdx]>0)
{
y[stride+xIdx]= x[stride+xIdx];
}
else
{
y[stride+xIdx]= coefs[xIdx]*x[stride+xIdx];
}
}
}
}
//backwardPrelu does the backprop of the parametric ReLU
extern "C" __global__ void PreluBackward(const int XThreads,
const int batchsize,
float *dx,
const float *x,
const float *dy,
const float *coefs,
float *dcoefs)
{
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (x[stride+xIdx]>0)
{
dx[stride+xIdx]= dy[stride+xIdx];
}
else
{
dx[stride+xIdx]= coefs[xIdx]*dy[stride+xIdx];
                dcoefs[xIdx]+=dy[stride+xIdx]*x[stride+xIdx];
}
}
}
}
/*
Leaky functions
*/
extern "C" __global__ void LeakyForwardAlphaBeta(const int length,
const float *x,
float *y,
const float coef,
const float alpha,
const float beta)
{
CUDA_GRID_LOOP_X(i, length)
{
const float previous = y[i];
if (x[i] > 0.0)
{
const float current = x[i];
y[i] = (beta*previous) + (alpha *current) ;
}
else
{
const float current = x[i]*coef;
y[i] = (beta*previous) + (alpha *current) ;
}
__syncthreads();
}
}
extern "C" __global__ void LeakyBackwardAlphaBeta(const int length,
const float *x,
float *dx,
const float *dy,
const float coef,
const float alpha,
const float beta)
{
CUDA_GRID_LOOP_X(i, length)
{
const float previous = dx[i];
if (x[i] > 0.0)
{
const float current= dy[i];
dx[i] =(beta *previous) + (current * alpha);
}
else
{
const float current= dy[i]*coef;
dx[i] = (beta *previous) + (current * alpha);
}
__syncthreads();
}
}
extern "C" __global__ void LeakyForwardAlpha(const int length,
const float *x,
float *y,
const float coef,
const float alpha)
{
CUDA_GRID_LOOP_X(i, length)
{
if (x[i] > 0.0)
{
y[i] = alpha *x[i];
}
else
{
const float current=x[i]*coef;
y[i] =current * alpha;
}
__syncthreads();
}
}
extern "C" __global__ void LeakyBackwardAlpha(const int length,
const float *x,
float *dx,
const float *dy,
const float coef,
const float alpha)
{
CUDA_GRID_LOOP_X(i, length)
{
if (x[i] > 0.0)
{
dx[i] = dy[i]*alpha;
}
else
{
const float current=dy[i]*coef;
dx[i] = current *alpha;
}
__syncthreads();
}
}
extern "C" __global__ void LeakyForward(const int length,
const float *x,
float *y,
const float coef)
{
CUDA_GRID_LOOP_X(i, length)
{
if (x[i] > 0.0)
{
y[i] = x[i];
}
else
{
y[i] = x[i] * coef;
}
}
}
extern "C" __global__ void LeakyBackward(const int length,
const float *x,
float *dx,
const float *dy,
const float coef)
{
CUDA_GRID_LOOP_X(i, length)
{
if (x[i] > 0.0)
{
dx[i] = dy[i];
}
else
{
dx[i] = dy[i] * coef;
}
}
}
extern "C" __global__ void MSELoss(const int length,
float *errors,
const float *target,
const float *networkout,
float *loss,
const float alpha,
const float beta)
{
loss[0]=0;
CUDA_GRID_LOOP_X(i, length)
{
const float y = networkout[i] - target[i];
errors[i] = y;
atomicAdd(loss, (y * y) / 2);
}
}
extern "C" __global__ void MSELossbyBatches(const int xthreads,const int ythreads, float *errors, const float *target, const float *networkout, float *loss)
{
CUDA_GRID_AXIS_LOOP(xIdx,xthreads,x)
{
const int offset=ythreads*xIdx;
CUDA_GRID_AXIS_LOOP(yIdx, ythreads,y)
{
const float y = networkout[offset+yIdx] - target[offset+yIdx];
errors[offset+yIdx] = y;
atomicAdd(&loss[xIdx], (y * y) / 2);
}
}
}
extern "C" __global__ void ConcatNHWCEX(const int XThreads,
const int YThreads,
const int ZThreads,
const int Batches,
const int DestBatchVol,
const int TotalDestChannels,
const int DestChannelOffset,
float *src,
const int SrcBatchVol,
float *dest,
bool forward)
{
for (int i=0;i<Batches;i++){
CUDA_GRID_AXIS_LOOP(idX,XThreads,x)
{
CUDA_GRID_AXIS_LOOP(idY,YThreads,y)
{
CUDA_GRID_AXIS_LOOP(idZ,ZThreads,z)
{
int deststride = (i*DestBatchVol)+(idX*YThreads*TotalDestChannels)+(idY*TotalDestChannels)+DestChannelOffset+idZ;
int srcstride = (i*SrcBatchVol)+(idX*YThreads*ZThreads)+(idY*ZThreads)+idZ;
if (forward){
dest[deststride]=src[srcstride];
}else{
src[srcstride]=dest[deststride];
}
}
}
}
}
}
extern "C" __global__ void ConcatNCHWEX(const int XThreads,
const int Batches,
const int DestBatchVol,
const int DestChannelOffset,
float *src,
const int SrcBatchVol,
float *dest,
bool forward)
{
for (int i=0;i<Batches;i++){
CUDA_GRID_AXIS_LOOP(idX,XThreads,x)
{
int deststride = (i*DestBatchVol)+(DestChannelOffset+idX);
int srcstride = (i*SrcBatchVol)+(idX);
if (forward){
dest[deststride]=src[srcstride];
}else{
src[srcstride]=dest[deststride];
}
}
}
}
extern "C" __global__ void ConcatNHWCEXHalf(const int XThreads,
const int YThreads,
const int ZThreads,
const int Batches,
const int DestBatchVol,
const int TotalDestChannels,
const int DestChannelOffset,
float *src,
const int SrcBatchVol,
float *dest,
bool forward)
{
for (int i=0;i<Batches;i++){
CUDA_GRID_AXIS_LOOP(idX,XThreads,x)
{
CUDA_GRID_AXIS_LOOP(idY,YThreads,y)
{
CUDA_GRID_AXIS_LOOP(idZ,ZThreads,z)
{
int deststride = (i*DestBatchVol)+(idX*YThreads*TotalDestChannels)+(idY*TotalDestChannels)+DestChannelOffset+idZ;
int srcstride = (i*SrcBatchVol)+(idX*YThreads*ZThreads)+(idY*ZThreads)+idZ;
if (forward){
dest[deststride]=src[srcstride];
}else{
src[srcstride]=dest[deststride];
}
}
}
}
}
}
extern "C" __global__ void ConcatNCHWEXHalf(const int XThreads,
const int Batches,
const int DestBatchVol,
const int DestChannelOffset,
float *src,
const int SrcBatchVol,
float *dest,
bool forward)
{
for (int i=0;i<Batches;i++){
CUDA_GRID_AXIS_LOOP(idX,XThreads,x)
{
int deststride = (i*DestBatchVol)+(DestChannelOffset+idX);
int srcstride = (i*SrcBatchVol)+(idX);
if (forward){
dest[deststride]=src[srcstride];
}else{
src[srcstride]=dest[deststride];
}
}
}
}
extern "C" __global__ void ConcatForwardNCHW( const int XThreads,
const int Batches,
const int Channels1,
const int src1vol,
const float *Src1,
const int Channels2,
const int src2vol,
const float *Src2,
float *dest)
{
for (int i = 0;i<Batches;i++)
{
        const int Stride= (src1vol+src2vol)*i; //offset of batch i in dest
const int src1batchstride=src1vol*i;
const int src2batchstride=src2vol*i;
for (int j=0;j<Channels1;j++)
{
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
dest[Stride+(j*XThreads)+xIdx] = Src1[src1batchstride+(j*XThreads)+xIdx];
}
}
for (int j=0;j<Channels2;j++){
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
dest[Stride+(j*XThreads)+src1vol+xIdx] = Src2[src2batchstride+(j*XThreads)+xIdx];
}
}
}
}
extern "C" __global__ void ConcatBackwardNCHW( const int XThreads,
const int Batches,
const int Channels1,
const int src1vol,
float *Src1,
const int Channels2,
const int src2vol,
float *Src2,
const float *dest)
{
for (int i = 0;i<Batches;i++)
{
        const int Stride= (src1vol+src2vol)*i; //offset of batch i in dest
const int src1batchstride=src1vol*i;
const int src2batchstride=src2vol*i;
for (int j=0;j<Channels1;j++)
{
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
Src1[src1batchstride+(j*XThreads)+xIdx]= dest[Stride+(j*XThreads)+xIdx];
}
}
for (int j=0;j<Channels2;j++){
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
Src2[src2batchstride+(j*XThreads)+xIdx] = dest[Stride+(j*XThreads)+src1vol+xIdx];
}
}
}
}
extern "C" __global__ void ConcatForwardNCHWhalf( const int XThreads,
const int Batches,
const int Channels1,
const int src1vol,
const __half *Src1,
const int Channels2,
const int src2vol,
const __half *Src2,
__half *dest)
{
for (int i = 0;i<Batches;i++)
{
        const int Stride= (src1vol+src2vol)*i; //offset of batch i in dest
const int src1batchstride=src1vol*i;
const int src2batchstride=src2vol*i;
for (int j=0;j<Channels1;j++)
{
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
dest[Stride+(j*XThreads)+xIdx] = Src1[src1batchstride+(j*XThreads)+xIdx];
}
}
for (int j=0;j<Channels2;j++){
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
dest[Stride+(j*XThreads)+src1vol+xIdx] = Src2[src2batchstride+(j*XThreads)+xIdx];
}
}
}
}
extern "C" __global__ void ConcatBackwardNCHWhalf( const int XThreads,
const int Batches,
const int Channels1,
const int src1vol,
__half *Src1,
const int Channels2,
const int src2vol,
__half *Src2,
const __half *dest)
{
for (int i = 0;i<Batches;i++)
{
        const int Stride= (src1vol+src2vol)*i; //offset of batch i in dest
const int src1batchstride=src1vol*i;
const int src2batchstride=src2vol*i;
for (int j=0;j<Channels1;j++)
{
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
Src1[src1batchstride+(j*XThreads)+xIdx]= dest[Stride+(j*XThreads)+xIdx];
}
}
for (int j=0;j<Channels2;j++){
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
Src2[src2batchstride+(j*XThreads)+xIdx] = dest[Stride+(j*XThreads)+src1vol+xIdx];
}
}
}
}
//MakePlanarImageBatchesUint8 - for this to work, each batch should have the same number of channels
//and all the channels need to be the same size
extern "C" __global__ void MakePlanarImageBatchesUint8(const int XThreads, //Should be channel size
const int Batches,
const int channelsperbatch,
const float *Srcs, //all the channels for everything.
float *dest)
{
const int batchsize = XThreads*channelsperbatch;
for (int i = 0;i<Batches;i++)
{
for (int j = 0;j<channelsperbatch;j++)
{
CUDA_GRID_LOOP_X(xIdx, XThreads)
{
dest[(i*batchsize)+(j*XThreads)+xIdx]=Srcs[(j*XThreads)+xIdx];
}
}
}
}
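//Descriptive note (added): with the layout assumed above, a single planar image of
//`channelsperbatch` planes, each `XThreads` elements long, is read from Srcs and
//replicated into every one of the `Batches` slots of dest. For example, with
//Batches=2, channelsperbatch=3, XThreads=4 the output is laid out as
//  dest = [c0|c1|c2][c0|c1|c2]   // each cN is the same 4-element plane from Srcs
//Note that despite the Uint8 name, the buffers are declared as float here.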
extern "C" __global__ void TransposeFP16(int numthreads,
const __half *src,
const int *buf,
const int ndims,
__half *dest)
{
const int *src_strides = buf;
const int *dest_strides = &buf[ndims];
const int *perm = &buf[ndims * 2];
CUDA_GRID_LOOP_X(destIdx, numthreads)
{
int srcIdx = 0;
int t = destIdx;
for (int i = 0; i < ndims; ++i)
{
const int ratio = t / dest_strides[i];
t -= ratio * dest_strides[i];
srcIdx += (ratio * src_strides[perm[i]]);
}
dest[destIdx] = src[srcIdx];
}
}
extern "C" __global__ void SwapEveryOtherFP16(
const int n, //elements per batch (per-batch volume)
const int totalbatches,
__half *t1,
__half *t2,
const int start,
const int stride)
{
StartAxis(stx,x)
const int BVol = n/2;
__half2 *t1h=(half2 *)t1;
__half2 *t2h=(half2 *)t2;
for (int i =start;i<totalbatches;i+=stride)
{
CUDA_GRID_LOOP_X(xIdx, BVol)
{
const __half2 swapper = t1h[(i*BVol)+(xIdx)];
t1h[(i*BVol) +xIdx]=t2h[(i*BVol)+xIdx];
t2h[(i*BVol)+xIdx]=swapper;
}
if (stx==0 && (n%2)){
const int xIdx=n-1;
const __half swapper = t1[(i*n)+(xIdx)];
t1[(i*n) +(xIdx)]=t2[(i*n)+(xIdx)];
t2[(i*n)+(xIdx)]=swapper;
}
__syncthreads();
}
}
extern "C" __global__ void SwapUpperLowerFP16(
const int xThreads, //batchsize
const int yThreads, //batchvol
__half *t1,
__half *t2,
const int t1upper,
const int t2upper,
const int inverse)
{
const int BVol = yThreads;
if (t1upper>0)
{
CUDA_GRID_AXIS_LOOP(xIdx,xThreads/2,x)
{
int t2Idx;
if (t2upper>0){
t2Idx=xIdx;
}else{
t2Idx=xThreads/2 +xIdx;
}
if (xIdx < xThreads && t2Idx<xThreads)
{
CUDA_GRID_AXIS_LOOP(yIdx, BVol,y)
{
const __half swapper = t1[(xIdx*BVol)+(yIdx)];
t1[(xIdx*BVol) +yIdx]=t2[(t2Idx*BVol)+yIdx];
t2[(xIdx*BVol)+yIdx]=swapper;
}
}
}
}
else
{
CUDA_GRID_AXIS_LOOP(xIdx, xThreads/2,x)
{
const int halfIdx=(xThreads/2)+xIdx;
int t2Idx;
if (t2upper>0){
t2Idx=xIdx;
}else{
t2Idx=halfIdx;
}
if (halfIdx < xThreads)
{
CUDA_GRID_AXIS_LOOP(yIdx, yThreads,y)
{
const __half swapper = t1[(halfIdx*BVol)+(yIdx)];
t1[(halfIdx*BVol) +yIdx]=t2[(t2Idx*BVol)+yIdx];
t2[(halfIdx*BVol)+yIdx]=swapper;
}
}
}
}
}
//ShapetoBatch4DNHWC does a strided shape-to-batch copy. Make sure values on the receiving end are set to zero when S2B is 0, since the reverse pass accumulates with +=.
extern "C" __global__ void ShapetoBatch4DNHWCFP16(
const int xThreads,
const int yThreads,
const int zThreads,
const int hSize,
const int wSize,
const int num_original_batches,
const int BatchVolume,
const int OriginalVol,
const int N1,
const int N2,
const int hstride,
const int wstride,
__half *shape,
__half *batch,
const int h_over_scan,
const int w_over_scan,
const bool S2B)
{
int batch0 = N2 * xThreads * yThreads * zThreads;
int batch1 = xThreads * yThreads * zThreads;
int batch2 = yThreads * zThreads;
int batch3 = zThreads;
for (int b = 0;b<num_original_batches;b++)
{
const int ShapeOffset = OriginalVol*b;
const int BatchOffset=BatchVolume*b;
for (int i = 0; i < N1; i++)
{
for (int j = 0; j < N2; j++)
{
CUDA_GRID_AXIS_LOOP(xIdx, xThreads, x)
{
CUDA_GRID_AXIS_LOOP(yIdx, yThreads, y)
{
CUDA_GRID_AXIS_LOOP(zIdx, zThreads, z)
{
int oh = (hstride * i) + xIdx;
int ow = (wstride * j) + yIdx;
if (S2B)
{
if (oh < hSize && ow < wSize)
{
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] =
shape[ShapeOffset + (oh * hSize * zThreads) + (ow * zThreads) + zIdx];
}
else
{
if (h_over_scan>0 && ow<wSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
if (w_over_scan>0 && oh<hSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
}
}
else
{
shape[ShapeOffset + (oh * hSize * zThreads) + (ow * zThreads) + zIdx] +=
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx];
}
}
}
}
}
}
}
}
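//Descriptive note (added), a sketch of the indexing assumed above rather than a
//specification: each (i, j) pair picks a window origin at (hstride*i, wstride*j)
//in the source image, and the xThreads x yThreads x zThreads window contents are
//copied into a contiguous patch of `batch`. When S2B is false the copy runs in
//reverse and accumulates (+=) back into `shape`, which is why the receiving
//buffer must be zeroed first.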
extern "C" __global__ void ShapetoBatch4DNCHWFP16(
const int xThreads,
const int yThreads,
const int zThreads,
const int hSize,
const int wSize,
const int num_original_batches,
const int BatchVolume,
const int OriginalVol,
const int N1,
const int N2,
const int hstride,
const int wstride,
__half *shape,
__half *batch,
const int h_over_scan,
const int w_over_scan,
const bool S2B)
{
int batch0 = N2 * xThreads * yThreads * zThreads;
int batch1 = xThreads * yThreads * zThreads;
int batch2 = xThreads * yThreads;
int batch3 = yThreads;
for (int b = 0;b<num_original_batches;b++)
{
const int ShapeOffset = OriginalVol*b;
const int BatchOffset=BatchVolume*b;
for (int i = 0; i < N1; i++)
{
for (int j = 0; j < N2; j++)
{
CUDA_GRID_AXIS_LOOP(xIdx, xThreads, x)
{
CUDA_GRID_AXIS_LOOP(yIdx, yThreads, y)
{
CUDA_GRID_AXIS_LOOP(zIdx, zThreads, z)
{
int oh = (hstride * i) + yIdx;
int ow = (wstride * j) + zIdx;
if (S2B )
{
if (oh < hSize && ow < wSize)
{
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] =
shape[ShapeOffset + (xIdx * wSize * hSize) + (oh * wSize) + ow];
}
else
{
if (h_over_scan>0 && ow<wSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
if (w_over_scan>0 && oh<hSize){
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx] = 0;
}
}
}
else
{
shape[ShapeOffset + (xIdx * wSize * hSize) + (oh * wSize) + ow] +=
batch[BatchOffset + (i * batch0) + (j * batch1) + (xIdx * batch2) + (yIdx * batch3) + zIdx];
}
}
}
}
}
}
}
}
extern "C" __global__ void NearestNeighborNCHWFP16(
const int aligncorners,
const int threads,
const __half *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
__half *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int dest_x = n % dest_width;
n /= dest_width;
int dest_y = n % dest_height;
n /= dest_height;
int c = n % channels;
n /= channels;
const __half *src_data_n = &src[n * channels * src_height * src_width];
const int src_y = fminf((aligncorners) ? (roundf(dest_y * height_scale))
: (floorf(dest_y * height_scale)),
src_height - 1);
const int src_x = fminf((aligncorners) ? (roundf(dest_x * width_scale))
: (floorf(dest_x * width_scale)),
src_width - 1);
const int idx = (c * src_height * src_width) + (src_y * src_width) + src_x;
dest[i] = src_data_n[idx];
}
}
#if __CUDA_ARCH__ >= 750 //fast path; the fallback below may not work on all architectures and will probably work best with even-sized tensors.
extern "C" __global__ void NearestNeighborNHWCBackFP16(
const int aligncorners,
const int threads,
__half *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
__half *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int c = n % channels;
n /= channels;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
__half *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (dest_y * dest_width + dest_x) * channels + c;
atomicAdd(&src_data_n[idx], dest[i]);
}
}
#else
extern "C" __global__ void NearestNeighborNHWCBackFP16(
const int aligncorners,
const int threads,
__half *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
__half *dest)
{
const __half zer0= __float2half(0.0);
CUDA_GRID_LOOP_X(i, threads-1) //threads-1 because the value is widened to a 32-bit __half2 for the atomic add and the paired high lane must stay inside the array
{
int n = i;
int c = n % channels;
n /= channels;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
__half *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (dest_y * dest_width + dest_x) * channels + c;
const __half2 dsth2 = __halves2half2(dest[i],zer0); // This should give us the value half2[dest,0]
void *vdptr=(void*)(&src_data_n[idx]); //cast through void* so the __half address can be reinterpreted as a __half2 pointer
__half2 *srch2hack = (__half2*)(vdptr); //alias the address as a __half2 pointer
atomicAdd(srch2hack,dsth2); // adds (src_data_n[idx]+dest[i], src_data_n[idx+1]+0); the threads-1 loop bound keeps the high lane in range
}
//This last part is to do the last value in dest.
int n = threads-1;
int c = n % channels;
n /= channels;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
__half *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (dest_y * dest_width + dest_x) * channels + c;
src_data_n[idx] = __hadd(src_data_n[idx], dest[threads-1]);
}
#endif
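// Illustrative sketch (added, not used by the kernels in this file): the same
// half -> half2 aliasing trick as a standalone helper. It assumes `addr` is
// 4-byte aligned (even element index) and that atomicAdd(__half2*, __half2) is
// available (sm_60 and newer); the high lane is padded with zero so the
// neighbouring element is left unchanged.
static __device__ __forceinline__ void atomicAddHalfViaHalf2(__half *addr, __half val)
{
    const __half2 packed = __halves2half2(val, __float2half(0.0f));
    atomicAdd(reinterpret_cast<__half2 *>(addr), packed);
}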
extern "C" __global__ void NearestNeighborNHWCFP16(
const int aligncorners,
const int threads,
const __half *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
__half *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int c = n % channels;
n /= channels;
int dest_x = n % dest_width;
n /= dest_width;
int dest_y = n % dest_height;
n /= dest_height;
const __half *src_data_n = &src[n * channels * src_height * src_width];
const int src_y = fminf((aligncorners) ? (roundf(dest_y * height_scale))
: (floorf(dest_y * height_scale)),
src_height - 1);
const int src_x = fminf((aligncorners) ? (roundf(dest_x * width_scale))
: (floorf(dest_x * width_scale)),
src_width - 1);
const int idx = (src_y * src_width + src_x) * channels + c;
dest[i] = src_data_n[idx];
}
}
#if __CUDA_ARCH__ >= 750 //fast path; the fallback below may not work on all architectures and will probably work best with even-sized tensors.
extern "C" __global__ void NearestNeighborNCHWBackFP16(
const int aligncorners,
const int threads,
__half *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
__half *dest)
{
CUDA_GRID_LOOP_X(i, threads)
{
int n = i;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
int c = n % channels;
n /= channels;
__half *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (c * dest_width * dest_height) + (dest_y * dest_width) + dest_x;
atomicAdd(&src_data_n[idx], dest[i]);
}
}
#else
//Fallback for architectures below sm_75; it may not work on very old architectures and will probably work best with even-sized tensors.
extern "C" __global__ void NearestNeighborNCHWBackFP16(
const int aligncorners,
const int threads,
__half *src,
const int src_height,
const int src_width,
const int channels,
const int dest_height,
const int dest_width,
const float height_scale,
const float width_scale,
__half *dest)
{
const __half zer0= __float2half(0.0);
CUDA_GRID_LOOP_X(i, threads-1) //threads-1 because the value is widened to a 32-bit __half2 for the atomic add and the paired high lane must stay inside the array
{
int n = i;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
int c = n % channels;
n /= channels;
__half *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (c * dest_width * dest_height) + (dest_y * dest_width) + dest_x;
const __half2 dsth2 = __halves2half2(dest[i],zer0); // This should give us the value half2[dest,0]
void *vdptr=(void*)(&src_data_n[idx]); //cast through void* so the __half address can be reinterpreted as a __half2 pointer
__half2 *srch2hack = (__half2*)(vdptr); //alias the address as a __half2 pointer
atomicAdd(srch2hack,dsth2); // adds (src_data_n[idx]+dest[i], src_data_n[idx+1]+0); the threads-1 loop bound keeps the high lane in range
}
//This last part is to do the last value in dest.
int n = threads-1;
int src_x = n % src_width;
n /= src_width;
int src_y = n % src_height;
n /= src_height;
int c = n % channels;
n /= channels;
__half *src_data_n = &src[n * channels * src_height * src_width];
const int dest_y = fminf((aligncorners) ? (roundf(src_y * height_scale))
: (floorf(src_y * height_scale)),
dest_height - 1);
const int dest_x = fminf((aligncorners) ? (roundf(src_x * width_scale))
: (floorf(src_x * width_scale)),
dest_width - 1);
const int idx = (c * dest_width * dest_height) + (dest_y * dest_width) + dest_x;
src_data_n[idx] = __hadd(src_data_n[idx], dest[threads-1]);
}
#endif
extern "C" __global__ void AdaGradFP16(const int n,
__half *w, //w input and output
__half *dw, //input and will have to set to zero
__half *gsum, //storage
const __half rate, //input
const __half eps,
const __half dwalpha)
{ //input
StartAxis(stx,x)
int n2=n/2;
__half2 *w2=(__half2*)w,*dw2=(__half2*)dw,*gsum2=(__half2*)gsum;
const __half2 rate2=__halves2half2(rate,rate);
const __half2 eps2=__halves2half2(eps,eps);
const __half2 dwalpha2=__halves2half2(dwalpha,dwalpha);
CUDA_GRID_LOOP_X(i, n2)
{
__half2 holder = gsum2[i];
gsum2[i] = __hfma2(dw2[i],dw2[i],holder);
w2[i] = __hadd2(-__h2div((__hmul2(rate2,dw2[i])) , (__hadd2(h2sqrt(gsum2[i]), eps2))),w2[i]);
dw2[i] =__hmul2(dw2[i],dwalpha2);
}
if (stx==0 && (n%2)){
__half holder = gsum[n-1];
gsum[n-1] = __hfma(dw[n-1],dw[n-1],holder);
w[n-1] = __hadd(w[n-1],-__hdiv((__hmul(rate,dw[n-1])) , (__hadd(hsqrt(gsum[n-1]), eps))));
dw[n-1] =__hmul(dw[n-1],dwalpha);
}
}
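//Descriptive note (added): scalar form of the AdaGrad update implemented above
//with __half2 intrinsics:
//  gsum[i] += dw[i]*dw[i]
//  w[i]    -= rate*dw[i] / (sqrt(gsum[i]) + eps)
//  dw[i]   *= dwalpha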
//Need to fix this.
extern "C" __global__ void AdamFP16(const int n,
__half *w,
__half *gsum,
__half *xsum,
__half *dw,
const __half rate,
const __half beta1,
const __half beta2,
const __half eps,
const __half denombeta1,
const __half denombeta2,
const __half dwalpha)
{
int n2=n/2;
__half2 *w2=(__half2*)w,*dw2=(__half2*)dw,*gsum2=(__half2*)gsum,*xsum2=(__half2*)xsum;
const __half2 rate2=__halves2half2(rate,rate);
const __half2 eps2=__halves2half2(eps,eps);
const __half2 dwalpha2=__halves2half2(dwalpha,dwalpha);
const __half2 beta12=__halves2half2(beta1,beta1);
const __half2 beta22=__halves2half2(beta2,beta2);
const __half one1 = __float2half(1.0);
const __half2 one2=__halves2half2(one1,one1);
StartAxis(stx,x)
CUDA_GRID_LOOP_X(i, n2)
{
gsum2[i] =__hfma2(__hsub2(one2,beta12),dw2[i],__hmul2(beta12,gsum2[i]));
__half2 gsumt = __h2div(gsum2[i] ,__halves2half2(denombeta1,denombeta1));
xsum2[i] = __hfma2(beta22 , xsum2[i], __hmul2(__hsub2(one2, beta22), __hmul2(dw2[i] , dw2[i])));
__half2 xsumt = __h2div(xsum2[i] , __halves2half2(denombeta2,denombeta2));
w2[i]=__hsub2(w2[i],__h2div(__hmul2(rate2,gsumt),__hadd2(h2sqrt(xsumt),eps2)));
dw2[i]= __hmul2(dwalpha2,dw2[i]);
}
if (stx==0 && (n%2)){
const int i = n-1;
gsum[i] =__hfma(__hsub(one1,beta1),dw[i],__hmul(beta1,gsum[i]));
__half gsumt = __hdiv(gsum[i] ,denombeta1);
xsum[i] = __hfma(beta2 , xsum[i], __hmul(__hsub(one1, beta2), __hmul(dw[i] , dw[i])));
__half xsumt = __hdiv(xsum[i] , denombeta2);
w[i]=__hsub(w[i],__hdiv(__hmul(rate,gsumt),__hadd(hsqrt(xsumt),eps)));
dw[i]= __hmul(dwalpha,dw[i]);
}
}
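//Descriptive note (added): the update above appears to follow the standard Adam
//rule, with the bias-correction denominators (1-beta1^t) and (1-beta2^t) assumed
//to be precomputed on the host and passed in as denombeta1/denombeta2:
//  gsum[i] = beta1*gsum[i] + (1-beta1)*dw[i]        // first moment
//  xsum[i] = beta2*xsum[i] + (1-beta2)*dw[i]*dw[i]  // second moment
//  w[i]   -= rate * (gsum[i]/denombeta1) / (sqrt(xsum[i]/denombeta2) + eps)
//  dw[i]  *= dwalpha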
extern "C" __global__ void AdaDeltaFP16(const int n,
__half *w, //weights input and output
__half *gsum, //storage
__half *xsum, //storage
__half *dw, //input and will have to set to zero
const __half rate, //input
const __half eps,
const __half ro,
const __half dwalpha)
{
StartAxis(stx,x)
int n2=n/2;
__half2 *w2=(__half2*)w,*dw2=(__half2*)dw,*gsum2=(__half2*)gsum,*xsum2=(__half2*)xsum;
const __half2 rate2=__halves2half2(rate,rate);
const __half2 eps2=__halves2half2(eps,eps);
const __half2 ro2=__halves2half2(ro,ro);
const __half one1 = __float2half(1.0);
const __half2 one2=__halves2half2(one1,one1);
const __half2 dwalpha2=__halves2half2(dwalpha,dwalpha);
CUDA_GRID_LOOP_X(i, n2)
{
gsum2[i]= __hfma2(__hsub2(one2,ro2),__hmul2(dw2[i],dw2[i]),__hmul2(ro2,gsum2[i]));
const __half2 dx2= __hmul2(h2sqrt(__h2div(__hadd2(xsum2[i],eps2),__hadd2(gsum2[i],eps2))),dw2[i]);
xsum2[i]= __hfma2(__hsub2(one2,ro2),__hmul2(dx2,dx2),__hmul2(ro2,xsum2[i]));
w2[i] =__hsub2(w2[i],dx2);
dw2[i] = __hmul2(dw2[i],dwalpha2);
}
if (stx ==0 &&(n%2)){
int i = n-1;
gsum[i]= __hfma(__hsub(one1,ro),__hmul(dw[i],dw[i]),__hmul(ro,gsum[i]));
const __half dx= __hmul(hsqrt(__hdiv(__hadd(xsum[i],eps),__hadd(gsum[i],eps))),dw[i]);
xsum[i]= __hfma(__hsub(one1,ro),__hmul(dx,dx),__hmul(ro,xsum[i]));
w[i] =__hsub(w[i],dx);
dw[i] = __hmul(dw[i],dwalpha);
}
}
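//Descriptive note (added): scalar form of the AdaDelta update implemented above:
//  gsum[i] = ro*gsum[i] + (1-ro)*dw[i]*dw[i]
//  dx      = sqrt((xsum[i]+eps)/(gsum[i]+eps)) * dw[i]
//  xsum[i] = ro*xsum[i] + (1-ro)*dx*dx
//  w[i]   -= dx
//  dw[i]  *= dwalpha
//(rate is accepted as a parameter but not used by this variant.)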
#if __CUDA_ARCH__ >= 750
extern "C" __global__ void L1L2FP16(
const int length,
__half *dw, //input and output
const __half *w, //input, needs to be an array
__half *l1, //output set to zero
__half *l2, //output set to zero
const __half batch, // batch size; an integer count passed in as a half
const __half decay1, //input
const __half decay2)
{ //input
const __half one1 = __float2half(1.0);
const __half zero0 = __float2half(0);
CUDA_GRID_LOOP_X(i, length)
{
__half abs = w[i];
if (__hlt(abs,zero0)){
abs=-abs;
}
//atomicAdd(l1, abs(w[i]) * decay1);
atomicAdd(l1,__hmul(abs,decay1));
//atomicAdd(l2, (w[i] * w[i] * decay2) / 2.0);
atomicAdd(l2, __hdiv(__hmul(__hmul(w[i] , w[i]) , decay2) , 2.0));
//const float gradl1 = decay1 * (w[i] > 0 ? 1 : -1);
const __half gradl1 = __hmul(decay1, (__hgt(w[i],zero0) ? one1 : -one1));
//const float gradl2 = w[i] * decay2;
const __half gradl2 = __hmul(w[i] ,decay2);
//dw[i] = (dw[i] + gradl2 + gradl1) / batch;
dw[i] = __hdiv(__hadd(__hadd(dw[i], gradl2) , gradl1) , batch);
}
}
#else
extern "C" __global__ void L1L2FP16(
const int length,
__half *dw, //input and output
const __half *w, //input, needs to be an array
__half *l1, //output set to zero
__half *l2, //output set to zero
const __half batch, // batch size; an integer count passed in as a half
const __half decay1, //input
const __half decay2)
{ //input
const __half one1 = __float2half(1.0);
const __half zero0 = __float2half(0);
__shared__ __half2 l1l2h2[2]; //block-shared accumulators: [0] for l1, [1] for l2
__half2 *l1h2=&l1l2h2[0];
__half2 *l2h2=&l1l2h2[1];
if (threadIdx.x==0){
l1l2h2[0]=__halves2half2(zero0,zero0);
l1l2h2[1]=__halves2half2(zero0,zero0);
}
__syncthreads();
CUDA_GRID_LOOP_X(i, length)
{
__half abs = w[i];
if (__hlt(abs,zero0)){
abs=-abs;
}
//atomicAdd(l1, abs(w[i]) * decay1);
const __half2 result= __halves2half2( __hmul(abs,decay1),zero0);
atomicAdd(l1h2,result);
//atomicAdd(l2, (w[i] * w[i] * decay2) / 2.0);
const __half2 result2= __halves2half2(__hdiv(__hmul(__hmul(w[i] , w[i]) , decay2) , 2.0),zero0);
atomicAdd(l2h2,result2 );
//const float gradl1 = decay1 * (w[i] > 0 ? 1 : -1);
const __half gradl1 = __hmul(decay1, (__hgt(w[i],zero0) ? one1 : -one1));
//const float gradl2 = w[i] * decay2;
const __half gradl2 = __hmul(w[i] ,decay2);
//dw[i] = (dw[i] + gradl2 + gradl1) / batch;
dw[i] = __hdiv(__hadd(__hadd(dw[i], gradl2) , gradl1) , batch);
}
__syncthreads();
if (threadIdx.x==0){
l1[0]=__low2half(l1h2[0]);
l2[0]=__low2half(l2h2[0]);
}
}
#endif
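//Descriptive note (added): per-parameter elastic-net style regularisation, scalar form:
//  l1   += |w[i]| * decay1
//  l2   += (w[i]*w[i] * decay2) / 2
//  dw[i] = (dw[i] + w[i]*decay2 + decay1*sign(w[i])) / batch
//The fallback above accumulates into block-shared __half2 storage because
//atomicAdd on a plain __half pointer requires compute capability 7.0 or newer.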
extern "C" __global__ void ThreshForwardFP16(const int XThreads,
const int batchsize,
const __half *x,
__half *y,
const __half *negcoefs,
const __half *threshhold,
const __half *poscoefs)
{
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (__hgt(x[stride+xIdx],threshhold[xIdx]))
{
y[stride+xIdx]= __hmul(x[stride+xIdx],poscoefs[xIdx]);
}
else
{
y[stride+xIdx]= __hmul(negcoefs[xIdx],x[stride+xIdx]);
}
}
}
}
extern "C" __global__ void ThreshBackwardFP16(const int XThreads,
const int batchsize,
const __half *x,
__half *dx,
const __half *dy,
const __half *negcoefs,
__half *dnegcoefs,
const __half *threshhold,
const __half *poscoefs,
__half *dposcoefs)
{
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (__hgt(x[stride+xIdx],threshhold[xIdx]))
{
// dx[stride+xIdx]= poscoefs[xIdx]*dy[stride+xIdx];
dx[stride+xIdx]=__hmul(dy[stride+xIdx],poscoefs[xIdx]);
// dposcoefs[xIdx]+=dy[stride+xIdx]*x[stride+xIdx];
dposcoefs[xIdx]=__hfma(dy[stride+xIdx],x[stride+xIdx],dposcoefs[xIdx]);
}
else
{
// dx[stride+xIdx]= negcoefs[xIdx]*dy[stride+xIdx];
dx[stride+xIdx]= __hmul(dy[stride+xIdx],negcoefs[xIdx]);
// dnegcoefs[xIdx]+=dy[stride+xIdx]*x[stride+xIdx];
dnegcoefs[xIdx]=__hfma(dy[stride+xIdx],x[stride+xIdx],dnegcoefs[xIdx]);
}
}
}
}
extern "C" __global__ void PreluForwardFP16(const int XThreads,
const int batchsize,
const __half *x,
__half *y,
const __half *coefs)
{
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (__hgt(x[stride+xIdx],__float2half(0.0)))
{
y[stride+xIdx]= x[stride+xIdx];
}
else
{
y[stride+xIdx]= __hmul(coefs[xIdx],x[stride+xIdx]);
}
}
}
}
extern "C" __global__ void PreluBackwardFP16(const int XThreads,
const int batchsize,
__half *dx,
const __half *x,
const __half *dy,
const __half *coefs,
__half *dcoefs)
{
const __half zero0 = __float2half(0);
for (int i=0;i<batchsize;i++)
{
int stride=XThreads*i;
CUDA_GRID_LOOP_X(xIdx,XThreads)
{
if (__hgt(x[stride+xIdx],zero0))
{
dx[stride+xIdx]= dy[stride+xIdx];
}
else
{
// dx[stride+xIdx]= coefs[xIdx]*dy[stride+xIdx];
dx[stride+xIdx]= __hmul(coefs[xIdx],dy[stride+xIdx]);
// dcoefs[xIdx]+=dy[stride+xIdx]*x[stride+xIdx];
dcoefs[xIdx]=__hfma(dy[stride+xIdx],x[stride+xIdx],dcoefs[xIdx]);
}
}
}
}
extern "C" __global__ void LeakyForwardAlphaBetaFP16(const int length,
const __half *x,
__half *y,
const __half coef,
const __half alpha,
const __half beta)
{
const __half zero0 = __float2half(0);
CUDA_GRID_LOOP_X(i, length)
{
if (__hgt(x[i],zero0))
{
// y[i] = (beta*y[i]) + (alpha *x[i]) ;
y[i]=__hadd(__hmul(beta,y[i]),__hmul(alpha,x[i]));
}
else
{
//y[i] = (beta*previous) + (alpha *x[i]*coef);
y[i]=__hadd(__hmul(beta,y[i]),__hmul(alpha,__hmul(x[i],coef)));
}
__syncthreads();
}
}
extern "C" __global__ void LeakyBackwardAlphaBetaFP16(const int length,
const __half *x,
__half *dx,
const __half *dy,
const __half coef,
const __half alpha,
const __half beta)
{
const __half zero0 = __float2half(0);
CUDA_GRID_LOOP_X(i, length)
{
if (__hgt(x[i],zero0))
{
// dx[i] =(beta *dx[i]) + (dy[i] * alpha);
dx[i]=__hadd(__hmul(beta,dx[i]),__hmul(alpha,dy[i]));
}
else
{
// dx[i] = (beta *dx[i]) + (dy[i]*coef * alpha);
dx[i]=__hadd(__hmul(beta,dx[i]),__hmul(alpha,__hmul(dy[i],coef)));
}
__syncthreads();
}
}
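//Descriptive note (added): the *AlphaBeta variants above appear to follow the
//cuDNN-style scaling convention, out = alpha*op(x) + beta*out_prev, so beta=0
//overwrites the destination and beta=1 accumulates into it.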
extern "C" __global__ void LeakyForwardAlphaFP16(const int length,
const __half *x,
__half *y,
const __half coef,
const __half alpha)
{
const __half zero0 = __float2half(0);
CUDA_GRID_LOOP_X(i, length)
{
if (__hgt(x[i],zero0))
{
y[i] = __hmul(alpha ,x[i]);
}
else
{
y[i] =__hmul(__hmul(x[i],coef) , alpha);
}
__syncthreads();
}
}
extern "C" __global__ void LeakyBackwardAlphaFP16(const int length,
const __half *x,
__half *dx,
const __half *dy,
const __half coef,
const __half alpha)
{
const __half zero0 = __float2half(0);
CUDA_GRID_LOOP_X(i, length)
{
if (__hgt(x[i],zero0))
{
// dx[i] = dy[i]*alpha;
dx[i] = __hmul(alpha ,dy[i]);
}
else
{
// dx[i] = dy[i]*coef *alpha;
dx[i] =__hmul(__hmul(dy[i],coef) , alpha);
}
__syncthreads();
}
}
extern "C" __global__ void LeakyForwardFP16(const int length,
const __half *x,
__half *y,
const __half coef)
{
const __half zero0 = __float2half(0);
CUDA_GRID_LOOP_X(i, length)
{
if (__hgt(x[i],zero0))
{
y[i] = x[i];
}
else
{
// y[i] = x[i] * coef;
y[i]= __hmul( x[i] , coef);
}
}
}
extern "C" __global__ void LeakyBackwardFP16(const int length,
const __half *x,
__half *dx,
const __half *dy,
const __half coef)
{
const __half zero0 = __float2half(0);
CUDA_GRID_LOOP_X(i, length)
{
if (__hgt(x[i],zero0))
{
dx[i] = dy[i];
}
else
{
// dx[i] = dy[i] * coef;
dx[i]= __hmul( dy[i] , coef);
}
}
}
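//Descriptive note (added) for the MSE kernels below: per element the error is
//err = networkout - target and the accumulated loss is sum(err*err)/2. The
//fallback versions after the #else process the data as __half2 pairs and so
//assume an even element count.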
#if __CUDA_ARCH__ >= 750
extern "C" __global__ void MSELossbyBatchesFP16(const int xthreads,
const int ythreads,
__half *errors,
const __half *target,
const __half *networkout,
__half *loss)
{
const __half htwo= __float2half(2.0);
CUDA_GRID_AXIS_LOOP(xIdx,xthreads,x)
{
const int i=ythreads*xIdx;
CUDA_GRID_AXIS_LOOP(yIdx, ythreads,y)
{
const __half y = __hsub(networkout[i+yIdx] , target[i+yIdx]);
errors[i+yIdx] = y;
atomicAdd(&loss[xIdx], __hdiv(__hmul(y , y) , htwo));
}
}
}
extern "C" __global__ void MSELossFP16(const int n,
__half *errors,
const __half *target,
const __half *networkout,
__half *loss,
const __half alpha,
const __half beta)
{
StartAxis(stx,x)
int n2=n/2;
__half2 *errors2=(__half2*)errors, *target2=(__half2*)target, *networkout2=(__half2*)networkout, *loss2=(__half2*)loss;
// const __half2 alpha2=__halves2half2(alpha), beta2=__halves2half2(beta);
const __half2 htwo2=__halves2half2(__float2half(2.0),__float2half(2.0));
const __half htwo= __float2half(2.0);
loss[0]=0;
CUDA_GRID_LOOP_X(i, n2)
{
const __half2 y = __hsub2(networkout2[i] , target2[i]);
errors2[i] = y;
atomicAdd(loss2, __h2div(__hmul2(y , y) ,htwo2));
}
if (stx==0 && (n%2)){
const int i=n-1;
const __half y = __hsub(networkout[i] , target[i]);
errors[i] = y;
atomicAdd(loss, __hdiv(__hmul(y , y) , htwo));
}
}
#else
extern "C" __global__ void MSELossbyBatchesFP16(
const int xthreads,
const int batches,
__half2 *errors,
const __half2 *target,
const __half2 *networkout,
__half *loss)
{
const __half htwo= __float2half(2.0);
const __half2 htwo2 =__halves2half2(htwo,htwo);
const int n=xthreads/2;
__shared__ __half2 loss2; //block-shared accumulator, reset for every batch
for (int i=0; i<batches;i++){
if (threadIdx.x==0){
loss2=__floats2half2_rn(0.0,0.0);
}
__syncthreads();
CUDA_GRID_AXIS_LOOP(xIdx,n,x)
{
const __half2 y = __hsub2(networkout[i*n+xIdx] , target[i*n+xIdx]);
errors[i*n+xIdx] = y;
atomicAdd(&loss2, __h2div(__hmul2(y , y) , htwo2));
}
__syncthreads();
if (threadIdx.x==0){
loss[i]=__hadd(__low2half(loss2),__high2half(loss2));
}
__syncthreads();
}
}
extern "C" __global__ void MSELossFP16(const int n,
__half2 *errors,
const __half2 *target,
const __half2 *networkout,
__half *loss,
const __half alpha,
const __half beta)
{
// StartAxis(stx,x)
int n2=n/2;
// const __half2 alpha2=__halves2half2(alpha), beta2=__halves2half2(beta);
const __half2 htwo2=__halves2half2(__float2half(2.0),__float2half(2.0));
// const __half htwo= __float2half(2.0);
__shared__ __half2 loss2; //block-shared accumulator
if (threadIdx.x==0){
loss2= __halves2half2(__float2half(0.0),__float2half(0.0));
}
__syncthreads();
CUDA_GRID_LOOP_X(i, n2)
{
const __half2 y = __hsub2(networkout[i] , target[i]);
errors[i] = y;
atomicAdd(&loss2, __h2div(__hmul2(y , y) ,htwo2));
}
__syncthreads();
if (threadIdx.x==0){
loss[0]=__hadd(__low2half(loss2),__high2half(loss2));
}
}
#endif |
8d9104af2c07f189a25daa80a23b734cb4d89406.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include"timer.h"
#define NUM 10000
__global__ void find_max(int *arr, int *max){
int i;
int idx= blockIdx.x*blockDim.x + threadIdx.x;
for(i=(*max)/2; ; i/=2){
if( arr[idx] < arr[idx+i])
arr[idx] = arr[idx+i];
if( i%2!=0) break;
}
for(int j=0; j<10; j++){
if(arr[idx] < arr[idx+j])
arr[idx]=arr[idx+j];
}
/*
for(int i=0; i<NUM/1024+1; i++){
if( idx+i*blockDim.x < NUM && arr[idx] < arr[idx+i*blockDim.x] )
arr[idx] = arr[idx+i*blockDim.x];
}
for(int i=512; i>0 ;i/=2){
if(arr[idx] < arr[idx+i])
arr[idx] = arr[idx+i];
}
*/
*max = arr[0];
}
int main(int argc, char *argv[]){
int *arr;
int *d_arr, *d_m;
int max=0, i, n;
double st, fn;
srand(time(NULL));
if( NUM%1024 != 0 )
n = ((int)(NUM/1024)+1)*1024;
else
n = NUM;
arr=(int*)calloc(n, sizeof(int));
for(i =0; i<n; i++)
arr[i]=rand()%10000;
hipMalloc((void**)&d_arr, sizeof(int)*n);
hipMalloc((void**)&d_m, sizeof(int));
hipMemcpy( d_arr, arr, sizeof(int)*NUM, hipMemcpyHostToDevice);
hipMemcpy( d_m, &n, sizeof(int), hipMemcpyHostToDevice);
GET_TIME(st);
hipLaunchKernelGGL(( find_max), dim3(10), dim3(512), 0, 0, d_arr, d_m);
hipMemcpy( &max, d_m, sizeof(int), hipMemcpyDeviceToHost);
GET_TIME(fn);
printf("%d\n", max);
printf("Elapsed Time: %lf\n", fn-st);
hipFree(d_arr);
hipFree(d_m);
free(arr);
return 0;
}
| 8d9104af2c07f189a25daa80a23b734cb4d89406.cu | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include"timer.h"
#define NUM 10000
__global__ void find_max(int *arr, int *max){
int i;
int idx= blockIdx.x*blockDim.x + threadIdx.x;
for(i=(*max)/2; ; i/=2){
if( arr[idx] < arr[idx+i])
arr[idx] = arr[idx+i];
if( i%2!=0) break;
}
for(int j=0; j<10; j++){
if(arr[idx] < arr[idx+j])
arr[idx]=arr[idx+j];
}
/*
for(int i=0; i<NUM/1024+1; i++){
if( idx+i*blockDim.x < NUM && arr[idx] < arr[idx+i*blockDim.x] )
arr[idx] = arr[idx+i*blockDim.x];
}
for(int i=512; i>0 ;i/=2){
if(arr[idx] < arr[idx+i])
arr[idx] = arr[idx+i];
}
*/
*max = arr[0];
}
int main(int argc, char *argv[]){
int *arr;
int *d_arr, *d_m;
int max=0, i, n;
double st, fn;
srand(time(NULL));
if( NUM%1024 != 0 )
n = ((int)(NUM/1024)+1)*1024;
else
n = NUM;
arr=(int*)calloc(n, sizeof(int));
for(i =0; i<n; i++)
arr[i]=rand()%10000;
cudaMalloc((void**)&d_arr, sizeof(int)*n);
cudaMalloc((void**)&d_m, sizeof(int));
cudaMemcpy( d_arr, arr, sizeof(int)*NUM, cudaMemcpyHostToDevice);
cudaMemcpy( d_m, &n, sizeof(int), cudaMemcpyHostToDevice);
GET_TIME(st);
find_max<<<10, 512>>>(d_arr, d_m);
cudaMemcpy( &max, d_m, sizeof(int), cudaMemcpyDeviceToHost);
GET_TIME(fn);
printf("%d\n", max);
printf("Elapsed Time: %lf\n", fn-st);
cudaFree(d_arr);
cudaFree(d_m);
free(arr);
return 0;
}
|
2a82722ab9b52c255f7cc7cfbaa3d58ca13d3508.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <sys/time.h>
__global__ void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
y[i] = a * x[i] + y[i];
}
int main(void)
{
int N = 1 << 20;
float *x, *y, *d_x, *d_y;
x = (float *)malloc(N * sizeof(float));
y = (float *)malloc(N * sizeof(float));
for (int i = 0; i < N; i++)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC, &start);
hipMalloc(&d_x, N * sizeof(float));
hipMalloc(&d_y, N * sizeof(float));
hipMemcpy(d_x, x, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N * sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY on 1M elements
hipLaunchKernelGGL(( saxpy), dim3((N + 255) / 256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_x);
hipFree(d_y);
clock_gettime(CLOCK_MONOTONIC, &end);
double time_taken;
time_taken = (end.tv_sec - start.tv_sec) * 1e9;
time_taken = (time_taken + (end.tv_nsec - start.tv_nsec)) * 1e-9;
printf("Time: %f\n", time_taken);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i] - 4.0f));
printf("Max error: %f\n", maxError);
free(x);
free(y);
} | 2a82722ab9b52c255f7cc7cfbaa3d58ca13d3508.cu | #include <stdio.h>
#include <sys/time.h>
__global__ void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
y[i] = a * x[i] + y[i];
}
int main(void)
{
int N = 1 << 20;
float *x, *y, *d_x, *d_y;
x = (float *)malloc(N * sizeof(float));
y = (float *)malloc(N * sizeof(float));
for (int i = 0; i < N; i++)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC, &start);
cudaMalloc(&d_x, N * sizeof(float));
cudaMalloc(&d_y, N * sizeof(float));
cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on 1M elements
saxpy<<<(N + 255) / 256, 256>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_y);
clock_gettime(CLOCK_MONOTONIC, &end);
double time_taken;
time_taken = (end.tv_sec - start.tv_sec) * 1e9;
time_taken = (time_taken + (end.tv_nsec - start.tv_nsec)) * 1e-9;
printf("Time: %f\n", time_taken);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i] - 4.0f));
printf("Max error: %f\n", maxError);
free(x);
free(y);
} |
0d5d3b5b47cf500841d729fe98e850884263d1bc.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 0d5d3b5b47cf500841d729fe98e850884263d1bc.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
05ec6f6ea0049e277acbbd46e0aa49c29e5a5d5a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#include "types.h"
#include <stdio.h>
#include <stdbool.h>
#include <hiprand/hiprand_kernel.h>
#define m0Mask 0xC000
#define m1Mask 0x3000
#define m2Mask 0x0C00
#define m3Mask 0x0300
#define m4Mask 0x00C0
#define m5Mask 0x0030
#define m6Mask 0x000C
#define m7Mask 0x0003
#define eighthSize 65536
#define quarterSize 16384
#define halfSize 32768
#define threeQuarterSize 49152
#define blockSize 256
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
/**
* This function returns a score for the Board. Right now it just returns the game score; it may be reworked to include neighboring tiles' combined scores if more accuracy is needed.
* @param input Pointer to Board to be scored.
* @return The score
*/
__device__ uint32_t score(Board * input){
uint32_t scoreVal;
scoreVal = 0;
for(uint8_t i=0; i < HEIGHT; i++){
for(uint8_t j=0; j < WIDTH; j++){
scoreVal += (*input)[i][j];
//printf("[%d] ",(*input)[i][j]);
}
//printf("\r\n");
}
return scoreVal;
}
__device__ void leftSolver(Board * input, Board * output){
int8_t i, j, moveCounter, mergeCounter;
for(int q = 0; q < HEIGHT; q++){
for(int r = 0; r < WIDTH; r++){
(*output)[q][r] = (*input)[q][r];
}
}
//This section moves all items through the 0's.
moveCounter = 0;
for(i=0; i < HEIGHT; i++){
for(j=0; j < WIDTH; j++){
int test = (*output)[i][j];
if(test == 0){
moveCounter++;
}
else if(moveCounter != 0){
((*output))[i][(j-moveCounter)] = ((*output))[i][j];
((*output))[i][j] = 0;
}
}
moveCounter = 0;
}
//This section merges any nearby values
mergeCounter = 0;
for(i=0; i < HEIGHT; i++){
for(j=0; j < WIDTH-1; j++){
if(((*output))[i][j] == ((*output))[i][j+1]){
((*output))[i][j] = 2*(((*output))[i][j]);
mergeCounter++;
((*output))[i][j+1] = 0;
}
if(mergeCounter != 0 && ((*output))[i][j+1] != 0){
(*output)[i][j] = (*output)[i][j+1];
(*output)[i][j+1] = 0;
}
}
mergeCounter = 0;
}
}
__device__ void rightSolver(Board * input, Board * output){
int8_t i, j, moveCounter, mergeCounter;
for(int q = 0; q < HEIGHT; q++){
for(int r = 0; r < WIDTH; r++){
(*output)[q][r] = (*input)[q][r];
}
}
//This section moves all items through the 0's.
moveCounter = 0;
for(i=0; i < HEIGHT; i++){
for(j=WIDTH-1; j >= 0; j--){
int test = (*output)[i][j];
if(test == 0){
moveCounter++;
}
else if(moveCounter != 0){
((*output))[i][(j+moveCounter)] = ((*output))[i][j];
((*output))[i][j] = 0;
}
}
moveCounter = 0;
}
//This section merges any nearby values
mergeCounter = 0;
for(i=0; i < HEIGHT; i++){
for(j=WIDTH-1; j > 0; j--){
if(((*output))[i][j] == ((*output))[i][j-1]){
((*output))[i][j] = 2*(((*output))[i][j]);
mergeCounter++;
((*output))[i][j-1] = 0;
}
if(mergeCounter != 0 && (*output)[i][j-1] != 0){
(*output)[i][j] = (*output)[i][j-1];
(*output)[i][j-1] = 0;
}
}
mergeCounter = 0;
}
}
__device__ void upSolver(Board * input, Board * output){
int8_t i, j, moveCounter, mergeCounter;
for(int q = 0; q < HEIGHT; q++){
for(int r = 0; r < WIDTH; r++){
(*output)[q][r] = (*input)[q][r];
}
}
//This section moves all items through the 0's.
moveCounter = 0;
for(j=0; j < WIDTH; j++){
for(i=0; i < HEIGHT; i++){
if((*output)[i][j] == 0){
moveCounter++;
}
else if(moveCounter != 0){
(*output)[(i-moveCounter)][j] = (*output)[i][j];
(*output)[i][j] = 0;
}
}
moveCounter = 0;
}
mergeCounter = 0;
for(j=0; j < WIDTH; j++){
for(i=0; i < HEIGHT-1; i++){
if((*output)[i][j] == (*output)[i+1][j]){
(*output)[i][j] = 2*((*output)[i][j]);
mergeCounter++;
(*output)[i+1][j] = 0;
}
if(mergeCounter != 0 && (*output)[i+1][j] != 0){
(*output)[i][j] = (*output)[i+1][j];
(*output)[i+1][j] = 0;
}
}
mergeCounter = 0;
}
}
__device__ void downSolver(Board * input, Board * output){
int8_t i, j, moveCounter, mergeCounter;
for(int q = 0; q < HEIGHT; q++){
for(int r = 0; r < WIDTH; r++){
(*output)[q][r] = (*input)[q][r];
}
}
//This section moves all items through the 0's.
//Might not need to dereference board pointers
moveCounter = 0;
for(j=0; j < WIDTH; j++){
for(i=HEIGHT-1; i >= 0; i--){
if((*output)[i][j] == 0){
moveCounter++;
}
else if(moveCounter != 0){
(*output)[(i+moveCounter)][j] = (*output)[i][j];
(*output)[i][j] = 0;
}
}
moveCounter = 0;
}
mergeCounter = 0;
for(j=0; j < WIDTH; j++){
for(i=HEIGHT-1; i > 0; i--){
if((*output)[i][j] == (*output)[i-1][j]){
(*output)[i][j] = 2*((*output)[i][j]);
mergeCounter++;
(*output)[i-1][j] = 0;
}
if(mergeCounter != 0 && (*output)[i-1][j] != 0){
(*output)[i][j] = (*output)[i-1][j];
(*output)[i-1][j] = 0;
}
}
mergeCounter = 0;
}
}
/**
* This function adds the random move to the board. This will most likely change later on to fit with the CUDA program so they produce the same results.
* @param movedBoard A pointer to a Board object to have a random tile added to the board.
*/
__device__ void randGen(Board * input, Board * movedBoard){
for(int q = 0; q < HEIGHT; q++){
for(int r = 0; r < WIDTH; r++){
(*movedBoard)[q][r] = (*input)[q][r];
}
}
unsigned long long seed= (*movedBoard)[0][0] + 2 * (*movedBoard)[0][1] + 3 * (*movedBoard)[0][2] + 4 * (*movedBoard)[0][3] + 5 * (*movedBoard)[1][0] + 6 * (*movedBoard)[1][1] + 7 * (*movedBoard)[1][2] + 8 * (*movedBoard)[1][3];
hiprandState_t state;
hiprand_init(seed, 0,0, &state);
unsigned int randNum = hiprand(&state);
unsigned char position = randNum % SIZE;
while((*movedBoard)[(position/WIDTH)][(position%HEIGHT)] != 0){
randNum = hiprand(&state);
position = randNum % SIZE;
}
unsigned int randomValue = hiprand(&state);
if(randomValue % 10 == 9){
(*movedBoard)[(position/WIDTH)][(position%HEIGHT)] = 4;
}
else{
(*movedBoard)[(position/WIDTH)][(position%HEIGHT)] = 2;
}
}
/**
*This takes the predetermined move and returns a Board that has had that move applied. This should be the link between the recursive section of the code and the solver
* @param input The board that is requested to be solved
* @param currMove The move to apply to the board
* @param output A pointer for the board after the move has occurred to be stored in.
* @return Returns the status of the move. Whether or not the board was updated.
*/
__device__ status moveHandler(Board * input, Board * output, Move currMove){
switch(currMove){
case(up):
// printf("Moving up \r\n");
upSolver(input, output);
break;
case down:
// printf("Moving down \r\n");
downSolver(input, output);
break;
case left:
// printf("Moving left \r\n");
leftSolver(input, output);
break;
case right:
// printf("Moving right \r\n");
rightSolver(input, output);
break;
}
bool changed = false;
bool fail = true;
for(uint8_t i=0; i < HEIGHT; i++){
for(uint8_t j=0; j < WIDTH;j++) {
if ((*output)[i][j] == 0) {
fail = false;
}
if ((*output)[i][j] != (*input)[i][j]) {
changed = true;
}
}
}
if(fail){
return boardFull;
}
else if(!changed){
return boardUnchanged;
}
return boardUpdated;
}
__global__ void maxReduce(int *d_idata, int *d_odata) {
__shared__ int sdata[512];
unsigned int tid = threadIdx.x;
unsigned int index = (blockIdx.x * blockDim.x) + tid;
sdata[tid] = d_idata[index];
__syncthreads();
for (unsigned int stride = blockDim.x / 2; stride > 0; stride /= 2) {
if (tid < stride){
sdata[tid] = max(sdata[tid], sdata[tid + stride]);
}
__syncthreads();
}
__syncthreads();
if (tid == 0){
d_odata[blockIdx.x] = sdata[0];
}
}
__global__ void kernel(Board *BoardIn, int * scoreList){
int tx = threadIdx.x;
int bx = blockIdx.x;
int bd = blockDim.x;
uint32_t threadNum = bx * bd + tx;
Board boardIn;
Board boardOut;
int i,j;
for(i = 0; i < HEIGHT; i++){
for(j = 0; j < WIDTH; j++){
boardIn[i][j] = (*BoardIn)[i][j];
}
}
Board movedBoard;
status stat;
Move mList[NUMMOVES];
//Bitwise and with mask ends up creating many invalid moves (see: 0xC000 & 0xC000), need to rightshift
mList[0] = (Move) ((threadNum & m0Mask) >> 14);
mList[1] = (Move) ((threadNum & m1Mask) >> 12);
mList[2] = (Move) ((threadNum & m2Mask) >> 10);
mList[3] = (Move) ((threadNum & m3Mask) >> 8);
mList[4] = (Move) ((threadNum & m4Mask) >> 6);
mList[5] = (Move) ((threadNum & m5Mask) >> 4);
mList[6] = (Move) ((threadNum & m6Mask) >> 2);
mList[7] = (Move) ((threadNum & m7Mask));
scoreList[threadNum] = 0;
for(i = 0; i < NUMMOVES; i++){
stat = moveHandler(&boardIn,&boardOut,mList[i]);
if(stat != boardUpdated){
break;
}
if(i != 7){
randGen(&boardOut,&movedBoard);
for(int q = 0; q < HEIGHT; q++){
for(int r = 0; r < WIDTH; r++){
boardIn[q][r] = movedBoard[q][r];
}
}
}
else{
scoreList[threadNum] = score(&boardOut);
}
}
if(scoreList[threadNum] != 0){
//printf("DEBUG SCORE:%d\r\n",scoreList[threadNum]);
}
__syncthreads();
return;
}
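//Descriptive note (added): each thread index encodes a fixed sequence of
//NUMMOVES=8 moves, two bits per move via m0Mask..m7Mask, so the 256x256=65536
//threads enumerate every possible 8-move sequence of {up,down,left,right}. The
//scores are then max-reduced, and the quarter of the index range a top score
//falls in appears to identify the best first move on the host side.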
int main(int argc, char **argv) {
wbArg_t arg;
Board hostInputBoard;
Board * deviceInputBoard;
char *inputBoardFile;
int *hostScoreList;
int *hostFinalScore;
int *deviceScoreList;
int *deviceFinalScore;
int Score;
int inputLength;
arg = wbArg_read(argc, argv);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
int numScores = (int)pow(4, 8);
int scoreListSize = numScores * sizeof(int);
int boardSize = SIZE * sizeof(int);
//inputBoardFile = (char *)wbImport(wbArg_getInputFile(arg, 0), &inputLength);
hostScoreList = (int *)malloc(scoreListSize);
hostFinalScore = (int *) malloc(blockSize * sizeof(int));
/*
for(int i = 0; i < HEIGHT; i++){
for(int j = 0; j < WIDTH; j++){
(*hostInputBoard)[i][j] = (*inputBoardFile)[i * WIDTH + j];
}
}
*/
hostInputBoard[0][0] = 0;
hostInputBoard[0][1] = 0;
hostInputBoard[0][2] = 0;
hostInputBoard[0][3] = 0;
hostInputBoard[1][0] = 2;
hostInputBoard[1][1] = 0;
hostInputBoard[1][2] = 0;
hostInputBoard[1][3] = 0;
hostInputBoard[2][0] = 0;
hostInputBoard[2][1] = 2;
hostInputBoard[2][2] = 256;
hostInputBoard[2][3] = 0;
hostInputBoard[3][0] = 0;
hostInputBoard[3][1] = 0;
hostInputBoard[3][2] = 0;
hostInputBoard[3][3] = 0;
wbCheck(hipMalloc((void**)&deviceScoreList, scoreListSize));
wbCheck(hipMalloc((void**)&deviceInputBoard, boardSize));
//CUDA MALLOC SINGLE SCORE FROM EACH BLOCK
wbCheck(hipMalloc((void**)&deviceFinalScore, blockSize * sizeof(int)));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
wbCheck(hipMemcpy(deviceInputBoard, &hostInputBoard, boardSize, hipMemcpyHostToDevice));
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
dim3 DimGrid(256, 1, 1);
dim3 DimBlock(256, 1, 1);
hipLaunchKernelGGL(( kernel), dim3(DimGrid), dim3(DimBlock), 0, 0, deviceInputBoard,deviceScoreList);
wbTime_stop(Compute, "Doing the computation on the GPU");
hipDeviceSynchronize();
wbCheck(hipPeekAtLastError());
////////////////////////////////////////////////////
wbTime_start(Copy, "Copying data from the GPU");
wbCheck(hipMemcpy(hostScoreList, deviceScoreList, scoreListSize, hipMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying data from the GPU");
//RUN REDUCTION KERNEL
hipLaunchKernelGGL(( maxReduce), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceScoreList, deviceFinalScore);
wbCheck(hipMemcpy(hostFinalScore, deviceFinalScore, blockSize * sizeof(int), hipMemcpyDeviceToHost));
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
int i;
int upCount = 0;
int downCount = 0;
int leftCount = 0;
int rightCount = 0;
for(i = 0; i < blockSize; i++){
printf("Return %d: %d \r\n", i, hostFinalScore[i]);
}
//Determine the highest Board Score
for (i = 0; i < eighthSize; i++) {
printf("%d : %d \r\n", i, hostScoreList[i]);
if (hostScoreList[i] == hostFinalScore[0]) {
if(i < quarterSize){
upCount++;
} else if(i < halfSize){
downCount++;
} else if(i < threeQuarterSize){
leftCount++;
} else{
rightCount++;
}
}
}
int total = upCount + downCount + leftCount + rightCount;
printf("Up CHANCE: %d / %d \r\n", upCount, total);
printf("Down CHANCE: %d / %d \r\n", downCount, total);
printf("Left CHANCE: %d / %d \r\n", leftCount, total);
printf("Right CHANCE: %d / %d \r\n", rightCount, total);
printf("Highest Score Predicted: %ld \r\n", hostFinalScore[0]);
wbSolution(arg, hostScoreList, scoreListSize);
wbCheck(hipFree(deviceScoreList));
wbCheck(hipFree(deviceInputBoard));
wbCheck(hipFree(deviceFinalScore));
free(hostScoreList);
free(hostFinalScore);
return 0;
}
| 05ec6f6ea0049e277acbbd46e0aa49c29e5a5d5a.cu |
#include <wb.h>
#include "types.h"
#include <stdio.h>
#include <stdbool.h>
#include <curand_kernel.h>
#define m0Mask 0xC000
#define m1Mask 0x3000
#define m2Mask 0x0C00
#define m3Mask 0x0300
#define m4Mask 0x00C0
#define m5Mask 0x0030
#define m6Mask 0x000C
#define m7Mask 0x0003
#define eighthSize 65536
#define quarterSize 16384
#define halfSize 32768
#define threeQuarterSize 49152
#define blockSize 256
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
/**
* This function returns a score for the Board. Right now it just returns the game score; it may be reworked to include neighboring tiles' combined scores if more accuracy is needed.
* @param input Pointer to Board to be scored.
* @return The score
*/
__device__ uint32_t score(Board * input){
uint32_t scoreVal;
scoreVal = 0;
for(uint8_t i=0; i < HEIGHT; i++){
for(uint8_t j=0; j < WIDTH; j++){
scoreVal += (*input)[i][j];
//printf("[%d] ",(*input)[i][j]);
}
//printf("\r\n");
}
return scoreVal;
}
__device__ void leftSolver(Board * input, Board * output){
int8_t i, j, moveCounter, mergeCounter;
for(int q = 0; q < HEIGHT; q++){
for(int r = 0; r < WIDTH; r++){
(*output)[q][r] = (*input)[q][r];
}
}
//This section moves all items through the 0's.
moveCounter = 0;
for(i=0; i < HEIGHT; i++){
for(j=0; j < WIDTH; j++){
int test = (*output)[i][j];
if(test == 0){
moveCounter++;
}
else if(moveCounter != 0){
((*output))[i][(j-moveCounter)] = ((*output))[i][j];
((*output))[i][j] = 0;
}
}
moveCounter = 0;
}
//This section merges any nearby values
mergeCounter = 0;
for(i=0; i < HEIGHT; i++){
for(j=0; j < WIDTH-1; j++){
if(((*output))[i][j] == ((*output))[i][j+1]){
((*output))[i][j] = 2*(((*output))[i][j]);
mergeCounter++;
((*output))[i][j+1] = 0;
}
if(mergeCounter != 0 && ((*output))[i][j+1] != 0){
(*output)[i][j] = (*output)[i][j+1];
(*output)[i][j+1] = 0;
}
}
mergeCounter = 0;
}
}
__device__ void rightSolver(Board * input, Board * output){
int8_t i, j, moveCounter, mergeCounter;
for(int q = 0; q < HEIGHT; q++){
for(int r = 0; r < WIDTH; r++){
(*output)[q][r] = (*input)[q][r];
}
}
//This section moves all items through the 0's.
moveCounter = 0;
for(i=0; i < HEIGHT; i++){
for(j=WIDTH-1; j >= 0; j--){
int test = (*output)[i][j];
if(test == 0){
moveCounter++;
}
else if(moveCounter != 0){
((*output))[i][(j+moveCounter)] = ((*output))[i][j];
((*output))[i][j] = 0;
}
}
moveCounter = 0;
}
//This section merges any nearby values
mergeCounter = 0;
for(i=0; i < HEIGHT; i++){
for(j=WIDTH-1; j > 0; j--){
if(((*output))[i][j] == ((*output))[i][j-1]){
((*output))[i][j] = 2*(((*output))[i][j]);
mergeCounter++;
((*output))[i][j-1] = 0;
}
if(mergeCounter != 0 && (*output)[i][j-1] != 0){
(*output)[i][j] = (*output)[i][j-1];
(*output)[i][j-1] = 0;
}
}
mergeCounter = 0;
}
}
__device__ void upSolver(Board * input, Board * output){
int8_t i, j, moveCounter, mergeCounter;
for(int q = 0; q < HEIGHT; q++){
for(int r = 0; r < WIDTH; r++){
(*output)[q][r] = (*input)[q][r];
}
}
//This section moves all items through the 0's.
moveCounter = 0;
for(j=0; j < WIDTH; j++){
for(i=0; i < HEIGHT; i++){
if((*output)[i][j] == 0){
moveCounter++;
}
else if(moveCounter != 0){
(*output)[(i-moveCounter)][j] = (*output)[i][j];
(*output)[i][j] = 0;
}
}
moveCounter = 0;
}
mergeCounter = 0;
for(j=0; j < WIDTH; j++){
for(i=0; i < HEIGHT-1; i++){
if((*output)[i][j] == (*output)[i+1][j]){
(*output)[i][j] = 2*((*output)[i][j]);
mergeCounter++;
(*output)[i+1][j] = 0;
}
if(mergeCounter != 0 && (*output)[i+1][j] != 0){
(*output)[i][j] = (*output)[i+1][j];
(*output)[i+1][j] = 0;
}
}
mergeCounter = 0;
}
}
__device__ void downSolver(Board * input, Board * output){
int8_t i, j, moveCounter, mergeCounter;
for(int q = 0; q < HEIGHT; q++){
for(int r = 0; r < WIDTH; r++){
(*output)[q][r] = (*input)[q][r];
}
}
//This section moves all items through the 0's.
//Might not need to dereference board pointers
moveCounter = 0;
for(j=0; j < WIDTH; j++){
for(i=HEIGHT-1; i >= 0; i--){
if((*output)[i][j] == 0){
moveCounter++;
}
else if(moveCounter != 0){
(*output)[(i+moveCounter)][j] = (*output)[i][j];
(*output)[i][j] = 0;
}
}
moveCounter = 0;
}
mergeCounter = 0;
for(j=0; j < WIDTH; j++){
for(i=HEIGHT-1; i > 0; i--){
if((*output)[i][j] == (*output)[i-1][j]){
(*output)[i][j] = 2*((*output)[i][j]);
mergeCounter++;
(*output)[i-1][j] = 0;
}
if(mergeCounter != 0 && (*output)[i-1][j] != 0){
(*output)[i][j] = (*output)[i-1][j];
(*output)[i-1][j] = 0;
}
}
mergeCounter = 0;
}
}
/**
* This function adds the random move to the board. This will most likely change later on to fit with the CUDA program so they produce the same results.
* @param movedBoard A pointer to a Board object to have a random tile added to the board.
*/
__device__ void randGen(Board * input, Board * movedBoard){
for(int q = 0; q < HEIGHT; q++){
for(int r = 0; r < WIDTH; r++){
(*movedBoard)[q][r] = (*input)[q][r];
}
}
unsigned long long seed= (*movedBoard)[0][0] + 2 * (*movedBoard)[0][1] + 3 * (*movedBoard)[0][2] + 4 * (*movedBoard)[0][3] + 5 * (*movedBoard)[1][0] + 6 * (*movedBoard)[1][1] + 7 * (*movedBoard)[1][2] + 8 * (*movedBoard)[1][3];
curandState_t state;
curand_init(seed, 0,0, &state);
unsigned int randNum = curand(&state);
unsigned char position = randNum % SIZE;
while((*movedBoard)[(position/WIDTH)][(position%HEIGHT)] != 0){
randNum = curand(&state);
position = randNum % SIZE;
}
unsigned int randomValue = curand(&state);
if(randomValue % 10 == 9){
(*movedBoard)[(position/WIDTH)][(position%HEIGHT)] = 4;
}
else{
(*movedBoard)[(position/WIDTH)][(position%HEIGHT)] = 2;
}
}
/**
*This takes the predetermined move and returns a Board that has had that move applied. This should be the link between the recursive section of the code and the solver
* @param input The board that is requested to be solved
* @param currMove The move to apply to the board
* @param output A pointer for the board after the move has occurred to be stored in.
* @return Returns the status of the move. Whether or not the board was updated.
*/
__device__ status moveHandler(Board * input, Board * output, Move currMove){
switch(currMove){
case(up):
// printf("Moving up \r\n");
upSolver(input, output);
break;
case down:
// printf("Moving down \r\n");
downSolver(input, output);
break;
case left:
// printf("Moving left \r\n");
leftSolver(input, output);
break;
case right:
// printf("Moving right \r\n");
rightSolver(input, output);
break;
}
bool changed = false;
bool fail = true;
for(uint8_t i=0; i < HEIGHT; i++){
for(uint8_t j=0; j < WIDTH;j++) {
if ((*output)[i][j] == 0) {
fail = false;
}
if ((*output)[i][j] != (*input)[i][j]) {
changed = true;
}
}
}
if(fail){
return boardFull;
}
else if(!changed){
return boardUnchanged;
}
return boardUpdated;
}
__global__ void maxReduce(int *d_idata, int *d_odata) {
__shared__ int sdata[512];
unsigned int tid = threadIdx.x;
unsigned int index = (blockIdx.x * blockDim.x) + tid;
sdata[tid] = d_idata[index];
__syncthreads();
for (unsigned int stride = blockDim.x / 2; stride > 0; stride /= 2) {
if (tid < stride){
sdata[tid] = max(sdata[tid], sdata[tid + stride]);
}
__syncthreads();
}
__syncthreads();
if (tid == 0){
d_odata[blockIdx.x] = sdata[0];
}
}
__global__ void kernel(Board *BoardIn, int * scoreList){
int tx = threadIdx.x;
int bx = blockIdx.x;
int bd = blockDim.x;
uint32_t threadNum = bx * bd + tx;
Board boardIn;
Board boardOut;
int i,j;
for(i = 0; i < HEIGHT; i++){
for(j = 0; j < WIDTH; j++){
boardIn[i][j] = (*BoardIn)[i][j];
}
}
Board movedBoard;
status stat;
Move mList[NUMMOVES];
//Bitwise and with mask ends up creating many invalid moves (see: 0xC000 & 0xC000), need to rightshift
mList[0] = (Move) ((threadNum & m0Mask) >> 14);
mList[1] = (Move) ((threadNum & m1Mask) >> 12);
mList[2] = (Move) ((threadNum & m2Mask) >> 10);
mList[3] = (Move) ((threadNum & m3Mask) >> 8);
mList[4] = (Move) ((threadNum & m4Mask) >> 6);
mList[5] = (Move) ((threadNum & m5Mask) >> 4);
mList[6] = (Move) ((threadNum & m6Mask) >> 2);
mList[7] = (Move) ((threadNum & m7Mask));
scoreList[threadNum] = 0;
for(i = 0; i < NUMMOVES; i++){
stat = moveHandler(&boardIn,&boardOut,mList[i]);
if(stat != boardUpdated){
break;
}
if(i != 7){
randGen(&boardOut,&movedBoard);
for(int q = 0; q < HEIGHT; q++){
for(int r = 0; r < WIDTH; r++){
boardIn[q][r] = movedBoard[q][r];
}
}
}
else{
scoreList[threadNum] = score(&boardOut);
}
}
if(scoreList[threadNum] != 0){
//printf("DEBUG SCORE:%d\r\n",scoreList[threadNum]);
}
__syncthreads();
return;
}
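//Descriptive note (added): each thread index encodes a fixed sequence of
//NUMMOVES=8 moves, two bits per move via m0Mask..m7Mask, so the 256x256=65536
//threads enumerate every possible 8-move sequence of {up,down,left,right}. The
//scores are then max-reduced, and the quarter of the index range a top score
//falls in appears to identify the best first move on the host side.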
int main(int argc, char **argv) {
wbArg_t arg;
Board hostInputBoard;
Board * deviceInputBoard;
char *inputBoardFile;
int *hostScoreList;
int *hostFinalScore;
int *deviceScoreList;
int *deviceFinalScore;
int Score;
int inputLength;
arg = wbArg_read(argc, argv);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
int numScores = (int)pow(4, 8);
int scoreListSize = numScores * sizeof(int);
int boardSize = SIZE * sizeof(int);
//inputBoardFile = (char *)wbImport(wbArg_getInputFile(arg, 0), &inputLength);
hostScoreList = (int *)malloc(scoreListSize);
hostFinalScore = (int *) malloc(blockSize * sizeof(int));
/*
for(int i = 0; i < HEIGHT; i++){
for(int j = 0; j < WIDTH; j++){
(*hostInputBoard)[i][j] = (*inputBoardFile)[i * WIDTH + j];
}
}
*/
hostInputBoard[0][0] = 0;
hostInputBoard[0][1] = 0;
hostInputBoard[0][2] = 0;
hostInputBoard[0][3] = 0;
hostInputBoard[1][0] = 2;
hostInputBoard[1][1] = 0;
hostInputBoard[1][2] = 0;
hostInputBoard[1][3] = 0;
hostInputBoard[2][0] = 0;
hostInputBoard[2][1] = 2;
hostInputBoard[2][2] = 256;
hostInputBoard[2][3] = 0;
hostInputBoard[3][0] = 0;
hostInputBoard[3][1] = 0;
hostInputBoard[3][2] = 0;
hostInputBoard[3][3] = 0;
wbCheck(cudaMalloc((void**)&deviceScoreList, scoreListSize));
wbCheck(cudaMalloc((void**)&deviceInputBoard, boardSize));
//CUDA MALLOC SINGLE SCORE FROM EACH BLOCK
wbCheck(cudaMalloc((void**)&deviceFinalScore, blockSize * sizeof(int)));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
wbCheck(cudaMemcpy(deviceInputBoard, &hostInputBoard, boardSize, cudaMemcpyHostToDevice));
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
dim3 DimGrid(256, 1, 1);
dim3 DimBlock(256, 1, 1);
kernel<<<DimGrid, DimBlock>>>(deviceInputBoard,deviceScoreList);
wbTime_stop(Compute, "Doing the computation on the GPU");
cudaDeviceSynchronize();
wbCheck(cudaPeekAtLastError());
////////////////////////////////////////////////////
wbTime_start(Copy, "Copying data from the GPU");
wbCheck(cudaMemcpy(hostScoreList, deviceScoreList, scoreListSize, cudaMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying data from the GPU");
//RUN REDUCTION KERNEL
maxReduce<<<DimGrid,DimBlock>>>(deviceScoreList, deviceFinalScore);
wbCheck(cudaMemcpy(hostFinalScore, deviceFinalScore, blockSize * sizeof(int), cudaMemcpyDeviceToHost));
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
long i;
int upCount = 0;
int downCount = 0;
int leftCount = 0;
int rightCount = 0;
for(i = 0; i < blockSize; i++){
printf("Return %d: %d \r\n", i, hostFinalScore[i]);
}
//Determine the highest Board Score
for (i = 0; i < eighthSize; i++) {
printf("%d : %d \r\n", i, hostScoreList[i]);
if (hostScoreList[i] == hostFinalScore[0]) {
if(i < quarterSize){
upCount++;
} else if(i < halfSize){
downCount++;
} else if(i < threeQuarterSize){
leftCount++;
} else{
rightCount++;
}
}
}
int total = upCount + downCount + leftCount + rightCount;
printf("Up CHANCE: %d / %d \r\n", upCount, total);
printf("Down CHANCE: %d / %d \r\n", downCount, total);
printf("Left CHANCE: %d / %d \r\n", leftCount, total);
printf("Right CHANCE: %d / %d \r\n", rightCount, total);
printf("Highest Score Predicted: %ld \r\n", hostFinalScore[0]);
wbSolution(arg, hostScoreList, scoreListSize);
wbCheck(cudaFree(deviceScoreList));
wbCheck(cudaFree(deviceInputBoard));
wbCheck(cudaFree(deviceFinalScore));
free(hostScoreList);
free(hostFinalScore);
return 0;
}
|
daa3288a518356abb1457c230659585834cae6e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
if(this->prune_) {
caffe_gpu_prune<Dtype>(this->blobs_[0]->count(), this->blobs_[0]->mutable_gpu_data(),
this->masks_[0]->mutable_gpu_data(), this->pruning_threshold_);
if(this->bias_term_){
caffe_gpu_prune<Dtype>(this->blobs_[1]->count(), this->blobs_[1]->mutable_gpu_data(),
this->masks_[1]->mutable_gpu_data(), this->pruning_threshold_);
}
}
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
if(this->prune_) {
caffe_gpu_mul<Dtype>(this->blobs_[0]->count(), this->blobs_[0]->gpu_diff(),
this->masks_[0]->gpu_data(), this->blobs_[0]->mutable_gpu_diff());
if(this->bias_term_ && this->param_propagate_down_[1]){
caffe_gpu_mul<Dtype>(this->blobs_[1]->count(), this->blobs_[1]->gpu_diff(),
this->masks_[1]->gpu_data(), this->blobs_[1]->mutable_gpu_diff());
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
| daa3288a518356abb1457c230659585834cae6e4.cu | #ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
if(this->prune_) {
caffe_gpu_prune<Dtype>(this->blobs_[0]->count(), this->blobs_[0]->mutable_gpu_data(),
this->masks_[0]->mutable_gpu_data(), this->pruning_threshold_);
if(this->bias_term_){
caffe_gpu_prune<Dtype>(this->blobs_[1]->count(), this->blobs_[1]->mutable_gpu_data(),
this->masks_[1]->mutable_gpu_data(), this->pruning_threshold_);
}
}
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
if(this->prune_) {
caffe_gpu_mul<Dtype>(this->blobs_[0]->count(), this->blobs_[0]->gpu_diff(),
this->masks_[0]->gpu_data(), this->blobs_[0]->mutable_gpu_diff());
if(this->bias_term_ && this->param_propagate_down_[1]){
caffe_gpu_mul<Dtype>(this->blobs_[1]->count(), this->blobs_[1]->gpu_diff(),
this->masks_[1]->gpu_data(), this->blobs_[1]->mutable_gpu_diff());
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
|
4ed1cdba3c2c1f858ccca3fd066ac926899dbfef.hip | // !!! This is a file automatically generated by hipify!!!
/**
* This code is for the ECE/CSC 506 programming assignment.
* Code by Kartik Haria
*/
/**
* Vector Calculation: C = (5A^4 + 2A^2 + B)/(D^2).
*
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#define OPT 1
__global__ void
vectorAdd(const float *A, const float *B, float *C, float *D, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
#if OPT==1
// Insert your optimized code below
if (i < numElements)
{
float tempA = A[i]*A[i];
float tempD = 1/(D[i]*D[i]);
C[i] = (tempA*(5*tempA + 2)+ B[i])*tempD;
}
// Insert your optimized code above
#endif
#if OPT==2
// Insert your optimized code below
if (i < numElements)
{
float tempA = A[i]*A[i];
float tempD = D[i]*D[i];
C[i] = (tempA*(5*tempA + 2)+ B[i])/tempD;
}
// Insert your optimized code above
#endif
#if OPT==0
if (i < numElements)
{
C[i] = (5*A[i]*A[i]*A[i]*A[i]+ 2*A[i]*A[i]+ B[i])/(D[i]*D[i]);
}
#endif
}
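// Both optimized branches use the same algebraic rewrite: 5A^4 + 2A^2 + B
// = A^2 * (5*A^2 + 2) + B, which trades the naive chain of products for two
// multiplies and one fused add per term. OPT=1 additionally replaces the
// division by D^2 with a multiplication by its reciprocal, which is cheaper on
// the GPU but can differ from the reference result in the last few bits; the
// 1e-5 relative tolerance used in main() absorbs that difference.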
/**
* Host main routine
* Read through the main function and understand the memory allocation, memory copy and freeing the memory. Then insert appropriate code for vector D.
*/
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
int numElements = 50000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Allocate the host output vector D
float *h_D = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL || h_D == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
h_D[i] = rand()/(float)RAND_MAX;
// insert code here to allocate random variables to vector D
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_D = NULL;
err = hipMalloc((void **)&d_D, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector D (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_D, h_D, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector D from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, d_D, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs((((5*h_A[i]*h_A[i]*h_A[i]*h_A[i] + 2*h_A[i]*h_A[i]+ h_B[i])/(h_D[i]*h_D[i])) - h_C[i])/h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d by %f !\n", i, fabs(((5*h_A[i]*h_A[i]*h_A[i]*h_A[i] + 2*h_A[i]*h_A[i]+ h_B[i])/(h_D[i]*h_D[i])) - h_C[i]));
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_D);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector D (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
free(h_D);
// Reset the device and exit
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
| 4ed1cdba3c2c1f858ccca3fd066ac926899dbfef.cu | /**
* This code is for the ECE/CSC 506 programming assignment.
* Code by Kartik Haria
*/
/**
* Vector Calculation: C = (5A^4 + 2A^2 + B)/(D^2).
*
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#define OPT 1
__global__ void
vectorAdd(const float *A, const float *B, float *C, float *D, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
#if OPT==1
// Insert your optimized code below
if (i < numElements)
{
float tempA = A[i]*A[i];
float tempD = 1/(D[i]*D[i]);
C[i] = (tempA*(5*tempA + 2)+ B[i])*tempD;
}
// Insert your optimized code above
#endif
#if OPT==2
// Insert your optimized code below
if (i < numElements)
{
float tempA = A[i]*A[i];
float tempD = D[i]*D[i];
C[i] = (tempA*(5*tempA + 2)+ B[i])/tempD;
}
// Insert your optimized code above
#endif
#if OPT==0
if (i < numElements)
{
C[i] = (5*A[i]*A[i]*A[i]*A[i]+ 2*A[i]*A[i]+ B[i])/(D[i]*D[i]);
}
#endif
}
/**
* Host main routine
* Read through the main function and understand the memory allocation, memory copy and freeing the memory. Then insert appropriate code for vector D.
*/
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
int numElements = 50000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Allocate the host output vector D
float *h_D = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL || h_D == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
h_D[i] = rand()/(float)RAND_MAX;
// insert code here to allocate random variables to vector D
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_D = NULL;
err = cudaMalloc((void **)&d_D, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector D (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_D, h_D, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector D from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_D, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs((((5*h_A[i]*h_A[i]*h_A[i]*h_A[i] + 2*h_A[i]*h_A[i]+ h_B[i])/(h_D[i]*h_D[i])) - h_C[i])/h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d by %f !\n", i, fabs(((5*h_A[i]*h_A[i]*h_A[i]*h_A[i] + 2*h_A[i]*h_A[i]+ h_B[i])/(h_D[i]*h_D[i])) - h_C[i]));
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_D);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector D (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
free(h_D);
// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
cac1aa62e5996cd08ab64ef412802b1e845c039b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/system/hip/execution_policy.h>
#include "caffe2/operators/summarize_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
// structure used to accumulate the moments and other statistical properties
// encountered so far.
template <typename T>
struct SummaryStatsData {
T n;
T min;
T max;
T mean;
T M2;
// initialize to the identity element
void initialize() {
n = mean = M2 = 0;
min = std::numeric_limits<T>::max();
max = std::numeric_limits<T>::min();
}
T variance() { return (n == 1 ? 0 : M2 / (n - 1)); }
};
// summary_stats_unary_op is a functor that takes in a value x and
// returns a SummaryStatsData whose mean value is initialized to x.
template <typename T>
struct summary_stats_unary_op {
__host__ __device__ SummaryStatsData<T> operator()(const T& x) const {
SummaryStatsData<T> result;
result.n = 1;
result.min = x;
result.max = x;
result.mean = x;
result.M2 = 0;
return result;
}
};
// summary_stats_binary_op is a functor that accepts two SummaryStatsData
// structs and returns a new SummaryStatsData which is an
// approximation to the summary stats for
// all values that have been aggregated so far
template <typename T>
struct summary_stats_binary_op
: public thrust::binary_function<const SummaryStatsData<T>&,
const SummaryStatsData<T>&,
SummaryStatsData<T> > {
__host__ __device__ SummaryStatsData<T> operator()(
const SummaryStatsData<T>& x, const SummaryStatsData <T>& y) const {
SummaryStatsData<T> result;
T n = x.n + y.n;
T delta = y.mean - x.mean;
T delta2 = delta * delta;
result.n = n;
result.min = thrust::min(x.min, y.min);
result.max = thrust::max(x.max, y.max);
result.mean = x.mean + delta * y.n / n;
result.M2 = x.M2 + y.M2;
result.M2 += delta2 * x.n * y.n / n;
return result;
}
};
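// The merge above is the standard pairwise update for streaming moments
// (Chan et al.): with delta = mean_y - mean_x and n = n_x + n_y,
//   mean = mean_x + delta * n_y / n
//   M2   = M2_x + M2_y + delta^2 * n_x * n_y / n
// so variance() = M2 / (n - 1) is correct regardless of the tree-shaped
// reduction order chosen by thrust::transform_reduce below.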
} // namespace
template<>
bool SummarizeOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
const int N = X.size();
DCHECK_GT(N, 0);
// TODO(Yangqing): Any better way to avoid having to const cast?
thrust::device_ptr<float> Xdata(const_cast<float*>(X.data<float>()));
summary_stats_unary_op<float> unary_op;
summary_stats_binary_op<float> binary_op;
SummaryStatsData<float> init;
init.initialize();
// compute summary statistics
SummaryStatsData<float> result = thrust::transform_reduce(
#if THRUST_VERSION >= 100800
thrust::hip::par.on(context_.cuda_stream()),
#endif // THRUST_VERSION >= 100800
Xdata, Xdata + N, unary_op, init, binary_op);
float standard_deviation = std::sqrt(result.variance());
if (to_file_) {
(*log_file_) << result.min << " " << result.max << " " << result.mean << " "
<< standard_deviation << std::endl;
}
if (OutputSize()) {
auto* Y = Output(0);
Y->Resize(4);
float output_buffer[NUM_STATS] = {result.min, result.max, result.mean,
standard_deviation};
context_.CopyFromCPU<float>(
NUM_STATS, output_buffer, Y->template mutable_data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(Summarize, SummarizeOp<float, CUDAContext>);
} // namespace caffe2
| cac1aa62e5996cd08ab64ef412802b1e845c039b.cu | #include <cuda.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/system/cuda/execution_policy.h>
#include "caffe2/operators/summarize_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
// structure used to accumulate the moments and other statistical properties
// encountered so far.
template <typename T>
struct SummaryStatsData {
T n;
T min;
T max;
T mean;
T M2;
// initialize to the identity element
void initialize() {
n = mean = M2 = 0;
min = std::numeric_limits<T>::max();
max = std::numeric_limits<T>::min();
}
T variance() { return (n == 1 ? 0 : M2 / (n - 1)); }
};
// summary_stats_unary_op is a functor that takes in a value x and
// returns a SummaryStatsData whose mean value is initialized to x.
template <typename T>
struct summary_stats_unary_op {
__host__ __device__ SummaryStatsData<T> operator()(const T& x) const {
SummaryStatsData<T> result;
result.n = 1;
result.min = x;
result.max = x;
result.mean = x;
result.M2 = 0;
return result;
}
};
// summary_stats_binary_op is a functor that accepts two SummaryStatsData
// structs and returns a new SummaryStatsData which is an
// approximation to the summary stats for
// all values that have been aggregated so far
template <typename T>
struct summary_stats_binary_op
: public thrust::binary_function<const SummaryStatsData<T>&,
const SummaryStatsData<T>&,
SummaryStatsData<T> > {
__host__ __device__ SummaryStatsData<T> operator()(
const SummaryStatsData<T>& x, const SummaryStatsData <T>& y) const {
SummaryStatsData<T> result;
T n = x.n + y.n;
T delta = y.mean - x.mean;
T delta2 = delta * delta;
result.n = n;
result.min = thrust::min(x.min, y.min);
result.max = thrust::max(x.max, y.max);
result.mean = x.mean + delta * y.n / n;
result.M2 = x.M2 + y.M2;
result.M2 += delta2 * x.n * y.n / n;
return result;
}
};
} // namespace
template<>
bool SummarizeOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
const int N = X.size();
DCHECK_GT(N, 0);
// TODO(Yangqing): Any better way to avoid having to const cast?
thrust::device_ptr<float> Xdata(const_cast<float*>(X.data<float>()));
summary_stats_unary_op<float> unary_op;
summary_stats_binary_op<float> binary_op;
SummaryStatsData<float> init;
init.initialize();
// compute summary statistics
SummaryStatsData<float> result = thrust::transform_reduce(
#if THRUST_VERSION >= 100800
thrust::cuda::par.on(context_.cuda_stream()),
#endif // THRUST_VERSION >= 100800
Xdata, Xdata + N, unary_op, init, binary_op);
float standard_deviation = std::sqrt(result.variance());
if (to_file_) {
(*log_file_) << result.min << " " << result.max << " " << result.mean << " "
<< standard_deviation << std::endl;
}
if (OutputSize()) {
auto* Y = Output(0);
Y->Resize(4);
float output_buffer[NUM_STATS] = {result.min, result.max, result.mean,
standard_deviation};
context_.CopyFromCPU<float>(
NUM_STATS, output_buffer, Y->template mutable_data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(Summarize, SummarizeOp<float, CUDAContext>);
} // namespace caffe2
|
eaf956358f7c509239e722a61583e9f2c54ce569.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include "cpu.h"
namespace StreamCompaction {
namespace CPU {
/**
* CPU scan (prefix sum).
*/
void scan(int n, int *odata, const int *idata) {
float time = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
odata[0] = 0;
for(int i = 1; i<n; i++) {
odata[i] = odata[i-1] + idata[i-1];
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("CPU scan time is %.4f ms \n", time);
}
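// scan() computes an exclusive prefix sum: odata[i] is the sum of
// idata[0..i-1], with odata[0] == 0. For example, idata = {3, 1, 7, 0, 4}
// produces odata = {0, 3, 4, 11, 11}.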
/**
* CPU stream compaction without using the scan function.
*
* @returns the number of elements remaining after compaction.
*/
int compactWithoutScan(int n, int *odata, const int *idata) {
// TODO
int count = 0;
for( int i = 0; i<n; i++) {
if ( idata[i] != 0 ) {
odata[count] = idata[i];
count++;
}
}
return count;
}
/**
* CPU stream compaction using scan and scatter, like the parallel version.
*
* @returns the number of elements remaining after compaction.
*/
int compactWithScan(int n, int *odata, const int *idata) {
// TODO
int temp[n];
int idx[n];
for(int i = 0; i<n; i++) {
if(idata[i] != 0) {
temp[i] = 1;
} else {
temp[i] = 0;
}
}
scan(n, idx, temp);
for(int j = 0; j<n; j++) {
if(temp[j] == 1) {
odata[idx[j]] = idata[j];
}
}
// Exclusive scan: add the last flag so a non-zero final element is counted.
return idx[n-1] + temp[n-1];
}
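// compactWithScan: temp[] holds a 0/1 flag per element, scan() turns the flags
// into exclusive write offsets, and the scatter loop copies every flagged
// element to odata[idx[j]]. Example: idata = {1, 0, 2, 0, 3} gives
// temp = {1, 0, 1, 0, 1}, idx = {0, 1, 1, 2, 2}, odata = {1, 2, 3}, and the
// count is idx[n-1] + temp[n-1] = 3. (temp and idx are variable-length arrays,
// a GNU extension; std::vector<int> would be the portable choice.)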
}
}
| eaf956358f7c509239e722a61583e9f2c54ce569.cu | #include <cstdio>
#include "cpu.h"
namespace StreamCompaction {
namespace CPU {
/**
* CPU scan (prefix sum).
*/
void scan(int n, int *odata, const int *idata) {
float time = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
odata[0] = 0;
for(int i = 1; i<n; i++) {
odata[i] = odata[i-1] + idata[i-1];
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("CPU scan time is %.4f ms \n", time);
}
/**
* CPU stream compaction without using the scan function.
*
* @returns the number of elements remaining after compaction.
*/
int compactWithoutScan(int n, int *odata, const int *idata) {
// TODO
int count = 0;
for( int i = 0; i<n; i++) {
if ( idata[i] != 0 ) {
odata[count] = idata[i];
count++;
}
}
return count;
}
/**
* CPU stream compaction using scan and scatter, like the parallel version.
*
* @returns the number of elements remaining after compaction.
*/
int compactWithScan(int n, int *odata, const int *idata) {
// TODO
int temp[n];
int idx[n];
for(int i = 0; i<n; i++) {
if(idata[i] != 0) {
temp[i] = 1;
} else {
temp[i] = 0;
}
}
scan(n, idx, temp);
for(int j = 0; j<n; j++) {
if(temp[j] == 1) {
odata[idx[j]] = idata[j];
}
}
// Exclusive scan: add the last flag so a non-zero final element is counted.
return idx[n-1] + temp[n-1];
}
}
}
|
de54dec6a1d789c0cb631ba94da317a44db723ec.hip | // !!! This is a file automatically generated by hipify!!!
// clang-format off
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
// clang-format on
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "../../graph_parser/parse.h"
#include "../../graph_parser/util.h"
#include "kernel.hip"
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
hipError_t err = hipSuccess;
if (argc == 3) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]); // File format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
int *rank_array = (int *)malloc(num_nodes * sizeof(int));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
int *cc_d;
// Create device-side buffers for the graph
err = hipMalloc(&row_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc row_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&col_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc col_d (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&inrow_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc inrow_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMalloc(&incol_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc incol_d (size:%d) => %s\n",
num_edges, hipGetErrorString(err));
return -1;
}
// Create buffers for cc
err = hipMalloc(&cc_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc cc_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = hipMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy row_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy col_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy inrow_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy incol_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
double timer3 = gettime();
VirtVertex<int, int> **vertex;
GraphChiContext *context;
err = hipMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int> *));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc vertex (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&context, sizeof(GraphChiContext));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc context (size:%d) => %s\n",
num_edges, hipGetErrorString(err));
return -1;
}
printf("Start initCtx\n");
hipLaunchKernelGGL(( initContext), dim3(1), dim3(1), 0, 0, context, num_nodes, num_edges);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initCtx failed (%s)\n",
hipGetErrorString(err));
return -1;
}
printf("Start initObj\n");
hipLaunchKernelGGL(( initObject), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d, inrow_d,
incol_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n",
hipGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
hipLaunchKernelGGL(( initOutEdge), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
hipGetErrorString(err));
return -1;
}
// Run CC for some iter. TO: convergence determination
for (int i = 0; i < ITER; i++) {
printf("Start ConnectedComponent\n");
hipLaunchKernelGGL(( ConnectedComponent), dim3(grid), dim3(threads), 0, 0, vertex, context, i);
printf("Finish ConnectedComponent\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
hipGetErrorString(err));
return -1;
}
}
hipDeviceSynchronize();
double timer4 = gettime();
printf("Start Copyback\n");
hipLaunchKernelGGL(( copyBack), dim3(grid), dim3(threads), 0, 0, vertex, context, cc_d);
printf("End Copyback\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
hipGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = hipMemcpy(rank_array, cc_d, num_nodes * sizeof(int),
hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy() failed (%s)\n",
hipGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
hipFree(row_d);
hipFree(col_d);
hipFree(inrow_d);
hipFree(incol_d);
hipFree(cc_d);
return 0;
}
void print_vectorf(int *vector, int num) {
FILE *fp = fopen("result.out", "w");
if (!fp) {
printf("ERROR: unable to open result.txt\n");
}
for (int i = 0; i < num; i++) {
fprintf(fp, "%d\n", vector[i]);
}
fclose(fp);
}
| de54dec6a1d789c0cb631ba94da317a44db723ec.cu | // clang-format off
/************************************************************************************\
* *
* Copyright (c) 2014 Advanced Micro Devices, Inc.                                  *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
// clang-format on
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "../../graph_parser/parse.h"
#include "../../graph_parser/util.h"
#include "kernel.cu"
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
cudaError_t err = cudaSuccess;
if (argc == 3) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]); // File format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
int *rank_array = (int *)malloc(num_nodes * sizeof(int));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
int *cc_d;
// Create device-side buffers for the graph
err = cudaMalloc(&row_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&col_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&inrow_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc inrow_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&incol_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc incol_d (size:%d) => %s\n",
num_edges, cudaGetErrorString(err));
return -1;
}
// Create buffers for cc
err = cudaMalloc(&cc_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc cc_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR:#endif cudaMemcpy row_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR:#endif cudaMemcpy inrow_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy incol_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
double timer3 = gettime();
VirtVertex<int, int> **vertex;
GraphChiContext *context;
err = cudaMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int> *));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc vertex (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&context, sizeof(GraphChiContext));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc context (size:%d) => %s\n",
num_edges, cudaGetErrorString(err));
return -1;
}
printf("Start initCtx\n");
initContext<<<1, 1>>>(context, num_nodes, num_edges);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initCtx failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
printf("Start initObj\n");
initObject<<<grid, threads>>>(vertex, context, row_d, col_d, inrow_d,
incol_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
initOutEdge<<<grid, threads>>>(vertex, context, row_d, col_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
// Run CC for some iter. TO: convergence determination
for (int i = 0; i < ITER; i++) {
printf("Start ConnectedComponent\n");
ConnectedComponent<<<grid, threads>>>(vertex, context, i);
printf("Finish ConnectedComponent\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
}
cudaDeviceSynchronize();
double timer4 = gettime();
printf("Start Copyback\n");
copyBack<<<grid, threads>>>(vertex, context, cc_d);
printf("End Copyback\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = cudaMemcpy(rank_array, cc_d, num_nodes * sizeof(int),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy() failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
cudaFree(row_d);
cudaFree(col_d);
cudaFree(inrow_d);
cudaFree(incol_d);
cudaFree(cc_d);
return 0;
}
void print_vectorf(int *vector, int num) {
FILE *fp = fopen("result.out", "w");
if (!fp) {
printf("ERROR: unable to open result.txt\n");
}
for (int i = 0; i < num; i++) {
fprintf(fp, "%d\n", vector[i]);
}
fclose(fp);
}
|
3f9348ea25d9ff7e9618359e30fd7f1d917a8fdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "user_defined_types.h"
#include "op_datatypes.h"
#include "kernels.h"
__device__
#include <update.h>
__global__
void op_cuda_update(float *arg0,float *arg1,float *arg2,float *arg3,float *arg4,int set_size,void *block_reduct4)
{
float arg4_l[1];
for (int d = 0; d < 1; ++d) {
arg4_l[d] = 0;
}
for (int n = threadIdx.x + blockIdx.x * blockDim.x; n < set_size; n += blockDim.x * gridDim.x) {
update(arg0 + n * 4,arg1 + n * 4,arg2 + n * 4,arg3 + n * 1,arg4_l);
}
for (int d = 0; d < 1; ++d) {
op_reduction2_1<OP_INC>(arg4 + d,arg4_l[d],block_reduct4);
}
}
__global__
void op_cuda_update_reduction(int gridsize,float *arg4,void *block_reduct4)
{
for (int d = 0; d < 1; ++d) {
op_reduction2_2<OP_INC>(arg4 + d,block_reduct4,gridsize);
}
}
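// Two-stage reduction: op_cuda_update accumulates one partial result per block
// into block_reduct4, and the single-thread op_cuda_update_reduction launch
// then combines the gridsize partials into arg4 with an OP_INC (sum) reduction.
// The reduct_shared bytes supplied at launch back the op_reduction2_* helpers'
// shared-memory staging.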
float op_par_loop_update(const char *name,op_set set,struct op_dat<void> *arg0,int idx0,op_ptr *ptr0,enum op_access acc0,struct op_dat<void> *arg1,int idx1,op_ptr *ptr1,enum op_access acc1,struct op_dat<void> *arg2,int idx2,op_ptr *ptr2,enum op_access acc2,struct op_dat<void> *arg3,int idx3,op_ptr *ptr3,enum op_access acc3,struct op_dat<void> *arg4,int idx4,op_ptr *ptr4,enum op_access acc4)
{
int bsize = BSIZE;
int gridsize = (set.size - 1) / bsize + 1;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(1 * sizeof(float ));
reduct_size = MAX(reduct_size,sizeof(float ));
int reduct_shared = reduct_size * (BSIZE / 2);
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
push_op_dat_as_reduct(*arg4,reduct_bytes);
reduct_bytes += ROUND_UP(1 * sizeof(float ));
mvReductArraysToDevice(reduct_bytes);
void *block_reduct4 = 0;
hipMalloc(&block_reduct4,gridsize * sizeof(float ));
int const_bytes = 0;
hipEvent_t start, stop;
float elapsed_time_ms = 0.00000F;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
hipLaunchKernelGGL(( op_cuda_update), dim3(gridsize),dim3(bsize),reduct_shared, 0, ((float *)arg0->dat_d),((float *)arg1->dat_d),((float *)arg2->dat_d),((float *)arg3->dat_d),((float *)arg4->dat_d),set.size,block_reduct4);
hipLaunchKernelGGL(( op_cuda_update_reduction), dim3(1),dim3(1),reduct_shared, 0, gridsize,((float *)arg4->dat_d),block_reduct4);
hipEventRecord(stop,0);
hipDeviceSynchronize();
hipEventElapsedTime(&elapsed_time_ms,start,stop);
hipEventDestroy(start);
hipEventDestroy(stop);
mvReductArraysToHost(reduct_bytes);
pop_op_dat_as_reduct(*arg4);
hipFree(block_reduct4);
return elapsed_time_ms;
}
| 3f9348ea25d9ff7e9618359e30fd7f1d917a8fdf.cu | #include "user_defined_types.h"
#include "op_datatypes.h"
#include "kernels.h"
__device__
#include <update.h>
__global__
void op_cuda_update(float *arg0,float *arg1,float *arg2,float *arg3,float *arg4,int set_size,void *block_reduct4)
{
float arg4_l[1];
for (int d = 0; d < 1; ++d) {
arg4_l[d] = 0;
}
for (int n = threadIdx.x + blockIdx.x * blockDim.x; n < set_size; n += blockDim.x * gridDim.x) {
update(arg0 + n * 4,arg1 + n * 4,arg2 + n * 4,arg3 + n * 1,arg4_l);
}
for (int d = 0; d < 1; ++d) {
op_reduction2_1<OP_INC>(arg4 + d,arg4_l[d],block_reduct4);
}
}
__global__
void op_cuda_update_reduction(int gridsize,float *arg4,void *block_reduct4)
{
for (int d = 0; d < 1; ++d) {
op_reduction2_2<OP_INC>(arg4 + d,block_reduct4,gridsize);
}
}
float op_par_loop_update(const char *name,op_set set,struct op_dat<void> *arg0,int idx0,op_ptr *ptr0,enum op_access acc0,struct op_dat<void> *arg1,int idx1,op_ptr *ptr1,enum op_access acc1,struct op_dat<void> *arg2,int idx2,op_ptr *ptr2,enum op_access acc2,struct op_dat<void> *arg3,int idx3,op_ptr *ptr3,enum op_access acc3,struct op_dat<void> *arg4,int idx4,op_ptr *ptr4,enum op_access acc4)
{
int bsize = BSIZE;
int gridsize = (set.size - 1) / bsize + 1;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(1 * sizeof(float ));
reduct_size = MAX(reduct_size,sizeof(float ));
int reduct_shared = reduct_size * (BSIZE / 2);
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
push_op_dat_as_reduct(*arg4,reduct_bytes);
reduct_bytes += ROUND_UP(1 * sizeof(float ));
mvReductArraysToDevice(reduct_bytes);
void *block_reduct4 = 0;
cudaMalloc(&block_reduct4,gridsize * sizeof(float ));
int const_bytes = 0;
cudaEvent_t start, stop;
float elapsed_time_ms = 0.00000F;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
op_cuda_update<<<gridsize,bsize,reduct_shared>>>(((float *)arg0->dat_d),((float *)arg1->dat_d),((float *)arg2->dat_d),((float *)arg3->dat_d),((float *)arg4->dat_d),set.size,block_reduct4);
op_cuda_update_reduction<<<1,1,reduct_shared>>>(gridsize,((float *)arg4->dat_d),block_reduct4);
cudaEventRecord(stop,0);
cudaThreadSynchronize();
cudaEventElapsedTime(&elapsed_time_ms,start,stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
mvReductArraysToHost(reduct_bytes);
pop_op_dat_as_reduct(*arg4);
cudaFree(block_reduct4);
return elapsed_time_ms;
}
|
2c067e027101667b1cef408023ccfd4bb18a791e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <SDL2/SDL.h>
#include <string>
#include <stdexcept>
#include <cstdio>
#include <hip/hip_runtime.h>
#include "cuda_common.h"
#include <hip/hip_runtime_api.h>
#include "complex.h"
#include "cuda_common.h"
#include "RK3.h"
#include "constants.h"
#include <typeinfo>
//https://nvlabs.github.io/cub/
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include "textures.h"
#include "functions.h"
#include "launch_kernel.cuh"
#include "device.h"
__device__ inline int indexx(int i, int j){
return threadIdx.x + 1 + i + (threadIdx.y + 1 + j) * (blockDim.x+2);
}
template<class Real, bool usetex>
__global__ void rungekutta_step3_smem(RK3Arg<Real> arg){
complex *smem = SharedMemory<complex>();
uint id = threadIdx.x + blockIdx.x * blockDim.x;
uint i = id % nx;
uint j = id / nx;
id = i + j * nx;
int it = threadIdx.x + 1 + (threadIdx.y + 1) * (blockDim.x+2);
if(i<nx && j<ny) smem[it] = ELEM_LOAD<usetex, 2, Real>(arg.a2, id);
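// For interior points, the threads on the edges of the block additionally fetch
// the one-cell halo around the tile so the finite-difference stencil below can
// read every neighbour from shared memory.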
if(i>0 && j>0 && i<nx-1 && j<ny-1){
if(threadIdx.x == 0) smem[it-1] = ELEM_LOAD<usetex, 2, Real>(arg.a2, id-1);
if(threadIdx.y == 0) smem[index(0,-1)] = ELEM_LOAD<usetex, 2, Real>(arg.a2, id-nx);
if(threadIdx.x == blockDim.x - 1) smem[it+1] = ELEM_LOAD<usetex, 2, Real>(arg.a2, id+1);
if(threadIdx.y == blockDim.y - 1) smem[index(0,1)] = ELEM_LOAD<usetex, 2, Real>(arg.a2, id+nx);
}
__syncthreads();
Real val = 0.0;
if(i>0 && j>0 && i<nx-1 && j<ny-1){
complex seconddxy = (smem[it+1] - smem[it] * 2.0 + smem[it-1]) * 0.5 * oneoverdx2<Real>();
complex firstdx = (smem[it+1] - smem[it-1]) * 0.5 * oneoverdx<Real>();
seconddxy += (smem[indexx(0,1)] - smem[it] * 2.0 + smem[indexx(0,-1)]) * 0.5 * oneoverdy2<Real>();
complex firstdy = (smem[indexx(0,1)] - smem[indexx(0,-1)]) * 0.5 * oneoverdy<Real>();
Real px = i * dx<Real>() - Lx<Real>();
Real py = j * dy<Real>() - Ly<Real>();
firstdx = firstdy * px - firstdx * py;
px = ( (px * px + py * py) * 0.5) + (smem[it].abs2() * g<Real>());
complex res = smem[it] * px;
res = res - seconddxy - Iomega<Real>() * firstdx;
seconddxy = ELEM_LOAD<usetex, 0, Real>(arg.a0, id);
res = res * InvIminusGamma<Real>();
px = 2.0 / 3.0;
py = 1.0/ 3.0;
res = seconddxy * py + smem[it] * px + res * (dt<Real>() * px);
arg.a0[id] = res;
val = res.abs2();
}
__syncthreads();
Real *smem0 = (Real*)smem;
smem0[threadIdx.x] = val;
__syncthreads();
//Only one active warp does the reduction
//Blocks are always a multiple of the warp size!
if(blockDim.x > WARP_SIZE && threadIdx.x < WARP_SIZE){
for(uint s = 1; s < blockDim.x / WARP_SIZE; s++)
smem0[threadIdx.x] += smem0[threadIdx.x + WARP_SIZE * s];
}
//__syncthreads(); //No need to synchronize inside warp!!!!
//One thread does the warp reduction
if(threadIdx.x == 0 ) {
Real sum = smem0[0];
for(uint s = 1; s < WARP_SIZE; s++)
sum += smem0[s];
CudaAtomicAdd(arg.d_sum, sum);
}
}
template<class Real, bool usetex>
__global__ void rungekutta_step3(RK3Arg<Real> arg){
uint id = threadIdx.x + blockIdx.x * blockDim.x;
uint i = id % nx;
uint j = id / nx;
Real val = 0.0;
if(i>0 && j>0 && i<nx-1 && j<ny-1){
id = i + j * nx;
complex firstdx = ELEM_LOAD<usetex, 2, Real>(arg.a2, id + 1);
complex res = ELEM_LOAD<usetex, 2, Real>(arg.a2, id - 1);
complex actual = ELEM_LOAD<usetex, 2, Real>(arg.a2, id);
complex seconddxy = (firstdx - actual * 2.0 + res) * 0.5 * oneoverdx2<Real>();
firstdx = (firstdx - res) * 0.5 * oneoverdx<Real>();
complex firstdy = ELEM_LOAD<usetex, 2, Real>(arg.a2, id + nx);
res = ELEM_LOAD<usetex, 2, Real>(arg.a2, id - nx);
seconddxy += (firstdy - actual * 2.0 + res) * 0.5 * oneoverdy2<Real>();
firstdy = (firstdy - res) * 0.5 * oneoverdy<Real>();
Real px = i * dx<Real>() - Lx<Real>();
Real py = j * dy<Real>() - Ly<Real>();
firstdx = firstdy * px - firstdx * py;
px = ( (px * px + py * py) * 0.5) + (actual.abs2() * g<Real>());
res = actual * px;
res = res - seconddxy - Iomega<Real>() * firstdx;
seconddxy = ELEM_LOAD<usetex, 0, Real>(arg.a0, id);
res = res * InvIminusGamma<Real>();
px = 2.0 / 3.0;
py = 1.0/ 3.0;
actual = seconddxy * py + actual * px + res * (dt<Real>() * px);
arg.a0[id] = actual;
val = actual.abs2();
}
Real *smem = SharedMemory<Real>();
smem[threadIdx.x] = val;
__syncthreads();
//Only one active warp does the reduction
//Blocks are always a multiple of the warp size!
if(blockDim.x > WARP_SIZE && threadIdx.x < WARP_SIZE){
for(uint s = 1; s < blockDim.x / WARP_SIZE; s++)
smem[threadIdx.x] += smem[threadIdx.x + WARP_SIZE * s];
}
//__syncthreads(); //No need to synchronize inside warp!!!!
//One thread does the warp reduction
if(threadIdx.x == 0 ) {
Real sum = smem[0];
for(uint s = 1; s < WARP_SIZE; s++)
sum += smem[s];
CudaAtomicAdd(arg.d_sum, sum);
}
}
template <class Real>
void RK3<Real>::setup(complex *a0_, complex *a1_, complex *a2_, Real* d_sum, size_t nx_, size_t ny_) {
arg.a0 = a0_;
arg.a1 = a1_;
arg.a2 = a2_;
arg.d_sum = d_sum;
nx = nx_;
ny = ny_;
tuning = true;
tp.block = make_uint3(32,1,1);
tp.grid = make_uint3((nx*ny+tp.block.x-1)/tp.block.x, 1, 1);
typ = 0;
}
template <class Real>
void RK3<Real>::tune(){
CUDA_SAFE_CALL(hipGetDevice( &dev));
CUDA_SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev));
//Timer t0;
uint3 block = make_uint3(32,1,1);
uint3 grid = make_uint3((nx*ny+block.x-1)/block.x, 1, 1);
size_t shared_bytes = 0;
if(typ>=2) shared_bytes = (block.x + 2)*(block.y+2) * sizeof(complex);
else shared_bytes = (block.x)*(block.y) * sizeof(Real);
tp.time = 9999999999.0;
size_t size = nx*ny * sizeof(complex);
CUDA_SAFE_CALL(hipMalloc(&tmp, size));
CUDA_SAFE_CALL(hipMemcpy(tmp, arg.a0, size, hipMemcpyDeviceToDevice));
hipError_t error;
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
float elapsed_time;
name = typeid(*this).name();
name += "_" + ToString(typ);
while(tuning){
hipDeviceSynchronize();
hipGetLastError(); // clear error counter
//t0.start();
hipEventRecord(start, 0);
callKernel(grid, block, shared_bytes);
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&elapsed_time, start, end);
hipDeviceSynchronize();
error = hipGetLastError();
{ // check that error state is cleared
hipDeviceSynchronize();
hipError_t error1 = hipGetLastError();
if (error1 != hipSuccess){
printf("Failed to clear error state %s\n", hipGetErrorString(error1));
exit(1);
}
}
if( tp.time > elapsed_time && (error == hipSuccess) ){
tp.block = block;
tp.grid = grid;
tp.shared_bytes = shared_bytes;
tp.time = elapsed_time;
}
if(getVerbosity() == DEBUG_VERBOSE){
if( (error == hipSuccess) ) std::cout << name << ": block=(" << block.x << "," << block.y << ")=" << block.x*block.y << ", grid=(" << grid.x << "," << grid.y << "), smem=" << shared_bytes/(1024.) << " KB, time=" << elapsed_time << " ms, " << flops(elapsed_time) << " Gflop/s, " << bwdth(elapsed_time) << " GB/s" << std::endl;
else std::cout << name << ": block=(" << block.x << "," << block.y << ")=" << block.x*block.y << ", grid=(" << grid.x << "," << grid.y << "), smem=" << shared_bytes/(1024.) << " KB, error: " << hipGetErrorString(error) << std::endl;
}
block.x += 32;
int blocksize = block.x * block.y;
if(typ>=2) shared_bytes = (block.x + 2)*(block.y+2) * sizeof(complex);
else shared_bytes = (block.x)*(block.y) * sizeof(Real);
grid = make_uint3((nx*ny+block.x-1)/block.x, 1, 1);
if(block.x > deviceProp.maxThreadsDim[0] || grid.x > deviceProp.maxGridSize[0] || blocksize > deviceProp.maxThreadsPerBlock) tuning = false;
if(block.y > deviceProp.maxThreadsDim[1] || grid.y > deviceProp.maxGridSize[1] || blocksize > deviceProp.maxThreadsPerBlock) tuning = false;
}
CUDA_SAFE_CALL(hipEventDestroy( start));
CUDA_SAFE_CALL(hipEventDestroy( end));
tuning = false;
CUDA_SAFE_CALL(hipMemcpy(arg.a0, tmp, size, hipMemcpyDeviceToDevice));
CUDA_SAFE_CALL(hipFree(tmp));
if( getVerbosity() >= SUMMARIZE ) std::cout << name << ": block=(" << tp.block.x << "," << tp.block.y << ")=" << tp.block.x*tp.block.y << ", grid=(" << tp.grid.x << "," << tp.grid.y << "), smem=" << tp.shared_bytes/(1024.) << " KB, time=" << tp.time << " ms, " << flops(tp.time) << " Gflop/s, " << bwdth(tp.time) << " GB/s\n" << std::endl;
}
template <class Real>
void RK3<Real>::run(uint typ_){
typ = typ_;
std::string name1 = typeid(*this).name();
name1 += "_" + ToString(typ);
if(tuning || name != name1){
tuning = true;
tune();
}
CUDA_SAFE_CALL(hipMemset(arg.d_sum, 0, sizeof(Real)));
callKernel(tp.grid, tp.block, tp.shared_bytes);
CUDA_SAFE_THREAD_SYNC();
CUT_CHECK_ERROR("RK3 failed.");
}
template <class Real>
void RK3<Real>::callKernel(uint3 grid, uint3 block, size_t smem){
switch(typ){
case 0:
hipLaunchKernelGGL(( rungekutta_step3<Real, false>), dim3(grid), dim3(block), smem, 0, arg);
break;
case 1:
hipLaunchKernelGGL(( rungekutta_step3<Real, true>), dim3(grid), dim3(block), smem, 0, arg);
break;
case 2:
hipLaunchKernelGGL(( rungekutta_step3_smem<Real, false>), dim3(grid), dim3(block), smem, 0, arg);
break;
case 3:
hipLaunchKernelGGL(( rungekutta_step3_smem<Real, true>), dim3(grid), dim3(block), smem, 0, arg);
break;
default:
hipLaunchKernelGGL(( rungekutta_step3<Real, false>), dim3(grid), dim3(block), smem, 0, arg);
break;
}
}
template <class Real>
float RK3<Real>::flops(float time_ms) const{
return flop() * 1.0e-6 / time_ms;
}
template <class Real>
long long RK3<Real>::flop() const{
return 86LL * (nx-2) * (ny-2);
}
template <class Real>
long long RK3<Real>::bytes() const{
return 7LL * (nx-2) * (ny-2) * sizeof(complex);
}
template <class Real>
float RK3<Real>::bwdth(float time_ms) const{
return bytes() * 1.0e-6 / time_ms;
}
template class RK3<float>;
template class RK3<double>;
| 2c067e027101667b1cef408023ccfd4bb18a791e.cu |
#include <iostream>
#include <SDL2/SDL.h>
#include <string>
#include <stdexcept>
#include <cstdio>
#include <cuda.h>
#include "cuda_common.h"
#include <cuda_runtime_api.h>
#include "complex.h"
#include "cuda_common.h"
#include "RK3.h"
#include "constants.h"
#include <typeinfo>
//https://nvlabs.github.io/cub/
#include <cub/cub.cuh>
#include <cub/block/block_reduce.cuh>
#include "textures.h"
#include "functions.h"
#include "launch_kernel.cuh"
#include "device.h"
__device__ inline int indexx(int i, int j){
return threadIdx.x + 1 + i + (threadIdx.y + 1 + j) * (blockDim.x+2);
}
template<class Real, bool usetex>
__global__ void rungekutta_step3_smem(RK3Arg<Real> arg){
complex *smem = SharedMemory<complex>();
uint id = threadIdx.x + blockIdx.x * blockDim.x;
uint i = id % nx;
uint j = id / nx;
id = i + j * nx;
int it = threadIdx.x + 1 + (threadIdx.y + 1) * (blockDim.x+2);
if(i<nx && j<ny) smem[it] = ELEM_LOAD<usetex, 2, Real>(arg.a2, id);
if(i>0 && j>0 && i<nx-1 && j<ny-1){
if(threadIdx.x == 0) smem[it-1] = ELEM_LOAD<usetex, 2, Real>(arg.a2, id-1);
if(threadIdx.y == 0) smem[index(0,-1)] = ELEM_LOAD<usetex, 2, Real>(arg.a2, id-nx);
if(threadIdx.x == blockDim.x - 1) smem[it+1] = ELEM_LOAD<usetex, 2, Real>(arg.a2, id+1);
if(threadIdx.y == blockDim.y - 1) smem[index(0,1)] = ELEM_LOAD<usetex, 2, Real>(arg.a2, id+nx);
}
__syncthreads();
Real val = 0.0;
if(i>0 && j>0 && i<nx-1 && j<ny-1){
complex seconddxy = (smem[it+1] - smem[it] * 2.0 + smem[it-1]) * 0.5 * oneoverdx2<Real>();
complex firstdx = (smem[it+1] - smem[it-1]) * 0.5 * oneoverdx<Real>();
seconddxy += (smem[indexx(0,1)] - smem[it] * 2.0 + smem[indexx(0,-1)]) * 0.5 * oneoverdy2<Real>();
complex firstdy = (smem[indexx(0,1)] - smem[indexx(0,-1)]) * 0.5 * oneoverdy<Real>();
Real px = i * dx<Real>() - Lx<Real>();
Real py = j * dy<Real>() - Ly<Real>();
firstdx = firstdy * px - firstdx * py;
px = ( (px * px + py * py) * 0.5) + (smem[it].abs2() * g<Real>());
complex res = smem[it] * px;
res = res - seconddxy - Iomega<Real>() * firstdx;
seconddxy = ELEM_LOAD<usetex, 0, Real>(arg.a0, id);
res = res * InvIminusGamma<Real>();
px = 2.0 / 3.0;
py = 1.0/ 3.0;
res = seconddxy * py + smem[it] * px + res * (dt<Real>() * px);
arg.a0[id] = res;
val = res.abs2();
}
__syncthreads();
Real *smem0 = (Real*)smem;
smem0[threadIdx.x] = val;
__syncthreads();
//Only one active warp does the reduction
//Blocks are always a multiple of the warp size!
if(blockDim.x > WARP_SIZE && threadIdx.x < WARP_SIZE){
for(uint s = 1; s < blockDim.x / WARP_SIZE; s++)
smem0[threadIdx.x] += smem0[threadIdx.x + WARP_SIZE * s];
}
//__syncthreads(); //No need to synchronize inside warp!!!!
//One thread does the warp reduction
if(threadIdx.x == 0 ) {
Real sum = smem0[0];
for(uint s = 1; s < WARP_SIZE; s++)
sum += smem0[s];
CudaAtomicAdd(arg.d_sum, sum);
}
}
template<class Real, bool usetex>
__global__ void rungekutta_step3(RK3Arg<Real> arg){
uint id = threadIdx.x + blockIdx.x * blockDim.x;
uint i = id % nx;
uint j = id / nx;
Real val = 0.0;
if(i>0 && j>0 && i<nx-1 && j<ny-1){
id = i + j * nx;
complex firstdx = ELEM_LOAD<usetex, 2, Real>(arg.a2, id + 1);
complex res = ELEM_LOAD<usetex, 2, Real>(arg.a2, id - 1);
complex actual = ELEM_LOAD<usetex, 2, Real>(arg.a2, id);
complex seconddxy = (firstdx - actual * 2.0 + res) * 0.5 * oneoverdx2<Real>();
firstdx = (firstdx - res) * 0.5 * oneoverdx<Real>();
complex firstdy = ELEM_LOAD<usetex, 2, Real>(arg.a2, id + nx);
res = ELEM_LOAD<usetex, 2, Real>(arg.a2, id - nx);
seconddxy += (firstdy - actual * 2.0 + res) * 0.5 * oneoverdy2<Real>();
firstdy = (firstdy - res) * 0.5 * oneoverdy<Real>();
Real px = i * dx<Real>() - Lx<Real>();
Real py = j * dy<Real>() - Ly<Real>();
firstdx = firstdy * px - firstdx * py;
px = ( (px * px + py * py) * 0.5) + (actual.abs2() * g<Real>());
res = actual * px;
res = res - seconddxy - Iomega<Real>() * firstdx;
seconddxy = ELEM_LOAD<usetex, 0, Real>(arg.a0, id);
res = res * InvIminusGamma<Real>();
px = 2.0 / 3.0;
py = 1.0/ 3.0;
actual = seconddxy * py + actual * px + res * (dt<Real>() * px);
arg.a0[id] = actual;
val = actual.abs2();
}
Real *smem = SharedMemory<Real>();
smem[threadIdx.x] = val;
__syncthreads();
//Only one active warp does the reduction
//Blocks are always a multiple of the warp size!
if(blockDim.x > WARP_SIZE && threadIdx.x < WARP_SIZE){
for(uint s = 1; s < blockDim.x / WARP_SIZE; s++)
smem[threadIdx.x] += smem[threadIdx.x + WARP_SIZE * s];
}
//__syncthreads(); //No need to synchronize inside warp!!!!
//One thread does the warp reduction
if(threadIdx.x == 0 ) {
Real sum = smem[0];
for(uint s = 1; s < WARP_SIZE; s++)
sum += smem[s];
CudaAtomicAdd(arg.d_sum, sum);
}
}
template <class Real>
void RK3<Real>::setup(complex *a0_, complex *a1_, complex *a2_, Real* d_sum, size_t nx_, size_t ny_) {
arg.a0 = a0_;
arg.a1 = a1_;
arg.a2 = a2_;
arg.d_sum = d_sum;
nx = nx_;
ny = ny_;
tuning = true;
tp.block = make_uint3(32,1,1);
tp.grid = make_uint3((nx*ny+tp.block.x-1)/tp.block.x, 1, 1);
typ = 0;
}
template <class Real>
void RK3<Real>::tune(){
CUDA_SAFE_CALL(cudaGetDevice( &dev));
CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev));
//Timer t0;
uint3 block = make_uint3(32,1,1);
uint3 grid = make_uint3((nx*ny+block.x-1)/block.x, 1, 1);
size_t shared_bytes = 0;
if(typ>=2) shared_bytes = (block.x + 2)*(block.y+2) * sizeof(complex);
else shared_bytes = (block.x)*(block.y) * sizeof(Real);
tp.time = 9999999999.0;
size_t size = nx*ny * sizeof(complex);
CUDA_SAFE_CALL(cudaMalloc(&tmp, size));
CUDA_SAFE_CALL(cudaMemcpy(tmp, arg.a0, size, cudaMemcpyDeviceToDevice));
cudaError_t error;
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
float elapsed_time;
name = typeid(*this).name();
name += "_" + ToString(typ);
while(tuning){
cudaDeviceSynchronize();
cudaGetLastError(); // clear error counter
//t0.start();
cudaEventRecord(start, 0);
callKernel(grid, block, shared_bytes);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&elapsed_time, start, end);
cudaDeviceSynchronize();
error = cudaGetLastError();
{ // check that error state is cleared
cudaDeviceSynchronize();
cudaError_t error1 = cudaGetLastError();
if (error1 != cudaSuccess){
printf("Failed to clear error state %s\n", cudaGetErrorString(error1));
exit(1);
}
}
if( tp.time > elapsed_time && (error == cudaSuccess) ){
tp.block = block;
tp.grid = grid;
tp.shared_bytes = shared_bytes;
tp.time = elapsed_time;
}
if(getVerbosity() == DEBUG_VERBOSE){
if( (error == cudaSuccess) ) std::cout << name << ": block=(" << block.x << "," << block.y << ")=" << block.x*block.y << ", grid=(" << grid.x << "," << grid.y << "), smem=" << shared_bytes/(1024.) << " KB, time=" << elapsed_time << " ms, " << flops(elapsed_time) << " Gflop/s, " << bwdth(elapsed_time) << " GB/s" << std::endl;
else std::cout << name << ": block=(" << block.x << "," << block.y << ")=" << block.x*block.y << ", grid=(" << grid.x << "," << grid.y << "), smem=" << shared_bytes/(1024.) << " KB, error: " << cudaGetErrorString(error) << std::endl;
}
block.x += 32;
int blocksize = block.x * block.y;
if(typ>=2) shared_bytes = (block.x + 2)*(block.y+2) * sizeof(complex);
else shared_bytes = (block.x)*(block.y) * sizeof(Real);
grid = make_uint3((nx*ny+block.x-1)/block.x, 1, 1);
if(block.x > deviceProp.maxThreadsDim[0] || grid.x > deviceProp.maxGridSize[0] || blocksize > deviceProp.maxThreadsPerBlock) tuning = false;
if(block.y > deviceProp.maxThreadsDim[1] || grid.y > deviceProp.maxGridSize[1] || blocksize > deviceProp.maxThreadsPerBlock) tuning = false;
}
CUDA_SAFE_CALL(cudaEventDestroy( start));
CUDA_SAFE_CALL(cudaEventDestroy( end));
tuning = false;
CUDA_SAFE_CALL(cudaMemcpy(arg.a0, tmp, size, cudaMemcpyDeviceToDevice));
CUDA_SAFE_CALL(cudaFree(tmp));
if( getVerbosity() >= SUMMARIZE ) std::cout << name << ": block=(" << tp.block.x << "," << tp.block.y << ")=" << tp.block.x*tp.block.y << ", grid=(" << tp.grid.x << "," << tp.grid.y << "), smem=" << tp.shared_bytes/(1024.) << " KB, time=" << tp.time << " ms, " << flops(tp.time) << " Gflop/s, " << bwdth(tp.time) << " GB/s\n" << std::endl;
}
template <class Real>
void RK3<Real>::run(uint typ_){
typ = typ_;
std::string name1 = typeid(*this).name();
name1 += "_" + ToString(typ);
if(tuning || name != name1){
tuning = true;
tune();
}
CUDA_SAFE_CALL(cudaMemset(arg.d_sum, 0, sizeof(Real)));
callKernel(tp.grid, tp.block, tp.shared_bytes);
CUDA_SAFE_THREAD_SYNC();
CUT_CHECK_ERROR("RK3 failed.");
}
template <class Real>
void RK3<Real>::callKernel(uint3 grid, uint3 block, size_t smem){
switch(typ){
case 0:
rungekutta_step3<Real, false><<<grid, block, smem, 0>>>(arg);
break;
case 1:
rungekutta_step3<Real, true><<<grid, block, smem, 0>>>(arg);
break;
case 2:
rungekutta_step3_smem<Real, false><<<grid, block, smem, 0>>>(arg);
break;
case 3:
rungekutta_step3_smem<Real, true><<<grid, block, smem, 0>>>(arg);
break;
default:
rungekutta_step3<Real, false><<<grid, block, smem, 0>>>(arg);
break;
}
}
template <class Real>
float RK3<Real>::flops(float time_ms) const{
return flop() * 1.0e-6 / time_ms;
}
template <class Real>
long long RK3<Real>::flop() const{
return 86LL * (nx-2) * (ny-2);
}
template <class Real>
long long RK3<Real>::bytes() const{
return 7LL * (nx-2) * (ny-2) * sizeof(complex);
}
template <class Real>
float RK3<Real>::bwdth(float time_ms) const{
return bytes() * 1.0e-6 / time_ms;
}
template class RK3<float>;
template class RK3<double>;
|
d82d86aa06dcecc1a9933a71152b8b5ff52b826a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_ENTRYWISE_SCALE_BIAS_LAYER_INSTANTIATE
#include "lbann/layers/learning/entrywise_scale_bias.hpp"
namespace lbann {
namespace {
/**
* Block dimensions: bsizex x bsizey x 1
*
* Grid dimensions: (height / bsizex) x (width / bsizey) x num_channels
*/
template <typename TensorDataType>
__global__ void fp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ scale,
const TensorDataType* __restrict__ bias) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto a = scale[row];
const auto b = bias[row];
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& x = input[row + col*input_ldim];
auto& y = output[row + col*output_ldim];
y = a * x + b;
}
}
}
/**
* Block dimensions: bsize x 1 x 1
*
* Grid dimensions: (height / bsize) x 1 x 1
*/
template <typename TensorDataType>
__global__ void bp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
TensorDataType* __restrict__ gradient_wrt_input,
size_t gradient_wrt_input_ldim,
const TensorDataType* __restrict__ scale,
TensorDataType* __restrict__ gradient_wrt_scale,
TensorDataType* __restrict__ gradient_wrt_bias) {
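// Per row: dx = a * dy, while the scale and bias gradients accumulate
// da = sum_over_columns(x * dy) and db = sum_over_columns(dy).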
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
for (size_t row = gid; row < height; row += nthreads) {
const auto a = scale[row];
TensorDataType da{0}, db{0};
for (size_t col = 0; col < width; ++col) {
const auto& x = input[row + col * input_ldim];
const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim];
auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim];
dx = a * dy;
da += x * dy;
db += dy;
}
gradient_wrt_scale[row] = da;
gradient_wrt_bias[row] = db;
}
}
template <typename TensorDataType>
void fp_impl(
const El::Matrix<TensorDataType, El::Device::GPU>& local_input,
El::Matrix<TensorDataType, El::Device::GPU>& local_output,
El::Matrix<TensorDataType, El::Device::GPU> const& local_scale_bias) {
// Local matrices
const auto local_scale = El::LockedView(local_scale_bias,
El::ALL, El::IR(0));
const auto local_bias = El::LockedView(local_scale_bias,
El::ALL, El::IR(1));
// Apply entry-wise scale and bias
const El::Int local_height = local_input.Height();
const El::Int local_width = local_input.Width();
if (!local_input.IsEmpty()) {
constexpr size_t block_size_x = 256;
constexpr size_t block_size_y = 1;
dim3 block_dims, grid_dims;
block_dims.x = block_size_x;
block_dims.y = block_size_y;
grid_dims.x = (local_height + block_size_x - 1) / block_size_x;
grid_dims.y = (local_width + block_size_y - 1) / block_size_y;
hipLaunchKernelGGL(( fp_kernel), dim3(grid_dims), dim3(block_dims), 0, hydrogen::cuda::GetDefaultStream(),
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
local_output.Buffer(), local_output.LDim(),
local_scale.LockedBuffer(),
local_bias.LockedBuffer());
}
}
template <typename TensorDataType>
void bp_impl(
const El::Matrix<TensorDataType, El::Device::GPU>& local_input,
const El::Matrix<TensorDataType, El::Device::GPU>& local_gradient_wrt_output,
El::Matrix<TensorDataType, El::Device::GPU>& local_gradient_wrt_input,
El::Matrix<TensorDataType, El::Device::GPU> const& local_scale_bias,
El::Matrix<TensorDataType, El::Device::GPU>& local_gradient_wrt_scale_bias) {
// Local matrices
const auto local_scale = El::LockedView(local_scale_bias,
El::ALL, El::IR(0));
auto local_gradient_wrt_scale = El::View(local_gradient_wrt_scale_bias,
El::ALL, El::IR(0));
auto local_gradient_wrt_bias = El::View(local_gradient_wrt_scale_bias,
El::ALL, El::IR(1));
// Compute gradients
const El::Int local_height = local_input.Height();
const El::Int local_width = local_input.Width();
El::Zero(local_gradient_wrt_scale_bias);
if (!local_input.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
hipLaunchKernelGGL(( bp_kernel) , dim3(grid_dims), dim3(block_dims), 0, hydrogen::cuda::GetDefaultStream(),
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(),
local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim(),
local_scale.LockedBuffer(),
local_gradient_wrt_scale.Buffer(),
local_gradient_wrt_bias.Buffer());
}
}
} // namespace
// Template instantiation
template <typename TensorDataType, data_layout Layout, El::Device Device>
void entrywise_scale_bias_layer<TensorDataType, Layout, Device>::fp_compute() {
using LocalMatType = El::Matrix<TensorDataType, Device>;
fp_impl(dynamic_cast<const LocalMatType&>(this->get_local_prev_activations()),
dynamic_cast<LocalMatType&>(this->get_local_activations()),
dynamic_cast<LocalMatType const&>(
this->weights_values(0).LockedMatrix()));
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void entrywise_scale_bias_layer<TensorDataType, Layout, Device>::bp_compute() {
using LocalMatType = El::Matrix<TensorDataType, Device>;
auto& scale_bias = this->get_weights(0);
auto& gradient_wrt_scale_bias = *this->m_weights_gradient;
bp_impl(dynamic_cast<const LocalMatType&>(this->get_local_prev_activations()),
dynamic_cast<const LocalMatType&>(this->get_local_prev_error_signals()),
dynamic_cast<LocalMatType&>(this->get_local_error_signals()),
dynamic_cast<LocalMatType const&>(
this->weights_values(0).LockedMatrix()),
dynamic_cast<LocalMatType&>(gradient_wrt_scale_bias.Matrix()));
// Update optimizer with gradient
auto* opt = scale_bias.get_optimizer();
if (opt != nullptr) {
opt->add_to_gradient(gradient_wrt_scale_bias, TensorDataType{1}, true);
}
}
LBANN_LAYER_DEFAULT_BUILDER(entrywise_scale_bias)
#define PROTO(T) \
template class entrywise_scale_bias_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class entrywise_scale_bias_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>; \
LBANN_LAYER_BUILDER_ETI(entrywise_scale_bias, T, El::Device::GPU)
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| d82d86aa06dcecc1a9933a71152b8b5ff52b826a.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_ENTRYWISE_SCALE_BIAS_LAYER_INSTANTIATE
#include "lbann/layers/learning/entrywise_scale_bias.hpp"
namespace lbann {
namespace {
/**
* Block dimensions: bsizex x bsizey x 1
*
* Grid dimensions: (height / bsizex) x (width / bsizey) x num_channels
*/
template <typename TensorDataType>
__global__ void fp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ scale,
const TensorDataType* __restrict__ bias) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto a = scale[row];
const auto b = bias[row];
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& x = input[row + col*input_ldim];
auto& y = output[row + col*output_ldim];
y = a * x + b;
}
}
}
/**
* Block dimensions: bsize x 1 x 1
*
* Grid dimensions: (height / bsize) x 1 x 1
*/
template <typename TensorDataType>
__global__ void bp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
TensorDataType* __restrict__ gradient_wrt_input,
size_t gradient_wrt_input_ldim,
const TensorDataType* __restrict__ scale,
TensorDataType* __restrict__ gradient_wrt_scale,
TensorDataType* __restrict__ gradient_wrt_bias) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
for (size_t row = gid; row < height; row += nthreads) {
const auto a = scale[row];
TensorDataType da{0}, db{0};
for (size_t col = 0; col < width; ++col) {
const auto& x = input[row + col * input_ldim];
const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim];
auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim];
dx = a * dy;
da += x * dy;
db += dy;
}
gradient_wrt_scale[row] = da;
gradient_wrt_bias[row] = db;
}
}
template <typename TensorDataType>
void fp_impl(
const El::Matrix<TensorDataType, El::Device::GPU>& local_input,
El::Matrix<TensorDataType, El::Device::GPU>& local_output,
El::Matrix<TensorDataType, El::Device::GPU> const& local_scale_bias) {
// Local matrices
const auto local_scale = El::LockedView(local_scale_bias,
El::ALL, El::IR(0));
const auto local_bias = El::LockedView(local_scale_bias,
El::ALL, El::IR(1));
// Apply entry-wise scale and bias
const El::Int local_height = local_input.Height();
const El::Int local_width = local_input.Width();
if (!local_input.IsEmpty()) {
constexpr size_t block_size_x = 256;
constexpr size_t block_size_y = 1;
dim3 block_dims, grid_dims;
block_dims.x = block_size_x;
block_dims.y = block_size_y;
grid_dims.x = (local_height + block_size_x - 1) / block_size_x;
grid_dims.y = (local_width + block_size_y - 1) / block_size_y;
fp_kernel<<<grid_dims, block_dims, 0, hydrogen::cuda::GetDefaultStream()>>>(
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
local_output.Buffer(), local_output.LDim(),
local_scale.LockedBuffer(),
local_bias.LockedBuffer());
}
}
template <typename TensorDataType>
void bp_impl(
const El::Matrix<TensorDataType, El::Device::GPU>& local_input,
const El::Matrix<TensorDataType, El::Device::GPU>& local_gradient_wrt_output,
El::Matrix<TensorDataType, El::Device::GPU>& local_gradient_wrt_input,
El::Matrix<TensorDataType, El::Device::GPU> const& local_scale_bias,
El::Matrix<TensorDataType, El::Device::GPU>& local_gradient_wrt_scale_bias) {
// Local matrices
const auto local_scale = El::LockedView(local_scale_bias,
El::ALL, El::IR(0));
auto local_gradient_wrt_scale = El::View(local_gradient_wrt_scale_bias,
El::ALL, El::IR(0));
auto local_gradient_wrt_bias = El::View(local_gradient_wrt_scale_bias,
El::ALL, El::IR(1));
// Compute gradients
const El::Int local_height = local_input.Height();
const El::Int local_width = local_input.Width();
El::Zero(local_gradient_wrt_scale_bias);
if (!local_input.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
bp_kernel <<<grid_dims, block_dims, 0, hydrogen::cuda::GetDefaultStream()>>>(
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(),
local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim(),
local_scale.LockedBuffer(),
local_gradient_wrt_scale.Buffer(),
local_gradient_wrt_bias.Buffer());
}
}
} // namespace
// Template instantiation
template <typename TensorDataType, data_layout Layout, El::Device Device>
void entrywise_scale_bias_layer<TensorDataType, Layout, Device>::fp_compute() {
using LocalMatType = El::Matrix<TensorDataType, Device>;
fp_impl(dynamic_cast<const LocalMatType&>(this->get_local_prev_activations()),
dynamic_cast<LocalMatType&>(this->get_local_activations()),
dynamic_cast<LocalMatType const&>(
this->weights_values(0).LockedMatrix()));
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void entrywise_scale_bias_layer<TensorDataType, Layout, Device>::bp_compute() {
using LocalMatType = El::Matrix<TensorDataType, Device>;
auto& scale_bias = this->get_weights(0);
auto& gradient_wrt_scale_bias = *this->m_weights_gradient;
bp_impl(dynamic_cast<const LocalMatType&>(this->get_local_prev_activations()),
dynamic_cast<const LocalMatType&>(this->get_local_prev_error_signals()),
dynamic_cast<LocalMatType&>(this->get_local_error_signals()),
dynamic_cast<LocalMatType const&>(
this->weights_values(0).LockedMatrix()),
dynamic_cast<LocalMatType&>(gradient_wrt_scale_bias.Matrix()));
// Update optimizer with gradient
auto* opt = scale_bias.get_optimizer();
if (opt != nullptr) {
opt->add_to_gradient(gradient_wrt_scale_bias, TensorDataType{1}, true);
}
}
LBANN_LAYER_DEFAULT_BUILDER(entrywise_scale_bias)
#define PROTO(T) \
template class entrywise_scale_bias_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class entrywise_scale_bias_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>; \
LBANN_LAYER_BUILDER_ETI(entrywise_scale_bias, T, El::Device::GPU)
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
f5b4beac4b3e8c37be621543f35f5fbba5e8c687.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Includes, system
#include <stdio.h>
// Includes CUDA
#include <hip/hip_runtime.h>
#include <cuda/barrier>
#include <hip/hip_cooperative_groups.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
namespace cg = cooperative_groups;
#if __CUDA_ARCH__ >= 700
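// Two-stage block reduction: each 32-thread tile reduces its value with
// shuffle-down, the tile leaders stash their partial sums in shared memory, the
// arrive/wait barrier publishes them, and warp 0 then folds the per-warp
// partials into *result (taking the square root when writeSquareRoot is true).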
template <bool writeSquareRoot> __device__ void reduceBlockData(cuda::barrier<cuda::thread_scope_block> &barrier,
cg::thread_block_tile<32> &tile32, double &threadSum, double *result)
{
extern __shared__ double tmp[];
#pragma unroll
for (int offset = tile32.size()/2; offset > 0; offset /= 2)
{
threadSum += tile32.shfl_down(threadSum, offset);
}
if (tile32.thread_rank() == 0)
{
tmp[tile32.meta_group_rank()] = threadSum;
}
auto token = barrier.arrive();
barrier.wait(std::move(token));
// The warp 0 will perform last round of reduction
if (tile32.meta_group_rank() == 0) {
double beta = tile32.thread_rank() < tile32.meta_group_size() ? tmp[tile32.thread_rank()] : 0.0;
#pragma unroll
for (int offset = tile32.size()/2; offset > 0; offset /= 2)
{
beta += tile32.shfl_down(beta, offset);
}
if (tile32.thread_rank() == 0)
{
if (writeSquareRoot)
*result = sqrt(beta);
else
*result = beta;
}
}
}
#endif
__global__ void normVecByDotProductAWBarrier(float *vecA, float *vecB, double *partialResults, int size)
{
#if __CUDA_ARCH__ >= 700
#pragma diag_suppress static_var_with_dynamic_init
cg::thread_block cta = cg::this_thread_block();
cg::grid_group grid = cg::this_grid();
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
__shared__ cuda::barrier<cuda::thread_scope_block> barrier;
if (threadIdx.x == 0) {
init(&barrier, blockDim.x);
}
cg::sync(cta);
double threadSum = 0.0;
for (int i = grid.thread_rank(); i < size; i += grid.size())
{
threadSum += (double) (vecA[i] * vecB[i]);
}
// Each thread block performs reduction of partial dotProducts and writes to
// global mem.
reduceBlockData<false>(barrier, tile32, threadSum, &partialResults[blockIdx.x]);
cg::sync(grid);
// One block performs the final summation of partial dot products
// of all the thread blocks and writes the sqrt of final dot product.
if (blockIdx.x == 0)
{
threadSum = 0.0;
for (int i = cta.thread_rank(); i < gridDim.x; i += cta.size())
{
threadSum += partialResults[i];
}
reduceBlockData<true>(barrier, tile32, threadSum, &partialResults[0]);
}
cg::sync(grid);
const double finalValue = partialResults[0];
// Perform normalization of vecA & vecB.
for (int i = grid.thread_rank(); i < size; i += grid.size())
{
vecA[i] = (float)vecA[i] / finalValue;
vecB[i] = (float)vecB[i] / finalValue;
}
#endif
}
int runNormVecByDotProductAWBarrier(int argc, char **argv, int deviceId);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
printf("%s starting...\n", argv[0]);
// This will pick the best possible CUDA capable device
int dev = findCudaDevice(argc, (const char **)argv);
int major = 0;
checkCudaErrors(hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, dev));
// Arrive-Wait Barrier require a GPU of Volta (SM7X) architecture or higher.
if (major < 7) {
printf("simpleAWBarrier requires SM 7.0 or higher. Exiting...\n");
exit(EXIT_WAIVED);
}
int supportsCooperativeLaunch = 0;
checkCudaErrors(hipDeviceGetAttribute(&supportsCooperativeLaunch, hipDeviceAttributeCooperativeLaunch, dev));
if (!supportsCooperativeLaunch)
{
printf("\nSelected GPU (%d) does not support Cooperative Kernel Launch, Waiving the run\n", dev);
exit(EXIT_WAIVED);
}
int testResult = runNormVecByDotProductAWBarrier(argc, argv, dev);
printf("%s completed, returned %s\n", argv[0], testResult ? "OK" : "ERROR!");
exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
int runNormVecByDotProductAWBarrier(int argc, char **argv, int deviceId)
{
float *vecA, *d_vecA;
float *vecB, *d_vecB;
double *d_partialResults;
int size = 10000000;
checkCudaErrors(hipHostMalloc(&vecA, sizeof(float) * size));
checkCudaErrors(hipHostMalloc(&vecB, sizeof(float) * size));
checkCudaErrors(hipMalloc(&d_vecA, sizeof(float)*size));
checkCudaErrors(hipMalloc(&d_vecB, sizeof(float)*size));
float baseVal = 2.0;
for (int i = 0; i < size; i++)
{
vecA[i] = vecB[i] = baseVal;
}
hipStream_t stream;
checkCudaErrors(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
checkCudaErrors(hipMemcpyAsync(d_vecA, vecA, sizeof(float)*size, hipMemcpyHostToDevice, stream));
checkCudaErrors(hipMemcpyAsync(d_vecB, vecB, sizeof(float)*size, hipMemcpyHostToDevice, stream));
// Kernel configuration, where a one-dimensional
// grid and one-dimensional blocks are configured.
int minGridSize = 0, blockSize = 0;
checkCudaErrors(hipOccupancyMaxPotentialBlockSize(
&minGridSize,
&blockSize,
(void*)normVecByDotProductAWBarrier,
0,
size));
int smemSize = ((blockSize/32)+1) * sizeof(double);
int numBlocksPerSm = 0;
checkCudaErrors(hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, normVecByDotProductAWBarrier, blockSize, smemSize));
int multiProcessorCount = 0;
checkCudaErrors(hipDeviceGetAttribute(&multiProcessorCount, hipDeviceAttributeMultiprocessorCount, deviceId));
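// A cooperative launch requires every block to be resident at once, so the grid
// is sized to exactly what the device can keep active: blocks per SM times the
// number of SMs.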
minGridSize = multiProcessorCount * numBlocksPerSm;
checkCudaErrors(hipMalloc(&d_partialResults, minGridSize*sizeof(double)));
printf("Launching normVecByDotProductAWBarrier kernel with numBlocks = %d blockSize = %d\n", minGridSize, blockSize);
dim3 dimGrid(minGridSize, 1, 1), dimBlock(blockSize, 1, 1);
void *kernelArgs[] = {
(void*)&d_vecA,
(void*)&d_vecB,
(void*)&d_partialResults,
(void*)&size
};
checkCudaErrors(hipLaunchCooperativeKernel((void *)normVecByDotProductAWBarrier, dimGrid, dimBlock, kernelArgs, smemSize, stream));
checkCudaErrors(hipMemcpyAsync(vecA, d_vecA, sizeof(float)*size, hipMemcpyDeviceToHost, stream));
checkCudaErrors(hipStreamSynchronize(stream));
float expectedResult = (baseVal / sqrt(size*baseVal*baseVal));
unsigned int matches = 0;
for (int i=0; i < size; i++)
{
if ((vecA[i] - expectedResult) > 0.00001)
{
printf("mismatch at i = %d\n", i);
break;
}
else
{
matches++;
}
}
printf("Result = %s\n", matches == size ? "PASSED" : "FAILED");
checkCudaErrors(hipFree(d_vecA));
checkCudaErrors(hipFree(d_vecB));
checkCudaErrors(hipFree(d_partialResults));
checkCudaErrors(hipHostFree(vecA));
checkCudaErrors(hipHostFree(vecB));
return matches == size;
}
| f5b4beac4b3e8c37be621543f35f5fbba5e8c687.cu | /*
* Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Includes, system
#include <stdio.h>
// Includes CUDA
#include <cuda_runtime.h>
#include <cuda/barrier>
#include <cooperative_groups.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
namespace cg = cooperative_groups;
#if __CUDA_ARCH__ >= 700
template <bool writeSquareRoot> __device__ void reduceBlockData(cuda::barrier<cuda::thread_scope_block> &barrier,
cg::thread_block_tile<32> &tile32, double &threadSum, double *result)
{
extern __shared__ double tmp[];
#pragma unroll
for (int offset = tile32.size()/2; offset > 0; offset /= 2)
{
threadSum += tile32.shfl_down(threadSum, offset);
}
if (tile32.thread_rank() == 0)
{
tmp[tile32.meta_group_rank()] = threadSum;
}
auto token = barrier.arrive();
barrier.wait(std::move(token));
// The warp 0 will perform last round of reduction
if (tile32.meta_group_rank() == 0) {
double beta = tile32.thread_rank() < tile32.meta_group_size() ? tmp[tile32.thread_rank()] : 0.0;
#pragma unroll
for (int offset = tile32.size()/2; offset > 0; offset /= 2)
{
beta += tile32.shfl_down(beta, offset);
}
if (tile32.thread_rank() == 0)
{
if (writeSquareRoot)
*result = sqrt(beta);
else
*result = beta;
}
}
}
#endif
__global__ void normVecByDotProductAWBarrier(float *vecA, float *vecB, double *partialResults, int size)
{
#if __CUDA_ARCH__ >= 700
#pragma diag_suppress static_var_with_dynamic_init
cg::thread_block cta = cg::this_thread_block();
cg::grid_group grid = cg::this_grid();
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
__shared__ cuda::barrier<cuda::thread_scope_block> barrier;
if (threadIdx.x == 0) {
init(&barrier, blockDim.x);
}
cg::sync(cta);
double threadSum = 0.0;
for (int i = grid.thread_rank(); i < size; i += grid.size())
{
threadSum += (double) (vecA[i] * vecB[i]);
}
// Each thread block performs reduction of partial dotProducts and writes to
// global mem.
reduceBlockData<false>(barrier, tile32, threadSum, &partialResults[blockIdx.x]);
cg::sync(grid);
// One block performs the final summation of partial dot products
// of all the thread blocks and writes the sqrt of final dot product.
if (blockIdx.x == 0)
{
threadSum = 0.0;
for (int i = cta.thread_rank(); i < gridDim.x; i += cta.size())
{
threadSum += partialResults[i];
}
reduceBlockData<true>(barrier, tile32, threadSum, &partialResults[0]);
}
cg::sync(grid);
const double finalValue = partialResults[0];
// Perform normalization of vecA & vecB.
for (int i = grid.thread_rank(); i < size; i += grid.size())
{
vecA[i] = (float)vecA[i] / finalValue;
vecB[i] = (float)vecB[i] / finalValue;
}
#endif
}
int runNormVecByDotProductAWBarrier(int argc, char **argv, int deviceId);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
printf("%s starting...\n", argv[0]);
// This will pick the best possible CUDA capable device
int dev = findCudaDevice(argc, (const char **)argv);
int major = 0;
checkCudaErrors(cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, dev));
// Arrive-Wait Barrier require a GPU of Volta (SM7X) architecture or higher.
if (major < 7) {
printf("simpleAWBarrier requires SM 7.0 or higher. Exiting...\n");
exit(EXIT_WAIVED);
}
int supportsCooperativeLaunch = 0;
checkCudaErrors(cudaDeviceGetAttribute(&supportsCooperativeLaunch, cudaDevAttrCooperativeLaunch, dev));
if (!supportsCooperativeLaunch)
{
printf("\nSelected GPU (%d) does not support Cooperative Kernel Launch, Waiving the run\n", dev);
exit(EXIT_WAIVED);
}
int testResult = runNormVecByDotProductAWBarrier(argc, argv, dev);
printf("%s completed, returned %s\n", argv[0], testResult ? "OK" : "ERROR!");
exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
int runNormVecByDotProductAWBarrier(int argc, char **argv, int deviceId)
{
float *vecA, *d_vecA;
float *vecB, *d_vecB;
double *d_partialResults;
int size = 10000000;
checkCudaErrors(cudaMallocHost(&vecA, sizeof(float) * size));
checkCudaErrors(cudaMallocHost(&vecB, sizeof(float) * size));
checkCudaErrors(cudaMalloc(&d_vecA, sizeof(float)*size));
checkCudaErrors(cudaMalloc(&d_vecB, sizeof(float)*size));
float baseVal = 2.0;
for (int i = 0; i < size; i++)
{
vecA[i] = vecB[i] = baseVal;
}
cudaStream_t stream;
checkCudaErrors(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
checkCudaErrors(cudaMemcpyAsync(d_vecA, vecA, sizeof(float)*size, cudaMemcpyHostToDevice, stream));
checkCudaErrors(cudaMemcpyAsync(d_vecB, vecB, sizeof(float)*size, cudaMemcpyHostToDevice, stream));
// Kernel configuration, where a one-dimensional
// grid and one-dimensional blocks are configured.
int minGridSize = 0, blockSize = 0;
checkCudaErrors(cudaOccupancyMaxPotentialBlockSize(
&minGridSize,
&blockSize,
(void*)normVecByDotProductAWBarrier,
0,
size));
int smemSize = ((blockSize/32)+1) * sizeof(double);
int numBlocksPerSm = 0;
checkCudaErrors(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, normVecByDotProductAWBarrier, blockSize, smemSize));
int multiProcessorCount = 0;
checkCudaErrors(cudaDeviceGetAttribute(&multiProcessorCount, cudaDevAttrMultiProcessorCount, deviceId));
minGridSize = multiProcessorCount * numBlocksPerSm;
checkCudaErrors(cudaMalloc(&d_partialResults, minGridSize*sizeof(double)));
printf("Launching normVecByDotProductAWBarrier kernel with numBlocks = %d blockSize = %d\n", minGridSize, blockSize);
dim3 dimGrid(minGridSize, 1, 1), dimBlock(blockSize, 1, 1);
void *kernelArgs[] = {
(void*)&d_vecA,
(void*)&d_vecB,
(void*)&d_partialResults,
(void*)&size
};
checkCudaErrors(cudaLaunchCooperativeKernel((void *)normVecByDotProductAWBarrier, dimGrid, dimBlock, kernelArgs, smemSize, stream));
checkCudaErrors(cudaMemcpyAsync(vecA, d_vecA, sizeof(float)*size, cudaMemcpyDeviceToHost, stream));
checkCudaErrors(cudaStreamSynchronize(stream));
float expectedResult = (baseVal / sqrt(size*baseVal*baseVal));
unsigned int matches = 0;
for (int i=0; i < size; i++)
{
if ((vecA[i] - expectedResult) > 0.00001)
{
printf("mismatch at i = %d\n", i);
break;
}
else
{
matches++;
}
}
printf("Result = %s\n", matches == size ? "PASSED" : "FAILED");
checkCudaErrors(cudaFree(d_vecA));
checkCudaErrors(cudaFree(d_vecB));
checkCudaErrors(cudaFree(d_partialResults));
checkCudaErrors(cudaFreeHost(vecA));
checkCudaErrors(cudaFreeHost(vecB));
return matches == size;
}
|
5f13cc9a4c45c446a1002c77bd0cdddd91d6d093.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
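//As a worked example of the formula: a pixel with R=100, G=150, B=200 gives
//.299*100 + .587*150 + .114*200 = 29.9 + 88.05 + 22.8 = 140.75, which is
//stored as the one-byte greyscale intensity 140 (the float is truncated when
//assigned to the unsigned char output).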
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int idx_x = (blockIdx.x*blockDim.x + threadIdx.x);
int idx_y = (blockIdx.y*blockDim.y + threadIdx.y);
if( (idx_x < numRows) && (idx_y < numCols )) {
uchar4 rgba = rgbaImage[idx_x * numCols + idx_y];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[idx_x * numCols + idx_y] = channelSum;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(16,32,1);
int nrow = numRows/blockSize.x + 1;
int ncol = numCols/blockSize.y + 1;
const dim3 gridSize(nrow, ncol, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 5f13cc9a4c45c446a1002c77bd0cdddd91d6d093.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int idx_x = (blockIdx.x*blockDim.x + threadIdx.x);
int idx_y = (blockIdx.y*blockDim.y + threadIdx.y);
if( (idx_x < numRows) && (idx_y < numCols )) {
uchar4 rgba = rgbaImage[idx_x * numCols + idx_y];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[idx_x * numCols + idx_y] = channelSum;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(16,32,1);
int nrow = numRows/blockSize.x + 1;
int ncol = numCols/blockSize.y + 1;
const dim3 gridSize(nrow, ncol, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
5816328dc5b0d4d05ac3933ecdd0286d5fafc930.hip | // !!! This is a file automatically generated by hipify!!!
#include "Util.h"
#include <direct.h>
int main()
{
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
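    // memoryClockRate is reported in kHz and memoryBusWidth in bits: the factor of 2 accounts for
    // DDR (two transfers per clock), /8 converts bits to bytes, and /1.0e6 scales the result to GB/s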
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
}
//RunByModelSerial();// FOR ROY!!!!!!!!!!!!!!! // Run, output VHot and look
/*Add to first kernel
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
*/
//printf("we are in the %s directory\n",_getcwd( NULL, 0 ));
RunByModelP();
return 0;
}
| 5816328dc5b0d4d05ac3933ecdd0286d5fafc930.cu |
#include "Util.h"
#include <direct.h>
int main()
{
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
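    // memoryClockRate is reported in kHz and memoryBusWidth in bits: the factor of 2 accounts for
    // DDR (two transfers per clock), /8 converts bits to bytes, and /1.0e6 scales the result to GB/s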
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
}
//RunByModelSerial();// FOR ROY!!!!!!!!!!!!!!! // Run, output VHot and look
/*Add to first kernel
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
*/
//printf("we are in the %s directory\n",_getcwd( NULL, 0 ));
RunByModelP();
return 0;
}
|
21374998d4933552c8dcb989d9870b981d95025a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include ".\lodepng.h"
#include "wm.h"
using namespace std;
#include <iostream>
#include <time.h>
const int Wsize = 3;
__device__ unsigned char clamp(float result) {
if (result < 0) {
result = 0;
}
if (result > 255) {
result = 255;
}
unsigned char new_result = (unsigned char)result;
return new_result;
}
__global__ void threadProcess(int new_height, int new_width, unsigned char* new_image, unsigned char* old_image, float* weights, int numBlocks, int numThreads)
{
int start, end;
int total_size = new_width * new_height;
int thread_size = total_size / (numThreads * numBlocks);
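  // note: the integer division assumes total_size is a multiple of the total thread count;
  // any remainder pixels at the end of the image are left unprocessed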
int pixels_lost = (Wsize / 2) * 2;
start = thread_size * (blockIdx.x * blockDim.x + threadIdx.x);
end = start + thread_size;
//printf("Start: %d \t End: %d \n", start, end);
for (int i = start; i < end; i++)
{
float r, g, b;
r = 0;
g = 0;
b = 0;
int x_new = i % new_width;
int y_new = i / new_width;
//printf("X: %d \t Y: %d \n", x_new, y_new);
int old_coord = y_new*(new_width + pixels_lost) + x_new;
// height
for (int wY = 0; wY < Wsize; wY++)
{
// width
for (int wX = 0; wX < Wsize; wX++)
{
float weight = weights[wX + wY * Wsize];
int pixel_index = old_coord + wY*(new_width + pixels_lost) + wX;
// printf("Pixel index: %d \n", pixel_index);
r += old_image[4 * pixel_index] * weight;
g += old_image[4 * pixel_index + 1] * weight;
b += old_image[4 * pixel_index + 2] * weight;
}
}
new_image[4*i] = clamp(r);
new_image[4*i+1] = clamp(g);
new_image[4*i+2] = clamp(b);
new_image[4*i+3] = old_image[4*(old_coord + Wsize/2 + (Wsize/2)*(new_width + pixels_lost)) + 3];
}
}
void pre_thread_process(const char* input_filename, const char* output_filename, int number_threads, float* wm)
{
unsigned error;
unsigned char* image, * new_image, * cuda_image, * cuda_new_image;
unsigned width, height;
float* device_weights;
error = lodepng_decode32_file(&image, &width, &height, input_filename);
if (error) printf("error %u: %s\n", error, lodepng_error_text(error));
 //e.g. window size 3: 3/2 = 1, 1*2 = 2 lost pixels; size 5: 2*2 = 4; size 7: 3*2 = 6
int lost_pixels = (Wsize / 2) * 2;
int new_width = width - lost_pixels;
int new_height = height - lost_pixels;
new_image = (unsigned char*)malloc(new_width* new_height * 4 * sizeof(unsigned char));
hipMalloc((void**)& cuda_image, width * height * 4 * sizeof(unsigned char));
hipMalloc((void**)& device_weights, Wsize * Wsize * sizeof(float));
hipMemcpy(cuda_image, image, width * height * 4 * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(device_weights, wm, Wsize * Wsize * sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**)& cuda_new_image, new_width * new_height * 4 * sizeof(unsigned char));
int max_thread_power = 11;
//for (int i = 0; i <= max_thread_power; i++)
//{
 int number_of_threads = number_threads; // use the thread count passed in (128 from main)
int block_number = number_of_threads / 1024 + 1;
int threads_per_block = number_of_threads / block_number;
clock_t begin = clock();
threadProcess << < block_number, threads_per_block >> > (new_height, new_width, cuda_new_image, cuda_image, device_weights, block_number, threads_per_block);
hipDeviceSynchronize();
hipMemcpy(new_image, cuda_new_image, (new_width) * (new_height) * 4 * sizeof(unsigned char), hipMemcpyDeviceToHost);
lodepng_encode32_file(output_filename, new_image, new_width, new_height); //make the new image from the data
float time_spent = (float)(clock() - begin) / CLOCKS_PER_SEC;
cout << "\n Number of threads: " << number_of_threads << "\t Run time: " << scientific << time_spent;
//}
free(image);
free(new_image);
hipFree(cuda_image);
hipFree(cuda_new_image);
hipFree(device_weights);
}
int main(int argc, char* argv[])
{
 const char* input_filename = "Input2.png"; //argv[1];
 const char* output_filename = "output.png"; // argv[2];
float* wm = (float*)malloc(Wsize * Wsize * sizeof(float));
// Flattening
for (int i = 0; i < Wsize; i++) {
for (int j = 0; j < Wsize; j++) {
// change argument here for different weight matrices
wm[i * Wsize + j] = w3[i][j];
}
}
pre_thread_process(input_filename, output_filename, 128, wm);
return 0;
}
| 21374998d4933552c8dcb989d9870b981d95025a.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include ".\lodepng.h"
#include "wm.h"
using namespace std;
#include <iostream>
#include <time.h>
const int Wsize = 3;
__device__ unsigned char clamp(float result) {
if (result < 0) {
result = 0;
}
if (result > 255) {
result = 255;
}
unsigned char new_result = (unsigned char)result;
return new_result;
}
__global__ void threadProcess(int new_height, int new_width, unsigned char* new_image, unsigned char* old_image, float* weights, int numBlocks, int numThreads)
{
int start, end;
int total_size = new_width * new_height;
int thread_size = total_size / (numThreads * numBlocks);
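  // note: the integer division assumes total_size is a multiple of the total thread count;
  // any remainder pixels at the end of the image are left unprocessed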
int pixels_lost = (Wsize / 2) * 2;
start = thread_size * (blockIdx.x * blockDim.x + threadIdx.x);
end = start + thread_size;
//printf("Start: %d \t End: %d \n", start, end);
for (int i = start; i < end; i++)
{
float r, g, b;
r = 0;
g = 0;
b = 0;
int x_new = i % new_width;
int y_new = i / new_width;
//printf("X: %d \t Y: %d \n", x_new, y_new);
int old_coord = y_new*(new_width + pixels_lost) + x_new;
// height
for (int wY = 0; wY < Wsize; wY++)
{
// width
for (int wX = 0; wX < Wsize; wX++)
{
float weight = weights[wX + wY * Wsize];
int pixel_index = old_coord + wY*(new_width + pixels_lost) + wX;
// printf("Pixel index: %d \n", pixel_index);
r += old_image[4 * pixel_index] * weight;
g += old_image[4 * pixel_index + 1] * weight;
b += old_image[4 * pixel_index + 2] * weight;
}
}
new_image[4*i] = clamp(r);
new_image[4*i+1] = clamp(g);
new_image[4*i+2] = clamp(b);
new_image[4*i+3] = old_image[4*(old_coord + Wsize/2 + (Wsize/2)*(new_width + pixels_lost)) + 3];
}
}
void pre_thread_process(const char* input_filename, const char* output_filename, int number_threads, float* wm)
{
unsigned error;
unsigned char* image, * new_image, * cuda_image, * cuda_new_image;
unsigned width, height;
float* device_weights;
error = lodepng_decode32_file(&image, &width, &height, input_filename);
if (error) printf("error %u: %s\n", error, lodepng_error_text(error));
 //e.g. window size 3: 3/2 = 1, 1*2 = 2 lost pixels; size 5: 2*2 = 4; size 7: 3*2 = 6
int lost_pixels = (Wsize / 2) * 2;
int new_width = width - lost_pixels;
int new_height = height - lost_pixels;
new_image = (unsigned char*)malloc(new_width* new_height * 4 * sizeof(unsigned char));
cudaMalloc((void**)& cuda_image, width * height * 4 * sizeof(unsigned char));
cudaMalloc((void**)& device_weights, Wsize * Wsize * sizeof(float));
cudaMemcpy(cuda_image, image, width * height * 4 * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(device_weights, wm, Wsize * Wsize * sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**)& cuda_new_image, new_width * new_height * 4 * sizeof(unsigned char));
int max_thread_power = 11;
//for (int i = 0; i <= max_thread_power; i++)
//{
 int number_of_threads = number_threads; // use the thread count passed in (128 from main)
int block_number = number_of_threads / 1024 + 1;
int threads_per_block = number_of_threads / block_number;
clock_t begin = clock();
threadProcess << < block_number, threads_per_block >> > (new_height, new_width, cuda_new_image, cuda_image, device_weights, block_number, threads_per_block);
cudaDeviceSynchronize();
cudaMemcpy(new_image, cuda_new_image, (new_width) * (new_height) * 4 * sizeof(unsigned char), cudaMemcpyDeviceToHost);
lodepng_encode32_file(output_filename, new_image, new_width, new_height); //make the new image from the data
float time_spent = (float)(clock() - begin) / CLOCKS_PER_SEC;
cout << "\n Number of threads: " << number_of_threads << "\t Run time: " << scientific << time_spent;
//}
free(image);
free(new_image);
cudaFree(cuda_image);
cudaFree(cuda_new_image);
cudaFree(device_weights);
}
int main(int argc, char* argv[])
{
 const char* input_filename = "Input2.png"; //argv[1];
 const char* output_filename = "output.png"; // argv[2];
float* wm = (float*)malloc(Wsize * Wsize * sizeof(float));
// Flattening
for (int i = 0; i < Wsize; i++) {
for (int j = 0; j < Wsize; j++) {
// change argument here for different weight matrices
wm[i * Wsize + j] = w3[i][j];
}
}
pre_thread_process(input_filename, output_filename, 128, wm);
return 0;
}
|
6efd69aee6186dd1b703e86385bb207f54426e7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 6
//Poisson Blending
/* Background
==========
The goal for this assignment is to take one image (the source) and
paste it into another image (the destination) attempting to match the
two images so that the pasting is non-obvious. This is
known as a "seamless clone".
The basic ideas are as follows:
1) Figure out the interior and border of the source image
2) Use the values of the border pixels in the destination image
as boundary conditions for solving a Poisson equation that tells
us how to blend the images.
No pixels from the destination except pixels on the border
are used to compute the match.
Solving the Poisson Equation
============================
There are multiple ways to solve this equation - we choose an iterative
method - specifically the Jacobi method. Iterative methods start with
a guess of the solution and then iterate to try and improve the guess
until it stops changing. If the problem was well-suited for the method
then it will stop and where it stops will be the solution.
The Jacobi method is the simplest iterative method and converges slowly -
that is we need a lot of iterations to get to the answer, but it is the
easiest method to write.
Jacobi Iterations
=================
Our initial guess is going to be the source image itself. This is a pretty
good guess for what the blended image will look like and it means that
we won't have to do as many iterations compared to if we had started far
from the final solution.
ImageGuess_prev (Floating point)
ImageGuess_next (Floating point)
DestinationImg
SourceImg
Follow these steps to implement one iteration:
1) For every pixel p in the interior, compute two sums over the four neighboring pixels:
Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor]
else if the neighbor in on the border then += DestinationImg[neighbor]
Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors)
2) Calculate the new pixel value:
float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT
ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255]
In this assignment we will do 800 iterations.
*/
#include "stdio.h"
#include "utils.h"
#include <thrust/host_vector.h>
__global__ void cal_mask( bool* d_mask, uchar4* d_source , int N){
int n = threadIdx.x + blockDim.x * blockIdx.x;
if (n >= N) return;
if (d_source[n].x == 255 && d_source[n].y == 255 && d_source[n].z == 255) {
d_mask[n] = 0; //outside the mask
}
else {
d_mask[n] = 1; // inside the mask
}
}
__global__ void isInside( int* d_inside, bool* d_mask , int N , int col_N){
int n = threadIdx.x + blockDim.x * blockIdx.x;
if (n >= N) {
return;
}
if (d_mask[n] == true) {
   // inside the mask; assume its four neighbours exist (never out of bounds)
if (d_mask[n-1] == true && d_mask[n+1] ==true && d_mask[n-col_N] == true && d_mask[n+col_N]== true) {
// it's inside mask
d_inside[n] = 1;
}
else {
d_inside[n] = 0;
}
}
else {
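   // not in the mask at all: any sentinel value works here, since only d_inside[n] == 1 is ever tested later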
d_inside[n] = 8;
}
}
__global__ void separateChannels(float* red, float* blue, float* green, uchar4* d_source, int N) {
int n = threadIdx.x + blockDim.x * blockIdx.x;
if (n >= N) {
return;
}
red[n] = d_source[n].x;
blue[n] = d_source[n].y;
green[n]= d_source[n].z;
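 // note: .y holds green and .z holds blue in this RGBA layout, so the "blue"/"green" buffer names are
 // swapped; storeBack writes them back to the same components, so the final image is unaffected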
}
__global__ void jacobi( float* d_out, float* d_in , float* d_source, float* d_dest, int* d_inside, bool* d_mask , int N, int col) {
int n = threadIdx.x + blockDim.x * blockIdx.x;
if (n >= N) {
return;
}
if (d_mask[n] == 1) {
if (d_inside[n] ==1 ) {
float sum1 = 0.0f;
float neighbor = 4.0f;
float sum2 = 0.0f;
if (d_inside[n-1] == 1) {
sum1 += d_in[n-1];
sum2 += d_source[n] - d_source[n-1];
}
else{
// n-1 on the border
sum1 += d_dest[n-1];
sum2 += d_source[n] - d_source[n-1];
}
if (d_inside[n+1] == 1) {
sum1 += d_in[n+1];
sum2 += d_source[n] - d_source[n+1];
}
else{
sum1 +=d_dest[n+1];
sum2 += d_source[n] - d_source[n+1];
}
if (d_inside[n+col] == 1) {
sum1 += d_in[n+col];
sum2 += d_source[n] - d_source[n+col];
}
else{
sum1 +=d_dest[n+col];
sum2 += d_source[n] - d_source[n+col];
}
if (d_inside[n-col] == 1) {
sum1 += d_in[n-col];
sum2 += d_source[n] - d_source[n-col];
}
else{
sum1 +=d_dest[n-col];
sum2 += d_source[n] - d_source[n-col];
}
float newVal = (sum1 + sum2) / neighbor ;
d_out[n] = min(255.f,max(0.f,newVal));
}
}
}
__global__ void storeBack( uchar4* d_dest, float* red, float* blue, float* green, int* d_inside , bool* d_mask, int N){
int n = threadIdx.x + blockDim.x * blockIdx.x;
if (n >= N) {
return;
}
if (d_mask[n] == 1) {
if (d_inside[n] == 1) {
      d_dest[n].x = (unsigned char)(red[n]);   // cast via unsigned char: values up to 255 overflow a signed char
      d_dest[n].y = (unsigned char)(blue[n]);
      d_dest[n].z = (unsigned char)(green[n]);
}
}
}
void your_blend(
//Input
const uchar4* const h_sourceImg, const size_t numRowsSource, const size_t numColsSource, const uchar4* const h_destImg,
//Output
uchar4* const h_blendedImg) {
printf("row: %lu, col: %lu\n", numRowsSource, numColsSource);
int N = numRowsSource * numColsSource;
bool* d_mask;
int* d_inside;
uchar4* d_source;
uchar4* d_dest;
float* d_source_red;
float* d_source_blue;
float* d_source_green;
float *d_dest_red;
float *d_dest_blue;
float *d_dest_green;
float *red;
float *blue;
float *green;
float *red2;
float *blue2;
float *green2;
checkCudaErrors(hipMalloc((void **)&d_mask , N * sizeof(bool ) ) );
checkCudaErrors(hipMalloc((void **)&d_inside , N * sizeof(int ) ) );
checkCudaErrors(hipMalloc((void **)&d_source , N * sizeof(uchar4) ) );
checkCudaErrors(hipMalloc((void **)&d_dest , N * sizeof(uchar4) ) );
checkCudaErrors(hipMalloc((void **)&d_source_red , N * sizeof(float ) ) );
checkCudaErrors(hipMalloc((void **)&d_source_blue , N * sizeof(float ) ) );
checkCudaErrors(hipMalloc((void **)&d_source_green , N * sizeof(float ) ) );
checkCudaErrors(hipMalloc((void **)&d_dest_red , N * sizeof(float ) ) );
checkCudaErrors(hipMalloc((void **)&d_dest_blue , N * sizeof(float ) ) );
checkCudaErrors(hipMalloc((void **)&d_dest_green , N * sizeof(float ) ) );
checkCudaErrors(hipMalloc((void **)&red , N * sizeof(float ) ) );
checkCudaErrors(hipMalloc((void **)&blue , N * sizeof(float ) ) );
checkCudaErrors(hipMalloc((void **)&green , N * sizeof(float ) ) );
checkCudaErrors(hipMalloc((void **)&red2 , N * sizeof(float ) ) );
checkCudaErrors(hipMalloc((void **)&blue2 , N * sizeof(float ) ) );
checkCudaErrors(hipMalloc((void **)&green2 , N * sizeof(float ) ) );
checkCudaErrors(hipMemcpy(d_source,h_sourceImg,N* sizeof(uchar4), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_dest ,h_destImg ,N* sizeof(uchar4), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( cal_mask), dim3(N/1024 + 1) , dim3(1024) , 0, 0, d_mask, d_source, N);
hipDeviceSynchronize();
hipLaunchKernelGGL(( isInside), dim3(N/1024 + 1) , dim3(1024) , 0, 0, d_inside, d_mask, N, numColsSource);
hipLaunchKernelGGL(( separateChannels), dim3(N/1024 + 1), dim3(1024), 0, 0, d_source_red, d_source_blue, d_source_green, d_source, N);
hipLaunchKernelGGL(( separateChannels), dim3(N/1024 + 1), dim3(1024), 0, 0, d_dest_red, d_dest_blue, d_dest_green, d_dest, N);
hipLaunchKernelGGL(( separateChannels), dim3(N/1024 + 1), dim3(1024), 0, 0, red, blue, green, d_source, N);
hipDeviceSynchronize();
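  // ping-pong between the two sets of buffers: each loop iteration performs two Jacobi sweeps
  // per channel, giving the 800 iterations described in the header comment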
for (int i =0; i < 400; i++) {
hipLaunchKernelGGL(( jacobi), dim3(N/1024 + 1), dim3(1024), 0, 0, red2, red, d_source_red, d_dest_red, d_inside, d_mask, N, numColsSource);
hipLaunchKernelGGL(( jacobi), dim3(N/1024 + 1), dim3(1024), 0, 0, blue2, blue, d_source_blue, d_dest_blue,d_inside, d_mask, N, numColsSource);
hipLaunchKernelGGL(( jacobi), dim3(N/1024 + 1), dim3(1024), 0, 0, green2, green, d_source_green, d_dest_green,d_inside, d_mask, N, numColsSource);
hipDeviceSynchronize();
hipLaunchKernelGGL(( jacobi), dim3(N/1024 + 1), dim3(1024), 0, 0, red, red2, d_source_red, d_dest_red, d_inside, d_mask, N, numColsSource);
hipLaunchKernelGGL(( jacobi), dim3(N/1024 + 1), dim3(1024), 0, 0, blue, blue2, d_source_blue, d_dest_blue, d_inside, d_mask, N, numColsSource);
hipLaunchKernelGGL(( jacobi), dim3(N/1024 + 1), dim3(1024), 0, 0, green, green2, d_source_green, d_dest_green, d_inside, d_mask, N, numColsSource);
hipDeviceSynchronize();
}
hipLaunchKernelGGL(( storeBack), dim3(N/1024 + 1), dim3(1024) , 0, 0, d_dest, red, blue, green, d_inside, d_mask , N);
hipDeviceSynchronize();
checkCudaErrors(hipMemcpy(h_blendedImg,d_dest,N* sizeof(uchar4), hipMemcpyDeviceToHost));
checkCudaErrors(hipGetLastError());
} | 6efd69aee6186dd1b703e86385bb207f54426e7f.cu | //Udacity HW 6
//Poisson Blending
/* Background
==========
The goal for this assignment is to take one image (the source) and
paste it into another image (the destination) attempting to match the
two images so that the pasting is non-obvious. This is
known as a "seamless clone".
The basic ideas are as follows:
1) Figure out the interior and border of the source image
2) Use the values of the border pixels in the destination image
as boundary conditions for solving a Poisson equation that tells
us how to blend the images.
No pixels from the destination except pixels on the border
are used to compute the match.
Solving the Poisson Equation
============================
There are multiple ways to solve this equation - we choose an iterative
method - specifically the Jacobi method. Iterative methods start with
a guess of the solution and then iterate to try and improve the guess
until it stops changing. If the problem was well-suited for the method
then it will stop and where it stops will be the solution.
The Jacobi method is the simplest iterative method and converges slowly -
that is we need a lot of iterations to get to the answer, but it is the
easiest method to write.
Jacobi Iterations
=================
Our initial guess is going to be the source image itself. This is a pretty
good guess for what the blended image will look like and it means that
we won't have to do as many iterations compared to if we had started far
from the final solution.
ImageGuess_prev (Floating point)
ImageGuess_next (Floating point)
DestinationImg
SourceImg
Follow these steps to implement one iteration:
1) For every pixel p in the interior, compute two sums over the four neighboring pixels:
Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor]
else if the neighbor in on the border then += DestinationImg[neighbor]
Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors)
2) Calculate the new pixel value:
float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT
ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255]
In this assignment we will do 800 iterations.
*/
#include "stdio.h"
#include "utils.h"
#include <thrust/host_vector.h>
__global__ void cal_mask( bool* d_mask, uchar4* d_source , int N){
int n = threadIdx.x + blockDim.x * blockIdx.x;
if (n >= N) return;
if (d_source[n].x == 255 && d_source[n].y == 255 && d_source[n].z == 255) {
d_mask[n] = 0; //outside the mask
}
else {
d_mask[n] = 1; // inside the mask
}
}
__global__ void isInside( int* d_inside, bool* d_mask , int N , int col_N){
int n = threadIdx.x + blockDim.x * blockIdx.x;
if (n >= N) {
return;
}
if (d_mask[n] == true) {
   // inside the mask; assume its four neighbours exist (never out of bounds)
if (d_mask[n-1] == true && d_mask[n+1] ==true && d_mask[n-col_N] == true && d_mask[n+col_N]== true) {
// it's inside mask
d_inside[n] = 1;
}
else {
d_inside[n] = 0;
}
}
else {
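   // not in the mask at all: any sentinel value works here, since only d_inside[n] == 1 is ever tested later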
d_inside[n] = 8;
}
}
__global__ void separateChannels(float* red, float* blue, float* green, uchar4* d_source, int N) {
int n = threadIdx.x + blockDim.x * blockIdx.x;
if (n >= N) {
return;
}
red[n] = d_source[n].x;
blue[n] = d_source[n].y;
green[n]= d_source[n].z;
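 // note: .y holds green and .z holds blue in this RGBA layout, so the "blue"/"green" buffer names are
 // swapped; storeBack writes them back to the same components, so the final image is unaffected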
}
__global__ void jacobi( float* d_out, float* d_in , float* d_source, float* d_dest, int* d_inside, bool* d_mask , int N, int col) {
int n = threadIdx.x + blockDim.x * blockIdx.x;
if (n >= N) {
return;
}
if (d_mask[n] == 1) {
if (d_inside[n] ==1 ) {
float sum1 = 0.0f;
float neighbor = 4.0f;
float sum2 = 0.0f;
if (d_inside[n-1] == 1) {
sum1 += d_in[n-1];
sum2 += d_source[n] - d_source[n-1];
}
else{
// n-1 on the border
sum1 += d_dest[n-1];
sum2 += d_source[n] - d_source[n-1];
}
if (d_inside[n+1] == 1) {
sum1 += d_in[n+1];
sum2 += d_source[n] - d_source[n+1];
}
else{
sum1 +=d_dest[n+1];
sum2 += d_source[n] - d_source[n+1];
}
if (d_inside[n+col] == 1) {
sum1 += d_in[n+col];
sum2 += d_source[n] - d_source[n+col];
}
else{
sum1 +=d_dest[n+col];
sum2 += d_source[n] - d_source[n+col];
}
if (d_inside[n-col] == 1) {
sum1 += d_in[n-col];
sum2 += d_source[n] - d_source[n-col];
}
else{
sum1 +=d_dest[n-col];
sum2 += d_source[n] - d_source[n-col];
}
float newVal = (sum1 + sum2) / neighbor ;
d_out[n] = min(255.f,max(0.f,newVal));
}
}
}
__global__ void storeBack( uchar4* d_dest, float* red, float* blue, float* green, int* d_inside , bool* d_mask, int N){
int n = threadIdx.x + blockDim.x * blockIdx.x;
if (n >= N) {
return;
}
if (d_mask[n] == 1) {
if (d_inside[n] == 1) {
      d_dest[n].x = (unsigned char)(red[n]);   // cast via unsigned char: values up to 255 overflow a signed char
      d_dest[n].y = (unsigned char)(blue[n]);
      d_dest[n].z = (unsigned char)(green[n]);
}
}
}
void your_blend(
//Input
const uchar4* const h_sourceImg, const size_t numRowsSource, const size_t numColsSource, const uchar4* const h_destImg,
//Output
uchar4* const h_blendedImg) {
printf("row: %lu, col: %lu\n", numRowsSource, numColsSource);
int N = numRowsSource * numColsSource;
bool* d_mask;
int* d_inside;
uchar4* d_source;
uchar4* d_dest;
float* d_source_red;
float* d_source_blue;
float* d_source_green;
float *d_dest_red;
float *d_dest_blue;
float *d_dest_green;
float *red;
float *blue;
float *green;
float *red2;
float *blue2;
float *green2;
checkCudaErrors(cudaMalloc((void **)&d_mask , N * sizeof(bool ) ) );
checkCudaErrors(cudaMalloc((void **)&d_inside , N * sizeof(int ) ) );
checkCudaErrors(cudaMalloc((void **)&d_source , N * sizeof(uchar4) ) );
checkCudaErrors(cudaMalloc((void **)&d_dest , N * sizeof(uchar4) ) );
checkCudaErrors(cudaMalloc((void **)&d_source_red , N * sizeof(float ) ) );
checkCudaErrors(cudaMalloc((void **)&d_source_blue , N * sizeof(float ) ) );
checkCudaErrors(cudaMalloc((void **)&d_source_green , N * sizeof(float ) ) );
checkCudaErrors(cudaMalloc((void **)&d_dest_red , N * sizeof(float ) ) );
checkCudaErrors(cudaMalloc((void **)&d_dest_blue , N * sizeof(float ) ) );
checkCudaErrors(cudaMalloc((void **)&d_dest_green , N * sizeof(float ) ) );
checkCudaErrors(cudaMalloc((void **)&red , N * sizeof(float ) ) );
checkCudaErrors(cudaMalloc((void **)&blue , N * sizeof(float ) ) );
checkCudaErrors(cudaMalloc((void **)&green , N * sizeof(float ) ) );
checkCudaErrors(cudaMalloc((void **)&red2 , N * sizeof(float ) ) );
checkCudaErrors(cudaMalloc((void **)&blue2 , N * sizeof(float ) ) );
checkCudaErrors(cudaMalloc((void **)&green2 , N * sizeof(float ) ) );
checkCudaErrors(cudaMemcpy(d_source,h_sourceImg,N* sizeof(uchar4), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_dest ,h_destImg ,N* sizeof(uchar4), cudaMemcpyHostToDevice));
cal_mask<<<N/1024 + 1 , 1024 >>>(d_mask, d_source, N);
cudaDeviceSynchronize();
isInside<<<N/1024 + 1 , 1024 >>>(d_inside, d_mask, N, numColsSource);
separateChannels<<<N/1024 + 1, 1024>>>(d_source_red, d_source_blue, d_source_green, d_source, N);
separateChannels<<<N/1024 + 1, 1024>>>(d_dest_red, d_dest_blue, d_dest_green, d_dest, N);
separateChannels<<<N/1024 + 1, 1024>>>(red, blue, green, d_source, N);
cudaDeviceSynchronize();
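  // ping-pong between the two sets of buffers: each loop iteration performs two Jacobi sweeps
  // per channel, giving the 800 iterations described in the header comment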
for (int i =0; i < 400; i++) {
jacobi<<<N/1024 + 1, 1024>>>(red2, red, d_source_red, d_dest_red, d_inside, d_mask, N, numColsSource);
jacobi<<<N/1024 + 1, 1024>>>(blue2, blue, d_source_blue, d_dest_blue,d_inside, d_mask, N, numColsSource);
jacobi<<<N/1024 + 1, 1024>>>(green2, green, d_source_green, d_dest_green,d_inside, d_mask, N, numColsSource);
cudaDeviceSynchronize();
jacobi<<<N/1024 + 1, 1024>>>(red, red2, d_source_red, d_dest_red, d_inside, d_mask, N, numColsSource);
jacobi<<<N/1024 + 1, 1024>>>(blue, blue2, d_source_blue, d_dest_blue, d_inside, d_mask, N, numColsSource);
jacobi<<<N/1024 + 1, 1024>>>(green, green2, d_source_green, d_dest_green, d_inside, d_mask, N, numColsSource);
cudaDeviceSynchronize();
}
storeBack<<<N/1024 + 1, 1024 >>>(d_dest, red, blue, green, d_inside, d_mask , N);
cudaDeviceSynchronize();
checkCudaErrors(cudaMemcpy(h_blendedImg,d_dest,N* sizeof(uchar4), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaGetLastError());
} |
663505f9dd4474310f6301ade5c96a240934c404.hip | // !!! This is a file automatically generated by hipify!!!
/*********************************************************************

Copyright (C) 2017 by Sidney Ribeiro Junior

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

********************************************************************/
#define CUDA_API_PER_THREAD_DEFAULT_STREAM
#include "gpu.h"
#include <vector>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <algorithm>
#include <iostream>
#include <omp.h>
#include <string>
#include <sstream>
#include <hip/hip_runtime.h>
#include <map>
#include "structs.cuh"
#include "utils.cuh"
#include "inverted_index.cuh"
#include "simjoin.cuh"
#include "tests.cu"
#include "device_timing.hxx"
#define OUTPUT 1
using namespace std;
FileStats readInputFile(string &filename, vector<Entry> &entries, vector<Entry> &entriesmid, float threshold);
void allocVariables(DeviceVariables *dev_vars, Pair **similar_pairs, int num_terms, int block_size, int entries_size, int entriesmid_size, int num_sets);
void freeVariables(DeviceVariables *dev_vars, Pair **similar_pairs);
void write_output(Pair *similar_pairs, int totalSimilars, stringstream &outputfile);
/**
* Receives as parameters the training file name and the test file name
*/
int gpu(int argc, char **argv) {
if (argc != 7) {
cerr << "Wrong parameters. Correct usage: <executable> <input_file> <threshold> <output_file> <number_of_gpus> <size of blocks> <aggregate>" << endl;
exit(1);
}
DeviceTiming deviceTiming;
vector<Entry> entries, entriesmid;
float threshold = atof(argv[2]);
bool aggregate = atoi(argv[6]) == 1;
int gpuNum;
hipGetDeviceCount(&gpuNum);
if (gpuNum > atoi(argv[4]) && atoi(argv[4]) > 0)
gpuNum = atoi(argv[4]);
omp_set_num_threads(gpuNum);
string inputFileName(argv[1]);
// printf("Reading file %s...\n", inputFileName.c_str());
FileStats stats = readInputFile(inputFileName, entries, entriesmid, threshold);
ofstream ofsf(argv[3], ofstream::trunc);
ofsf.close();
ofstream ofsfileoutput(argv[3], ofstream::out | ofstream::app);
vector<stringstream*> outputString; // Each thread has an output string.
for (int i = 0; i < gpuNum; i++)
outputString.push_back(new stringstream);
// calculating maximum size of data structures
size_t free_mem, total_mem;
hipMemGetInfo(&free_mem, &total_mem);
long sizeEntries = (stats.start[stats.num_sets - 1] + stats.sizes[stats.num_sets - 1]) * sizeof(Entry);
long sizeVectorsN = stats.num_sets*sizeof(int);
long freeMem = free_mem - 3*sizeEntries - sizeVectorsN*4;
int block_size = atoi(argv[5]);
block_size = block_size < 1? freeMem / (stats.num_sets*(sizeof(float) + sizeof(Pair))): block_size;
block_size = block_size > stats.num_sets? stats.num_sets: block_size;
int block_num = ceil((float) stats.num_sets / block_size);
size_t finalResult = 0;
size_t probes = 0;
double start = gettime();
#pragma omp parallel num_threads(gpuNum)
{
int gpuid = omp_get_thread_num();
hipSetDevice(gpuid);
InvertedIndex index;
DeviceVariables dev_vars;
Pair *similar_pairs;
allocVariables(&dev_vars, &similar_pairs, stats.num_terms, block_size, entries.size(), entriesmid.size(), stats.num_sets);
gpuAssert(hipMemcpy(dev_vars.d_starts, &stats.start[0], stats.num_sets * sizeof(int), hipMemcpyHostToDevice));
gpuAssert(hipMemcpy(dev_vars.d_sizes, &stats.sizes[0], stats.num_sets * sizeof(int), hipMemcpyHostToDevice));
gpuAssert(hipMemcpy(dev_vars.d_entriesmid, &entriesmid[0], entriesmid.size() * sizeof(Entry), hipMemcpyHostToDevice));
gpuAssert(hipMemcpy(dev_vars.d_entries, &entries[0], entries.size() * sizeof(Entry), hipMemcpyHostToDevice));
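		// round-robin block assignment: each GPU builds the index for blocks gpuid, gpuid + gpuNum, ...
		// and probes every block j <= i against the currently indexed block i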
for (int i = gpuid; i < block_num; i+= gpuNum) {
int entries_block_start = i*block_size;
int entries_offset = stats.startmid[entries_block_start];
int last_set = entries_block_start + block_size >= stats.num_sets? stats.num_sets - 1: entries_block_start + block_size - 1;
int entries_block_size = last_set - entries_block_start + 1;
int entries_size = stats.startmid[last_set] + get_midprefix(stats.sizes[last_set], threshold) - entries_offset;
//printf("=========Indexed Block %d=========\nset_offset = %d\nentrie_offset: %d\nlast_set: %d\nentries_size: %d\n", i, entries_block_start, entries_offset, last_set, entries_size);
// build the inverted index for block i of size block_size
index = make_inverted_index(stats.num_sets, stats.num_terms, entries_size, entries_offset, entriesmid, &dev_vars);
int indexed_offset = stats.start[entries_block_start];
for (int j = 0; j <= i; j++) { // calculate similarity between indexed sets and probe sets
int probe_block_start = j*block_size;
int last_probe = probe_block_start + block_size > stats.num_sets? stats.num_sets - 1: probe_block_start + block_size - 1;
int probe_block_size = last_probe - probe_block_start + 1;
int probes_offset = stats.start[probe_block_start];
    // size filter: if even the largest probe set is smaller than threshold * the smallest indexed
    // set size, no pair from these two blocks can reach the threshold (assumes sets are ordered by size)
if (stats.sizes[last_probe] < threshold * stats.sizes[entries_block_start])
continue;
//printf("=========Probe Block %d=========\nprobe_block_start = %d\nprobe_offset: %d\nlast_probe: %d\nprobe_block_size: %d\n===============================\n", j, probe_block_start, probes_offset,last_probe, probe_block_size);
int totalSimilars = findSimilars(index, threshold, &dev_vars, similar_pairs, probe_block_start,
probe_block_size, probes_offset, entries_block_start, entries_block_size, indexed_offset, block_size, aggregate, deviceTiming);
finalResult += totalSimilars;
probes++;
//print_intersection(dev_vars.d_intersection, block_size, i, j); //print_result(similar_pairs, totalSimilars);
if (!aggregate) write_output(similar_pairs, totalSimilars, *outputString[gpuid]);
}
}
freeVariables(&dev_vars, &similar_pairs);
}
double end = gettime();
if (!aggregate) {
for (int i = 0; i < gpuNum; i++)
ofsfileoutput << outputString[i]->str();
}
ofsfileoutput.close();
std::cout
<< "Result: " << finalResult << std::endl
<< "Runtime: " << end - start << " secs" << std::endl;
std::cout << deviceTiming;
return 0;
}
FileStats readInputFile(string &filename, vector<Entry> &entries, vector<Entry> &entriesmid, float threshold) {
ifstream input(filename.c_str());
string line;
FileStats stats;
int accumulatedsize = 0;
int accsizemid = 0;
int set_id = 0;
while (!input.eof()) {
getline(input, line);
if (line == "") continue;
vector<string> tokens = split(line, ' ');
int size = tokens.size();
stats.sizes.push_back(size);
stats.start.push_back(accumulatedsize);
accumulatedsize += size;
int midprefix = get_midprefix(size, threshold);
stats.startmid.push_back(accsizemid);
accsizemid += midprefix;
for (int i = 0; i < size; i++) {
int term_id = atoi(tokens[i].c_str());
stats.num_terms = max(stats.num_terms, term_id + 1);
entries.push_back(Entry(set_id, term_id, i));
if (i < midprefix) {
entriesmid.push_back(Entry(set_id, term_id, i));
}
}
set_id++;
}
stats.num_sets = stats.sizes.size();
input.close();
return stats;
}
void allocVariables(DeviceVariables *dev_vars, Pair **similar_pairs, int num_terms, int block_size, int entries_size,
int entriesmid_size, int num_sets) {
// Inverted index's variables
gpuAssert(hipMalloc(&dev_vars->d_inverted_index, entriesmid_size * sizeof(Entry)));
gpuAssert(hipMalloc(&dev_vars->d_entriesmid, entriesmid_size * sizeof(Entry)));
gpuAssert(hipMalloc(&dev_vars->d_index, num_terms * sizeof(int)));
gpuAssert(hipMalloc(&dev_vars->d_count, num_terms * sizeof(int)));
// Variables used to perform the similarity join
gpuAssert(hipMalloc(&dev_vars->d_entries, entries_size * sizeof(Entry)));
gpuAssert(hipMalloc(&dev_vars->d_intersection, (1 + block_size * block_size) * sizeof(int)));
gpuAssert(hipMalloc(&dev_vars->d_pairs, block_size * block_size * sizeof(Pair)));
gpuAssert(hipMalloc(&dev_vars->d_sizes, num_sets * sizeof(int)));
gpuAssert(hipMalloc(&dev_vars->d_starts, num_sets * sizeof(int)));
*similar_pairs = (Pair *)malloc(sizeof(Pair)*block_size*block_size);
}
void freeVariables(DeviceVariables *dev_vars, Pair **similar_pairs) {
	// hipFree expects the device pointer itself, not the address of the host-side struct member
	hipFree(dev_vars->d_inverted_index);
	hipFree(dev_vars->d_entriesmid);
	hipFree(dev_vars->d_index);
	hipFree(dev_vars->d_count);
	hipFree(dev_vars->d_entries);
	hipFree(dev_vars->d_intersection);
	hipFree(dev_vars->d_pairs);
	hipFree(dev_vars->d_sizes);
	hipFree(dev_vars->d_starts);
free(*similar_pairs);
}
void write_output(Pair *similar_pairs, int totalSimilars, stringstream &outputfile) {
for (int i = 0; i < totalSimilars; i++) {
outputfile << "(" << similar_pairs[i].set_x << ", " << similar_pairs[i].set_y << "): " << similar_pairs[i].similarity << endl;
}
}
| 663505f9dd4474310f6301ade5c96a240934c404.cu | /*********************************************************************

Copyright (C) 2017 by Sidney Ribeiro Junior

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

********************************************************************/
#define CUDA_API_PER_THREAD_DEFAULT_STREAM
#include "gpu.h"
#include <vector>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <algorithm>
#include <iostream>
#include <omp.h>
#include <string>
#include <sstream>
#include <cuda.h>
#include <map>
#include "structs.cuh"
#include "utils.cuh"
#include "inverted_index.cuh"
#include "simjoin.cuh"
#include "tests.cu"
#include "device_timing.hxx"
#define OUTPUT 1
using namespace std;
FileStats readInputFile(string &filename, vector<Entry> &entries, vector<Entry> &entriesmid, float threshold);
void allocVariables(DeviceVariables *dev_vars, Pair **similar_pairs, int num_terms, int block_size, int entries_size, int entriesmid_size, int num_sets);
void freeVariables(DeviceVariables *dev_vars, Pair **similar_pairs);
void write_output(Pair *similar_pairs, int totalSimilars, stringstream &outputfile);
/**
* Receives as parameters the training file name and the test file name
*/
int gpu(int argc, char **argv) {
if (argc != 7) {
cerr << "Wrong parameters. Correct usage: <executable> <input_file> <threshold> <output_file> <number_of_gpus> <size of blocks> <aggregate>" << endl;
exit(1);
}
DeviceTiming deviceTiming;
vector<Entry> entries, entriesmid;
float threshold = atof(argv[2]);
bool aggregate = atoi(argv[6]) == 1;
int gpuNum;
cudaGetDeviceCount(&gpuNum);
if (gpuNum > atoi(argv[4]) && atoi(argv[4]) > 0)
gpuNum = atoi(argv[4]);
omp_set_num_threads(gpuNum);
string inputFileName(argv[1]);
// printf("Reading file %s...\n", inputFileName.c_str());
FileStats stats = readInputFile(inputFileName, entries, entriesmid, threshold);
ofstream ofsf(argv[3], ofstream::trunc);
ofsf.close();
ofstream ofsfileoutput(argv[3], ofstream::out | ofstream::app);
vector<stringstream*> outputString; // Each thread has an output string.
for (int i = 0; i < gpuNum; i++)
outputString.push_back(new stringstream);
// calculating maximum size of data structures
size_t free_mem, total_mem;
cudaMemGetInfo(&free_mem, &total_mem);
long sizeEntries = (stats.start[stats.num_sets - 1] + stats.sizes[stats.num_sets - 1]) * sizeof(Entry);
long sizeVectorsN = stats.num_sets*sizeof(int);
long freeMem = free_mem - 3*sizeEntries - sizeVectorsN*4;
int block_size = atoi(argv[5]);
block_size = block_size < 1? freeMem / (stats.num_sets*(sizeof(float) + sizeof(Pair))): block_size;
block_size = block_size > stats.num_sets? stats.num_sets: block_size;
int block_num = ceil((float) stats.num_sets / block_size);
size_t finalResult = 0;
size_t probes = 0;
double start = gettime();
#pragma omp parallel num_threads(gpuNum)
{
int gpuid = omp_get_thread_num();
cudaSetDevice(gpuid);
InvertedIndex index;
DeviceVariables dev_vars;
Pair *similar_pairs;
allocVariables(&dev_vars, &similar_pairs, stats.num_terms, block_size, entries.size(), entriesmid.size(), stats.num_sets);
gpuAssert(cudaMemcpy(dev_vars.d_starts, &stats.start[0], stats.num_sets * sizeof(int), cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(dev_vars.d_sizes, &stats.sizes[0], stats.num_sets * sizeof(int), cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(dev_vars.d_entriesmid, &entriesmid[0], entriesmid.size() * sizeof(Entry), cudaMemcpyHostToDevice));
gpuAssert(cudaMemcpy(dev_vars.d_entries, &entries[0], entries.size() * sizeof(Entry), cudaMemcpyHostToDevice));
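		// round-robin block assignment: each GPU builds the index for blocks gpuid, gpuid + gpuNum, ...
		// and probes every block j <= i against the currently indexed block i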
for (int i = gpuid; i < block_num; i+= gpuNum) {
int entries_block_start = i*block_size;
int entries_offset = stats.startmid[entries_block_start];
int last_set = entries_block_start + block_size >= stats.num_sets? stats.num_sets - 1: entries_block_start + block_size - 1;
int entries_block_size = last_set - entries_block_start + 1;
int entries_size = stats.startmid[last_set] + get_midprefix(stats.sizes[last_set], threshold) - entries_offset;
//printf("=========Indexed Block %d=========\nset_offset = %d\nentrie_offset: %d\nlast_set: %d\nentries_size: %d\n", i, entries_block_start, entries_offset, last_set, entries_size);
// build the inverted index for block i of size block_size
index = make_inverted_index(stats.num_sets, stats.num_terms, entries_size, entries_offset, entriesmid, &dev_vars);
int indexed_offset = stats.start[entries_block_start];
for (int j = 0; j <= i; j++) { // calculate similarity between indexed sets and probe sets
int probe_block_start = j*block_size;
int last_probe = probe_block_start + block_size > stats.num_sets? stats.num_sets - 1: probe_block_start + block_size - 1;
int probe_block_size = last_probe - probe_block_start + 1;
int probes_offset = stats.start[probe_block_start];
// size filtering
if (stats.sizes[last_probe] < threshold * stats.sizes[entries_block_start])
continue;
//printf("=========Probe Block %d=========\nprobe_block_start = %d\nprobe_offset: %d\nlast_probe: %d\nprobe_block_size: %d\n===============================\n", j, probe_block_start, probes_offset,last_probe, probe_block_size);
int totalSimilars = findSimilars(index, threshold, &dev_vars, similar_pairs, probe_block_start,
probe_block_size, probes_offset, entries_block_start, entries_block_size, indexed_offset, block_size, aggregate, deviceTiming);
finalResult += totalSimilars;
probes++;
//print_intersection(dev_vars.d_intersection, block_size, i, j); //print_result(similar_pairs, totalSimilars);
if (!aggregate) write_output(similar_pairs, totalSimilars, *outputString[gpuid]);
}
}
freeVariables(&dev_vars, &similar_pairs);
}
double end = gettime();
if (!aggregate) {
for (int i = 0; i < gpuNum; i++)
ofsfileoutput << outputString[i]->str();
}
ofsfileoutput.close();
std::cout
<< "Result: " << finalResult << std::endl
<< "Runtime: " << end - start << " secs" << std::endl;
std::cout << deviceTiming;
return 0;
}
FileStats readInputFile(string &filename, vector<Entry> &entries, vector<Entry> &entriesmid, float threshold) {
ifstream input(filename.c_str());
string line;
FileStats stats;
int accumulatedsize = 0;
int accsizemid = 0;
int set_id = 0;
while (!input.eof()) {
getline(input, line);
if (line == "") continue;
vector<string> tokens = split(line, ' ');
int size = tokens.size();
stats.sizes.push_back(size);
stats.start.push_back(accumulatedsize);
accumulatedsize += size;
int midprefix = get_midprefix(size, threshold);
stats.startmid.push_back(accsizemid);
accsizemid += midprefix;
for (int i = 0; i < size; i++) {
int term_id = atoi(tokens[i].c_str());
stats.num_terms = max(stats.num_terms, term_id + 1);
entries.push_back(Entry(set_id, term_id, i));
if (i < midprefix) {
entriesmid.push_back(Entry(set_id, term_id, i));
}
}
set_id++;
}
stats.num_sets = stats.sizes.size();
input.close();
return stats;
}
void allocVariables(DeviceVariables *dev_vars, Pair **similar_pairs, int num_terms, int block_size, int entries_size,
int entriesmid_size, int num_sets) {
// Inverted index's variables
gpuAssert(cudaMalloc(&dev_vars->d_inverted_index, entriesmid_size * sizeof(Entry)));
gpuAssert(cudaMalloc(&dev_vars->d_entriesmid, entriesmid_size * sizeof(Entry)));
gpuAssert(cudaMalloc(&dev_vars->d_index, num_terms * sizeof(int)));
gpuAssert(cudaMalloc(&dev_vars->d_count, num_terms * sizeof(int)));
// Variables used to perform the similarity join
gpuAssert(cudaMalloc(&dev_vars->d_entries, entries_size * sizeof(Entry)));
gpuAssert(cudaMalloc(&dev_vars->d_intersection, (1 + block_size * block_size) * sizeof(int)));
gpuAssert(cudaMalloc(&dev_vars->d_pairs, block_size * block_size * sizeof(Pair)));
gpuAssert(cudaMalloc(&dev_vars->d_sizes, num_sets * sizeof(int)));
gpuAssert(cudaMalloc(&dev_vars->d_starts, num_sets * sizeof(int)));
*similar_pairs = (Pair *)malloc(sizeof(Pair)*block_size*block_size);
}
void freeVariables(DeviceVariables *dev_vars, Pair **similar_pairs) {
	// cudaFree expects the device pointer itself, not the address of the host-side struct member
	cudaFree(dev_vars->d_inverted_index);
	cudaFree(dev_vars->d_entriesmid);
	cudaFree(dev_vars->d_index);
	cudaFree(dev_vars->d_count);
	cudaFree(dev_vars->d_entries);
	cudaFree(dev_vars->d_intersection);
	cudaFree(dev_vars->d_pairs);
	cudaFree(dev_vars->d_sizes);
	cudaFree(dev_vars->d_starts);
free(*similar_pairs);
}
void write_output(Pair *similar_pairs, int totalSimilars, stringstream &outputfile) {
for (int i = 0; i < totalSimilars; i++) {
outputfile << "(" << similar_pairs[i].set_x << ", " << similar_pairs[i].set_y << "): " << similar_pairs[i].similarity << endl;
}
}
|
370c72991414ce5937931d0a83f6f0a96eecf4a1.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdio>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml.hpp>
#include <opencv2/imgproc.hpp>
#include <hip/hip_runtime.h>
#define NBINS 100
#define BLOCKSIZE 16
#define RANGE_H 255. // 179.
#define RANGE_L 255.
#define RANGE_S 255.
const int WINDOWS_NUMBER = 4;
const int HIST_SIZE = (NBINS * 3);
const int FEATURES_SIZE = HIST_SIZE * WINDOWS_NUMBER + 1;
#define GAUSSIAN_LENGTH 8
#define GAUSSIAN_LENGTH_W 26 // 2 + 6 (parameters) * 4 (sections)
// 0 - class
// 1 - prob threshold
// 2 - h median
// 3 - h std dev
// 4 - l median
// 5 - l std dev
// 6 - s median
// 7 - s std dev
int colorBytes;
int grayBytes;
int ProbBytes;
float *d_ParametersHeightForSquare, *d_ParametersWidthForSquare;
unsigned char *d_FieldImage;
unsigned char *d_PixelClass;
float *d_Probability;
unsigned char *d_PixelClass2;
float *d_Probability2;
float *d_gaussians;
int numberOfGaussians;
float *d_gaussians2;
int numberOfGaussians2;
int *d_numberOfClasses;
int *d_kPerClass;
float *d_Histogram;
float *d_maxDistances;
int *d_numberOfClasses_ball;
int *d_kPerClass_ball;
float *d_Histogram_ball;
float *d_maxDistances_ball;
int NumberOfClasses_ball;
int nHistogram_ball;
int NumberOfClasses;
int nHistogram;
int numberHistograms;
float *d_histograms;
int numberHistograms2;
float *d_histograms2;
static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number)
{
if(err!=hipSuccess)
{
fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,hipGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
__global__ void ParticleFilterNClassCUDA_kernel( unsigned char* FieldImage,
float *Histogram,
float *distances,
int nHistogram,
int *kPerClass,
int numberOfClasses,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height,
int colorWidthStep,
int grayWidthStep,
int ProbWidthStep,
unsigned char* PixelClass,
float *Probability)
{
//2D Index of current thread
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
//Only valid threads perform memory I/O
if(xIndex < width && yIndex < height && xIndex % 4 == 0 && yIndex % 4 == 0 &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
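  // only every 4th pixel in x and y is evaluated (the % 4 tests above), and pure black (0,0,0)
  // pixels are treated as background and skipped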
const int gray_tid = yIndex * width + xIndex;
const int prob_tid = gray_tid; //yIndex * width + xIndex;
//Compute the window for this point
const int HalfWindowWidth = (int)round(((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3])/2.0f);
const int HalfWindowHeight = (int)round(((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3])/2.0);
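  // the half-window sizes are linear functions of the row index yIndex -- a simple perspective
  // model parameterised by the two Parameters*forSquare vectors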
int HHistogramTop[NBINS];
int LHistogramTop[NBINS];
int SHistogramTop[NBINS];
int HHistogrambutton[NBINS];
int LHistogrambutton[NBINS];
int SHistogrambutton[NBINS];
int bytesNbins = NBINS * sizeof(int);
memset(HHistogramTop, 0, bytesNbins);
memset(LHistogramTop, 0, bytesNbins);
memset(SHistogramTop, 0, bytesNbins);
memset(HHistogrambutton, 0, bytesNbins);
memset(LHistogrambutton, 0, bytesNbins);
memset(SHistogrambutton, 0, bytesNbins);
int SumaValidPixels;
int ValidTop = 0;
int ValidButton = 0;
int limitii = min(yIndex + HalfWindowHeight, height - 1);
int limitjj = min(xIndex + HalfWindowWidth, width - 1);
for (int ii = max(yIndex - HalfWindowHeight, 0); ii < limitii; ii++){
for (int jj = max(xIndex - HalfWindowWidth, 0); jj < limitjj; jj++){
//Get the color image values
const int colorIdPixel = ii * colorWidthStep + (3 * jj);
const unsigned char h_pixel = FieldImage[colorIdPixel ];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (!(h_pixel == 0 && l_pixel == 0 && s_pixel == 0))
{
if (ii < yIndex){
ValidTop++;
//Make the TOP histogram
HHistogramTop[(int)(h_pixel * NBINS/RANGE_H)]++;
LHistogramTop[(int)(l_pixel * NBINS/RANGE_L)]++;
SHistogramTop[(int)(s_pixel * NBINS/RANGE_S)]++;
}
else{
ValidButton++;
//Make the BUTTON histogram
HHistogrambutton[(int)(h_pixel * NBINS/RANGE_H)]++;
LHistogrambutton[(int)(l_pixel * NBINS/RANGE_L)]++;
SHistogrambutton[(int)(s_pixel * NBINS/RANGE_S)]++;
}
}
}
}
SumaValidPixels = ValidButton + ValidTop;
double a = fabs((double)ValidTop/SumaValidPixels - (double)ValidButton/SumaValidPixels);
if (true) //SumaValidPixels > HalfWindowWidth * HalfWindowHeight * 1) //&& a < .3)
{
   //Check whether the histograms are similar
float* Distance = new float[nHistogram];
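   // each sqrtf(p*q) sum below is a Bhattacharyya-style coefficient between one of the six
   // sub-histograms (H/L/S for the top and bottom halves of the window) and the stored model;
   // Distance = 1 - mean coefficient, so smaller values mean more similar histograms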
for(int n = 0; n < nHistogram; n++){
Distance[n] = 0;
for (int K=0;K<NBINS;K++){
Distance[n] += sqrtf((HHistogramTop[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K]);
Distance[n] += sqrtf((LHistogramTop[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K + (NBINS)]);
Distance[n] += sqrtf((SHistogramTop[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K + (2 * NBINS)]);
Distance[n] += sqrtf((HHistogrambutton[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K + (3 * NBINS)]);
Distance[n] += sqrtf((LHistogrambutton[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K + (4 * NBINS)]);
Distance[n] += sqrtf((SHistogrambutton[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K + (5 * NBINS)]);
}
// Distance[n]=Distance[n]/((float)NBINS*6.0);
Distance[n] = (1- (Distance[n]/6.0)) ;//* SumaValidPixels;
}
float minDistance = Distance[0];
int minIndex = 0;
for(int n = 1; n < nHistogram; n++){
if(Distance[n] < minDistance){
minDistance = Distance[n];
minIndex = n;
}
}
delete[] Distance;
int kNum = 0;
for (int n = 0; n < numberOfClasses; n++) {
kNum += kPerClass[n];
if(minIndex < kNum ) //&& minDistance) // < distances[n])
{
PixelClass[gray_tid] = static_cast<unsigned char>(n + 1);
Probability[prob_tid] = static_cast<float>(minDistance);
break;
}
}
}
}
}
void ReserveCudaMemory(std::vector<std::vector<float> > Histogram, std::vector<float> maxDistances, int _nHistogram, std::vector<int> kPerClasses,
int SizeHistograms, int _NumberOfClasses, float *ParametersHeightForSquare, float *ParametersWidthForSquare){
//Calculate total number of bytes of input and output image
int ParametersForSquareBytes = 4 * sizeof(float);
int HistogramsBytes = SizeHistograms * sizeof(float);
NumberOfClasses =_NumberOfClasses;
nHistogram = _nHistogram;
float *h_histogram = new float[nHistogram*SizeHistograms];
for(int i = 0; i < nHistogram; i++){
for(int j = 0; j < SizeHistograms; j++){
h_histogram[j+i*SizeHistograms] = Histogram[i][j];
}
}
SAFE_CALL(hipMalloc<int>(&d_kPerClass, sizeof(int) * kPerClasses.size()),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_Histogram, nHistogram * HistogramsBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_maxDistances, sizeof(float) * maxDistances.size()),"CUDA Malloc Failed");
//SAFE_CALL(hipMemcpy(&d_numberOfClasses, &NumberOfClasses, sizeof(int), hipMemcpyHostToDevice),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_kPerClass, kPerClasses.data(), sizeof(int) * kPerClasses.size(), hipMemcpyHostToDevice),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_Histogram, h_histogram, HistogramsBytes * nHistogram, hipMemcpyHostToDevice),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_maxDistances, maxDistances.data(), sizeof(float) * maxDistances.size(), hipMemcpyHostToDevice),"CUDA Malloc Failed");
//Allocate device memory
SAFE_CALL(hipMalloc<float>(&d_ParametersHeightForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_ParametersWidthForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_ParametersHeightForSquare, ParametersHeightForSquare, ParametersForSquareBytes, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMemcpy(d_ParametersWidthForSquare, ParametersWidthForSquare, ParametersForSquareBytes, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
delete[] h_histogram;
}
void ReserveCudaMemoryTexture(std::vector<std::vector<float> > histograms, std::vector<std::vector<float> > histograms2, std::vector<std::vector<float> > gaussians, std::vector<float> ParametersHeightForSquare, std::vector<float> ParametersWidthForSquare,
cv::Mat FieldImage, cv::Mat PixelClass, cv::Mat Probability){
colorBytes = FieldImage.step * FieldImage.rows;
grayBytes = PixelClass.step * PixelClass.rows;
ProbBytes = Probability.cols * Probability.rows * sizeof(float) ;
int ParametersForSquareBytes = 4 * sizeof(float);
numberHistograms = histograms.size();
std::cout<<"Resevando memoria de cuda para "<< numberHistograms <<" histogramas";
int HistogramsBytes = numberHistograms * FEATURES_SIZE * sizeof(float);
float *h_histogram = new float[histograms.size() * histograms[0].size()];
for(unsigned i = 0; i < numberHistograms; i++){
for(unsigned j = 0; j < histograms[i].size(); j++){
h_histogram[j + i * FEATURES_SIZE] = histograms[i][j];
}
}
numberHistograms2 = histograms2.size();
std::cout<<"Resevando memoria de cuda para "<<numberHistograms2 <<" histogramas";
int HistogramsBytes2 = numberHistograms2 * FEATURES_SIZE * sizeof(float);
float *h_histogram2 = new float[histograms2.size() * histograms2[0].size()];
for(unsigned i = 0; i < numberHistograms2; i++){
for(unsigned j = 0; j < histograms2[i].size(); j++){
h_histogram2[j + i * FEATURES_SIZE] = histograms2[i][j];
}
}
numberOfGaussians = gaussians.size();
int gaussiansSize = gaussians.size() * GAUSSIAN_LENGTH_W * sizeof(float);
float *h_gaussians = new float[numberOfGaussians * GAUSSIAN_LENGTH_W];
for(int i = 0; i < numberOfGaussians; i++){
for(int j = 0; j < GAUSSIAN_LENGTH_W; j++){
h_gaussians[j + i * GAUSSIAN_LENGTH_W] = gaussians[i][j];
}
}
SAFE_CALL(hipMalloc<float>(&d_gaussians, gaussiansSize),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_gaussians, h_gaussians, gaussiansSize, hipMemcpyHostToDevice),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_histograms, HistogramsBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_histograms, h_histogram, HistogramsBytes, hipMemcpyHostToDevice),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_histograms2, HistogramsBytes2),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_histograms2, h_histogram2, HistogramsBytes2, hipMemcpyHostToDevice),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_ParametersHeightForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_ParametersWidthForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_ParametersHeightForSquare, ParametersHeightForSquare.data(), ParametersForSquareBytes, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMemcpy(d_ParametersWidthForSquare, ParametersWidthForSquare.data(), ParametersForSquareBytes, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_FieldImage, colorBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_PixelClass, grayBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_Probability, ProbBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_PixelClass2, grayBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_Probability2, ProbBytes),"CUDA Malloc Failed");
delete[] h_histogram;
delete[] h_histogram2;
delete[] h_gaussians;
}
void ReserveCudaMemoryBall(std::vector<std::vector<float> > Histogram, std::vector<float> maxDistances, int _nHistogram, std::vector<int> kPerClasses,
int SizeHistograms, int _NumberOfClasses){
int HistogramsBytes = SizeHistograms * sizeof(float);
NumberOfClasses_ball =_NumberOfClasses;
nHistogram_ball = _nHistogram;
 float *h_histogram = new float[nHistogram_ball*SizeHistograms];
 for(int i = 0; i < nHistogram_ball; i++){
for(int j = 0; j < SizeHistograms; j++){
h_histogram[j+i*SizeHistograms] = Histogram[i][j];
}
}
SAFE_CALL(hipMalloc<int>(&d_kPerClass_ball, sizeof(int) * kPerClasses.size()),"CUDA Malloc Failed");
 SAFE_CALL(hipMalloc<float>(&d_Histogram_ball, nHistogram_ball * HistogramsBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_maxDistances_ball, sizeof(float) * maxDistances.size()),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_kPerClass_ball, kPerClasses.data(), sizeof(int) * kPerClasses.size(), hipMemcpyHostToDevice),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_Histogram_ball, h_histogram, HistogramsBytes * nHistogram, hipMemcpyHostToDevice),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_maxDistances_ball, maxDistances.data(), sizeof(float) * maxDistances.size(), hipMemcpyHostToDevice),"CUDA Malloc Failed");
delete[] h_histogram;
}
void FreeCudaMemory(){
//Free the device memory
SAFE_CALL(hipFree(d_gaussians),"CUDA Free Failed");
SAFE_CALL(hipFree(d_histograms),"CUDA Free Failed");
SAFE_CALL(hipFree(d_histograms2),"CUDA Free Failed");
SAFE_CALL(hipFree(d_ParametersHeightForSquare),"CUDA Free Failed");
SAFE_CALL(hipFree(d_ParametersWidthForSquare),"CUDA Free Failed");
SAFE_CALL(hipFree(d_FieldImage),"CUDA Free Failed");
SAFE_CALL(hipFree(d_PixelClass),"CUDA Free Failed");
SAFE_CALL(hipFree(d_Probability),"CUDA Free Failed");
SAFE_CALL(hipFree(d_PixelClass2),"CUDA Free Failed");
SAFE_CALL(hipFree(d_Probability2),"CUDA Free Failed");
// SAFE_CALL(hipFree(d_kPerClass),"CUDA Malloc Failed");
// SAFE_CALL(hipFree(d_maxDistances),"CUDA Malloc Failed");
}
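// Host wrapper: allocates the device image/class/probability buffers, uploads the HLS field image,
// runs ParticleFilterNClassCUDA_kernel over the whole frame, copies the results back and frees the
// temporary buffers before returning.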
void ParticleFilterNClassCUDA(const cv::Mat& FieldImage, cv::Mat& PixelClass, cv::Mat& Probability)
{
// hipEvent_t start_hd, stop_hd;
// hipEvent_t start_dh, stop_dh;
// hipEvent_t start_k, stop_k;
// hipEventCreate(&start_hd); hipEventCreate(&stop_hd);
// hipEventCreate(&start_dh); hipEventCreate(&stop_dh);
// hipEventCreate(&start_k); hipEventCreate(&stop_k);
colorBytes = FieldImage.step * FieldImage.rows;
grayBytes = PixelClass.step * PixelClass.rows;
ProbBytes = Probability.cols * Probability.rows * sizeof(float) ;
// hipEventRecord(start_hd, 0);
SAFE_CALL(hipMalloc<unsigned char>(&d_FieldImage, colorBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_PixelClass, grayBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_Probability, ProbBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMemset(d_PixelClass, 0, grayBytes),"CUDA Memset Failed");
SAFE_CALL(hipMemcpy(d_FieldImage, FieldImage.ptr(), colorBytes, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
// hipEventRecord(stop_hd, 0); hipEventSynchronize(stop_hd);
//Specify a reasonable block size
const dim3 block(BLOCKSIZE ,BLOCKSIZE);
//Calculate grid size to cover the whole image
const dim3 grid((FieldImage.cols + block.x - 1)/block.x, (FieldImage.rows + block.y - 1)/block.y);
// hipEventRecord(start_k, 0);
//Launch the particle filter classification kernel
hipLaunchKernelGGL(( ParticleFilterNClassCUDA_kernel), dim3(grid), dim3(block), 0, 0, d_FieldImage, d_Histogram, d_maxDistances, nHistogram,
d_kPerClass, NumberOfClasses,
d_ParametersHeightForSquare,d_ParametersWidthForSquare, FieldImage.cols, FieldImage.rows, FieldImage.step,
PixelClass.step, Probability.step, d_PixelClass, d_Probability);
//Synchronize to check for any kernel launch errors
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
// hipEventRecord(stop_k, 0); hipEventSynchronize(stop_k);
// hipEventRecord(start_dh, 0);
//Copy back data from device memory to the OpenCV output images
SAFE_CALL(hipMemcpy(PixelClass.ptr(), d_PixelClass,grayBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(hipMemcpy(Probability.ptr(), d_Probability,ProbBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(hipFree(d_FieldImage),"CUDA Free Failed");
SAFE_CALL(hipFree(d_PixelClass),"CUDA Free Failed");
SAFE_CALL(hipFree(d_Probability),"CUDA Free Failed");
// hipEventRecord(stop_dh, 0); hipEventSynchronize(stop_dh);
// float hostToDeviceTime, deviceToHostTime, kernelTime;
// hipEventElapsedTime(&hostToDeviceTime, start_hd, stop_hd);
// hipEventElapsedTime(&deviceToHostTime, start_dh, stop_dh);
// hipEventElapsedTime(&kernelTime, start_k, stop_k);
// printf("Tiempo de copiar datos de host to device %f \n", hostToDeviceTime);
// printf("Tiempo de copiar datos de device to host %f \n", deviceToHostTime);
// printf("Tiempo de kernel %f en milisegundos\n", kernelTime);
// hipEventDestroy(start_hd); hipEventDestroy(stop_hd);
// hipEventDestroy(start_dh); hipEventDestroy(stop_dh);
// hipEventDestroy(start_k); hipEventDestroy(stop_k);
}
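// Ball variant of the host wrapper. Note that it currently launches ParticleFilterNClassCUDA_kernel
// with the player model buffers rather than ParticleFilterBallCUDA_kernel with the *_ball buffers.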
void ParticleFilterBallCUDA(const cv::Mat& FieldImage, cv::Mat& PixelClass, cv::Mat& Probability)
{
colorBytes = FieldImage.step * FieldImage.rows;
grayBytes = PixelClass.step * PixelClass.rows;
ProbBytes = Probability.cols * Probability.rows * sizeof(float) ;
SAFE_CALL(hipMalloc<unsigned char>(&d_FieldImage, colorBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_PixelClass, grayBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_Probability, ProbBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMemset(d_PixelClass, 0, grayBytes),"CUDA Memset Failed");
SAFE_CALL(hipMemcpy(d_FieldImage, FieldImage.ptr(), colorBytes, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Specify a reasonable block size
const dim3 block(BLOCKSIZE ,BLOCKSIZE);
//Calculate grid size to cover the whole image
const dim3 grid((FieldImage.cols + block.x - 1)/block.x, (FieldImage.rows + block.y - 1)/block.y);
hipLaunchKernelGGL(( ParticleFilterNClassCUDA_kernel), dim3(grid), dim3(block), 0, 0, d_FieldImage, d_Histogram, d_maxDistances, nHistogram,
d_kPerClass, NumberOfClasses,
d_ParametersHeightForSquare,d_ParametersWidthForSquare, FieldImage.cols, FieldImage.rows, FieldImage.step,
PixelClass.step, Probability.step, d_PixelClass, d_Probability);
//Synchronize to check for any kernel launch errors
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
SAFE_CALL(hipMemcpy(PixelClass.ptr(), d_PixelClass, grayBytes, hipMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
SAFE_CALL(hipMemcpy(Probability.ptr(), d_Probability, ProbBytes, hipMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
SAFE_CALL(hipFree(d_FieldImage),"CUDA Free Failed");
SAFE_CALL(hipFree(d_PixelClass),"CUDA Free Failed");
SAFE_CALL(hipFree(d_Probability),"CUDA Free Failed");
}
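// Kernel: for every non-black HLS pixel it builds H/L/S histograms over a window whose size grows
// with the image row, compares them against each reference histogram with a Bhattacharyya-style
// score, and stores the class and distance of the closest model.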
__global__ void ParticleFilterBallCUDA_kernel( unsigned char* FieldImage,
float *Histogram,
float *distances,
int nHistogram,
int *kPerClass,
int numberOfClasses,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height,
int colorWidthStep,
int grayWidthStep,
int ProbWidthStep,
unsigned char* PixelClass,
float *Probability)
{
//2D Index of current thread
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
//Only valid threads perform memory I/O
if(xIndex < width && yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
FieldImage[color_tid] > 0 && FieldImage[color_tid + 1] > 0 && FieldImage[color_tid + 2] > 0){
const int gray_tid = yIndex * width + xIndex;
const int prob_tid = gray_tid; //yIndex * width + xIndex;
//Compute the window for this point
const int HalfWindowWidth = (int)round(((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3])/4.0f);
const int HalfWindowHeight = (int)round(((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3])/4.0);
int HHistogram[NBINS];
int LHistogram[NBINS];
int SHistogram[NBINS];
int bytesNbins = NBINS * sizeof(int);
memset(HHistogram, 0, bytesNbins);
memset(LHistogram, 0, bytesNbins);
memset(SHistogram, 0, bytesNbins);
int SumaValidPixels = 0; // must start at zero; it is only ever incremented below
int limitii = min(yIndex + HalfWindowHeight, height - 1);
int limitjj = min(xIndex + HalfWindowWidth, width - 1);
for (int ii = max(yIndex - HalfWindowHeight, 0); ii < limitii; ii++){
for (int jj = max(xIndex - HalfWindowWidth, 0); jj < limitjj; jj++){
//Get the color image values
const int colorIdPixel = ii * colorWidthStep + (3 * jj);
const unsigned char h_pixel = FieldImage[colorIdPixel];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (h_pixel > 0 && l_pixel > 0 && s_pixel > 0)
{
SumaValidPixels++;
HHistogram[(int)(h_pixel * NBINS/RANGE_H)]++;
LHistogram[(int)(l_pixel * NBINS/RANGE_L)]++;
SHistogram[(int)(s_pixel * NBINS/RANGE_S)]++;
}
}
}
float* Distance = new float[nHistogram];
for(int n = 0; n < nHistogram; n++){
Distance[n] = 0;
for (int K=0;K<NBINS;K++){
Distance[n] += sqrtf((HHistogram[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K]);
Distance[n] += sqrtf((LHistogram[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K + (NBINS)]);
Distance[n] += sqrtf((SHistogram[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K + (2 * NBINS)]);
}
// Distance[n]=Distance[n]/((float)NBINS*6.0);
float Decay=1.0;///(float)SumaValidPixels;
Distance[n]=(1- (Distance[n]/6.0))*Decay;
}
float minDistance = Distance[0];
int minIndex = 0;
for(int n = 1; n < nHistogram; n++){
if(Distance[n] < minDistance){
minDistance = Distance[n];
minIndex = n;
}
}
delete[] Distance;
int kNum = 0;
for (int n = 0; n < numberOfClasses; n++) {
kNum += kPerClass[n];
if(minIndex < kNum) // && minDistance < distances[n])
{
PixelClass[gray_tid] = static_cast<unsigned char>(n + 1);
Probability[prob_tid] = static_cast<float>(minDistance);
break;
}
}
}
}
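// Kernel: averages H, L and S over the adaptive window and, if enough of the window is valid,
// evaluates a Gaussian likelihood per class; the best class is written only when its likelihood
// exceeds that model's threshold (gaussians[... + 1]).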
__global__ void ParticleFilterBayes_kernel( unsigned char* FieldImage,
unsigned char* PixelClass,
float *Probability,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height,
int colorWidthStep,
float *gaussians,
int gaussiansNumber
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
if(xIndex < width && yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const int HalfWindowWidth = (int)round(((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3])/2.0f);
const int HalfWindowHeight = (int)round(((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3])/2.0f);
float H = 0;
float L = 0;
float S = 0;
float n = 0;
int limityy = min(yIndex + HalfWindowHeight, height - 1);
int limitxx = min(xIndex + HalfWindowWidth, width - 1);
for (int yy = max(yIndex - HalfWindowHeight, 0); yy < limityy; yy++){
for (int xx = max(xIndex - HalfWindowWidth, 0); xx < limitxx; xx++){
const int colorIdPixel = yy * colorWidthStep + (3 * xx);
const unsigned char h_pixel = FieldImage[colorIdPixel];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (!(h_pixel == 0 && l_pixel == 0 && s_pixel == 0)){
n++;
H += h_pixel;
L += l_pixel;
S += s_pixel;
}
}
}
if(n > HalfWindowHeight * HalfWindowWidth *.2){
float percent = n * 100 / (4 * HalfWindowHeight * HalfWindowWidth) * 1000;
H /= n;
L /= n;
S /= n;
int maxIndex = 0; // initialize in case no Gaussian exceeds maxProb
float maxProb = 0;
for (unsigned k = 0; k < gaussiansNumber; k++){
int gausPos = GAUSSIAN_LENGTH * k;
float PH = exp( (H - gaussians[gausPos + 2])*(H-gaussians[gausPos + 2]) / (-2*gaussians[gausPos + 3])) / sqrt(2* M_PI *gaussians[gausPos + 3]);
float PL = exp( (L - gaussians[gausPos + 4])*(L-gaussians[gausPos + 4]) / (-2*gaussians[gausPos + 5])) / sqrt(2* M_PI *gaussians[gausPos + 5]);
float PS = exp( (S - gaussians[gausPos + 6])*(S-gaussians[gausPos + 6]) / (-2*gaussians[gausPos + 7])) / sqrt(2* M_PI *gaussians[gausPos + 7]);
float prob = PH * PL * PS;
if(prob > maxProb){
maxProb = prob;
maxIndex = k;
}
}
if(maxProb > gaussians[GAUSSIAN_LENGTH * maxIndex + 1])
{
const int gray_tid = yIndex * width + xIndex;
PixelClass[gray_tid] = static_cast<unsigned char>(gaussians[GAUSSIAN_LENGTH * maxIndex]);
Probability[gray_tid] = static_cast<float>(maxProb*percent);
}
}
}
}
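// Kernel: splits the adaptive window into 4 horizontal stripes, averages H/L/S per stripe,
// discards windows whose stripes 1..3 have too few valid pixels, and scores each model by
// accumulating the per-stripe likelihood PH*PL*PS over stripes 1..3.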
__global__ void ParticleFilterWindowsBayes_kernel( unsigned char* FieldImage,
unsigned char* PixelClass,
float *Probability,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height,
int colorWidthStep,
float *gaussians,
int gaussiansNumber
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
if(xIndex < width && yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const int HalfWindowWidth = (int)round(((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3])/2.0f);
const int HalfWindowHeight = (int)round(((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3])/2.0f);
int limityy = min(yIndex + HalfWindowHeight, height - 1);
int limitxx = min(xIndex + HalfWindowWidth, width - 1);
int yy = max(yIndex - HalfWindowHeight, 0);
int step = (limityy - yy)/4;
int limitStep = yy + step;
float H[4];
float L[4];
float S[4];
float n[4];
bool validWindow = true;
int totalValidPixels = 0;
for (int i = 0; i < 4; i++, limitStep+=step){
H[i] = 0;
L[i] = 0;
S[i] = 0;
n[i] = 0;
for (; yy < limitStep; yy++){
for (int xx = max(xIndex - HalfWindowWidth, 0); xx < limitxx; xx++){
const int colorIdPixel = yy * colorWidthStep + (3 * xx);
const unsigned char h_pixel = FieldImage[colorIdPixel];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (!(h_pixel == 0 && l_pixel == 0 && s_pixel == 0)){
n[i]++;
H[i] += h_pixel;
L[i] += l_pixel;
S[i] += s_pixel;
}
}
}
H[i] /= n[i];
L[i] /= n[i];
S[i] /= n[i];
totalValidPixels += n[i];
}
// if(n[0] < (2 * HalfWindowWidth * step) * 0.1 ){
// validWindow = false;
// }
for (int i = 1; i < 4; ++i) {
if(n[i] < (2 * HalfWindowWidth * step) * 0.2 ){
validWindow = false;
break;
}
}
if(validWindow){
float percent = totalValidPixels * 100 / (4 * HalfWindowHeight * HalfWindowWidth) * 100000;
int maxIndex = 0; // initialize in case no model beats maxProb
float maxProb = 0;
for (unsigned k = 0; k < gaussiansNumber; k++){
int gausPos = GAUSSIAN_LENGTH_W * k + 2;
float prob = 1;
for (int i = 1; i < 4; ++i) {
int kernelPos = gausPos + 6 * i;
float PH = exp( (H[i] - gaussians[kernelPos])*(H[i]-gaussians[kernelPos]) / (-2*gaussians[kernelPos + 1])) / sqrt(2* M_PI *gaussians[kernelPos + 1]);
float PL = exp( (L[i] - gaussians[kernelPos + 2])*(L[i]-gaussians[kernelPos + 2]) / (-2*gaussians[kernelPos + 3])) / sqrt(2* M_PI *gaussians[kernelPos + 3]);
float PS = exp( (S[i] - gaussians[kernelPos + 4])*(S[i]-gaussians[kernelPos + 4]) / (-2*gaussians[kernelPos + 5])) / sqrt(2* M_PI *gaussians[kernelPos + 5]);
prob += PH * PL * PS;
}
if(prob == 1)
printf("uno\n");
if(prob > maxProb){
maxProb = prob;
maxIndex = k;
}
}
if(maxProb > gaussians[GAUSSIAN_LENGTH_W * maxIndex + 1])
{
const int gray_tid = yIndex * width + xIndex;
PixelClass[gray_tid] = static_cast<unsigned char>(gaussians[GAUSSIAN_LENGTH_W * maxIndex]);
Probability[gray_tid] = static_cast<float>(maxProb*percent);
}
}
}
}
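// Kernel: same striped window as above, but PixelClass2 is read as an edge mask that must be present
// in every stripe. Classification uses the minimum Euclidean distance between the stripe means and
// each model's means; the second pass over gaussians2 (goalkeepers/referees) is disabled by the early return.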
__global__ void DoubleParticleFilterWindowsBayes_kernel( unsigned char* FieldImage,
unsigned char* PixelClass,
float *Probability,
unsigned char* PixelClass2,
float *Probability2,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height,
int colorWidthStep,
float *gaussians, // Team 1, team 2 and grass
int gaussiansNumber,
float *gaussians2, // For goalkeepers and referees
int gaussiansNumber2
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
if(xIndex < width && yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const int HalfWindowWidth = (int)round(((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3])/2.0f);
const int HalfWindowHeight = (int)round(((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3])/2.0f);
int limityy = min(yIndex + HalfWindowHeight, height - 1);
int limitxx = min(xIndex + HalfWindowWidth, width - 1);
int yy = max(yIndex - HalfWindowHeight, 0);
int step = (limityy - yy)/4;
int limitStep = yy + step;
float H[4];
float L[4];
float S[4];
float n[4];
float e[4];
bool validWindow = true;
int totalValidPixels = 0;
int totalEdgePixels=0;
for (int i = 0; i < 4; i++, limitStep+=step){
H[i] = 0;
L[i] = 0;
S[i] = 0;
n[i] = 0;
e[i] = 0;
for (; yy < limitStep; yy++){
for (int xx = max(xIndex - HalfWindowWidth, 0); xx < limitxx; xx++){
const int edgepixelid = yy * width + xx;
const unsigned char edge_pixel=PixelClass2[edgepixelid];
if(edge_pixel>0){
e[i]++;
totalEdgePixels++;
}
const int colorIdPixel = yy * colorWidthStep + (3 * xx);
const unsigned char h_pixel = FieldImage[colorIdPixel];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (!(h_pixel == 0 && l_pixel == 0 && s_pixel == 0)){
n[i]++;
H[i] += h_pixel;
L[i] += l_pixel;
S[i] += s_pixel;
}
}
}
H[i] /= n[i];
L[i] /= n[i];
S[i] /= n[i];
totalValidPixels += n[i];
}
for (int i = 1; i < 4; ++i) {
if(n[i] < (2 * HalfWindowWidth * step) * 0.2 ){
validWindow = false;
break;
}
}
for (int i = 1; i < 4; ++i) {
if(e[i] < 1 ){
validWindow = false;
break;
}
}
float percentEdge = totalEdgePixels * 100 / (WINDOWS_NUMBER * HalfWindowHeight * HalfWindowWidth);
if(percentEdge<5){
validWindow=false;
}
if(validWindow){
float percent = totalValidPixels * 100 / (WINDOWS_NUMBER * HalfWindowHeight * HalfWindowWidth);
if (percent>40 && percent<70)
percent=80;
else if(percent>=70)
percent=100-percent;
// printf("p: %f\n",percent);
int maxIndex = 0;
float maxProb = 0;
float minDist;
#if 0
if (xIndex == 2251 && yIndex == 582) {
printf("--- Point\n");
printf("x = %d y = %d\n", xIndex, yIndex);
printf("--- Window dimensions\n");
printf("lx = %d ly = %d\n", HalfWindowWidth*2, HalfWindowHeight*2);
printf("--- Window\n");
printf("X: start = %d end = %d\n", max(xIndex - HalfWindowWidth, 0), limitxx);
printf("Y: start = %d end = %d\n", max(yIndex - HalfWindowHeight, 0), max(yIndex - HalfWindowHeight, 0) + 4*step);
printf("--- Gaussians\n");
printf("Number of gaussians = %d\n", gaussiansNumber);
printf("Gaussian length = %d\n", GAUSSIAN_LENGTH);
for (unsigned k = 0; k < gaussiansNumber; k++){
int gausPos = (GAUSSIAN_LENGTH_W * k) + 2;
for (int i = 1; i < 4; ++i) {
int kernelPos = gausPos + (6 * i);
for (unsigned int p = 0; p < 6; p+=2)
printf("%f %f ", gaussians[kernelPos+p], gaussians[kernelPos+1+p]);
}
}
printf("--- Color\n");
printf("Color width step = %d\n", colorWidthStep);
printf("\n");
printf("--- Colors in window\n");
yy = max(yIndex - HalfWindowHeight, 0);
limitStep = yy + step;
for (int i = 0; i < 4; i++, limitStep+=step) {
for (; yy < limitStep; yy++){
for (int xx = max(xIndex - HalfWindowWidth, 0); xx < limitxx; xx++) {
const int colorIdPixel = yy * colorWidthStep + (3 * xx);
const unsigned char h_pixel = FieldImage[colorIdPixel];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (!(h_pixel == 0 && l_pixel == 0 && s_pixel == 0)) {
printf("h = %u l = %u s = %u\n", h_pixel, l_pixel, s_pixel);
}
}
}
}
}
#endif
for (unsigned k = 0; k < gaussiansNumber; k++){
int gausPos = (GAUSSIAN_LENGTH_W * k) + 2;
float prob = 0;
for (int i = 1; i < 4; ++i) {
int kernelPos = gausPos + (6 * i);
/*
float PH = exp((-0.5 * (H[i] - gaussians[kernelPos ])*(H[i]-gaussians[kernelPos ])) / (gaussians[kernelPos + 1])) / (gaussians[kernelPos + 1]*2.506628);
float PL = exp((-0.5 * (L[i] - gaussians[kernelPos + 2])*(L[i]-gaussians[kernelPos + 2])) / (gaussians[kernelPos + 3])) / (gaussians[kernelPos + 3]*2.506628);
float PS = exp((-0.5 * (S[i] - gaussians[kernelPos + 4])*(S[i]-gaussians[kernelPos + 4])) / (gaussians[kernelPos + 5])) / (gaussians[kernelPos + 5]*2.506628);
//*/
// Distances to the mean
float PH = (H[i] - gaussians[kernelPos ])*(H[i] - gaussians[kernelPos ]);
float PL = (L[i] - gaussians[kernelPos + 2])*(L[i] - gaussians[kernelPos + 2]);
float PS = (S[i] - gaussians[kernelPos + 4])*(S[i] - gaussians[kernelPos + 4]);
//prob += log(1+PH) + log(1+PL) + log(1+PS);
prob += PH + PL + PS;
//printf("p: %f, ", prob);
}
prob=sqrt(prob);
// if(prob == 1)
// printf("uno\n");
// if(prob > maxProb){
// maxProb = prob;
// maxIndex = k;
// // printf("aquillegamos");
// }
if(k==0){
minDist=prob;
maxIndex = k;
}
else if(prob<minDist){
minDist=prob;
maxIndex = k;
}
}
// if(maxProb > gaussians[GAUSSIAN_LENGTH_W * maxIndex + 1])
// {
const int gray_tid = yIndex * width + xIndex;
PixelClass[gray_tid] = static_cast<unsigned char>(gaussians[GAUSSIAN_LENGTH_W * maxIndex]);
Probability[gray_tid] = static_cast<float>(minDist);//maxProb);
// }
return;
maxIndex = maxProb = 0;
for (unsigned k = 0; k < gaussiansNumber2; k++){
int gausPos = GAUSSIAN_LENGTH_W * k + 2;
float prob = 0;
for (int i = 1; i < 4; ++i) {
int kernelPos = gausPos + 6 * i;
float PH = exp( (H[i] - gaussians2[kernelPos])*(H[i]-gaussians2[kernelPos]) / (-2*gaussians2[kernelPos + 1])) / sqrt(2* M_PI *gaussians2[kernelPos + 1]);
float PL = exp( (L[i] - gaussians2[kernelPos + 2])*(L[i]-gaussians2[kernelPos + 2]) / (-2*gaussians2[kernelPos + 3])) / sqrt(2* M_PI *gaussians2[kernelPos + 3]);
float PS = exp( (S[i] - gaussians2[kernelPos + 4])*(S[i]-gaussians2[kernelPos + 4]) / (-2*gaussians2[kernelPos + 5])) / sqrt(2* M_PI *gaussians2[kernelPos + 5]);
prob += PH * PL * PS;
}
if(prob == 1)
printf("uno\n");
if(prob > maxProb){
maxProb = prob;
maxIndex = k;
}
}
if(maxProb > gaussians2[GAUSSIAN_LENGTH_W * maxIndex + 1])
{
const int gray_tid = yIndex * width + xIndex;
PixelClass2[gray_tid] = static_cast<unsigned char>(gaussians2[GAUSSIAN_LENGTH_W * maxIndex]);
Probability2[gray_tid] = static_cast<float>(maxProb*percent);
}
}
}
}
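// Kernel: striped-window variant that builds per-stripe H/L/S histograms and compares them against
// the flattened model histograms (stripes 1..3) with a Bhattacharyya-style distance; the lowest
// distance wins. The second model set (histograms2) is unreachable because of the early return.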
__global__ void DoubleParticleFilterWindowsHistogram_kernel( unsigned char* FieldImage,
unsigned char* PixelClass,
float *Probability,
unsigned char* PixelClass2,
float *Probability2,
int numberModels,
float* histograms,
int numberModels2,
float* histograms2,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height,
int colorWidthStep
// unsigned char* PixelClass2,
// float *Probability2,
// float *gaussians,
// int gaussiansNumber,
// float *gaussians2,
// int gaussiansNumber2
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
if(xIndex < width && yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const int gray_tid = yIndex * width + xIndex;
const int prob_tid = gray_tid; //yIndex * width + xIndex;
const int HalfWindowWidth = (int)round(((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3])/2.0f);
const int HalfWindowHeight = (int)round(((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3])/2.0f);
int limityy = min(yIndex + HalfWindowHeight, height - 1);
int limitxx = min(xIndex + HalfWindowWidth, width - 1);
int yy = max(yIndex - HalfWindowHeight, 0);
int step = (limityy - yy)/WINDOWS_NUMBER;
int limitStep = yy + step;
int validNumber[WINDOWS_NUMBER];
int histogramH[WINDOWS_NUMBER][NBINS];
int histogramL[WINDOWS_NUMBER][NBINS];
int histogramS[WINDOWS_NUMBER][NBINS];
bool validWindow = true;
int totalValidPixels = 0;
const int BYTES_HIST = NBINS * sizeof(int);
///Compute this particle's histogram
for (int i = 0; i < WINDOWS_NUMBER; i++, limitStep+=step){
memset(histogramH[i], 0, BYTES_HIST);
memset(histogramL[i], 0, BYTES_HIST);
memset(histogramS[i], 0, BYTES_HIST);
validNumber[i] = 0;
for (; yy < limitStep; yy++){
for (int xx = max(xIndex - HalfWindowWidth, 0); xx < limitxx; xx++){
const int colorIdPixel = yy * colorWidthStep + (3 * xx);
const unsigned char h_pixel = FieldImage[colorIdPixel];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (!(h_pixel == 0 && l_pixel == 0 && s_pixel == 0))
{
validNumber[i]++;
histogramH[i][(int)(h_pixel * NBINS/RANGE_H)]++;
histogramL[i][(int)(l_pixel * NBINS/RANGE_L)]++;
histogramS[i][(int)(s_pixel * NBINS/RANGE_S)]++;
}
}
}
totalValidPixels += validNumber[i];
}
/// Validate the window by valid-pixel count; the window may be discarded
// if(validNumber[0] < (2 * HalfWindowWidth * step) * 0.1 ){
// validWindow = false;
// }
for (int i = 1; i < WINDOWS_NUMBER; ++i) {
if(validNumber[i] < (2 * HalfWindowWidth * step) * 0.20 ){
validWindow = false;
break;
}
}
if(validWindow){
float percent = totalValidPixels * 100 / (WINDOWS_NUMBER * HalfWindowHeight * HalfWindowWidth);
if (percent>40 && percent<70)
percent=80;
else if(percent>=70)
percent=100-percent;
// printf("p: %f\n",percent);
float* distances = new float[numberModels];
///Check how similar the histograms are
for(int i = 0; i < numberModels; i++){
distances[i] = 0;
for (int k = 1; k < WINDOWS_NUMBER; k++){
int histogramPosition = (FEATURES_SIZE * i) + (k * HIST_SIZE);
for (int j = 0; j < NBINS; j++){
distances[i] += sqrtf((histogramH[k][j]/(float)validNumber[k]) * histograms[histogramPosition + j]);
distances[i] += sqrtf((histogramL[k][j]/(float)validNumber[k]) * histograms[histogramPosition + j + (NBINS)]);
distances[i] += sqrtf((histogramS[k][j]/(float)validNumber[k]) * histograms[histogramPosition + j + (2 * NBINS)]);
}
}
// distances[n] = distances[n]/((float)NBINS*6.0);
distances[i] = (1-(distances[i]/(3*(WINDOWS_NUMBER-1)))); //* SumaValidPixels;
// distances[i] = (3*(WINDOWS_NUMBER-1))-(distances[i]); //* SumaValidPixels;
}
float minDistance = distances[0];
int minIndex = 0;
for(int i = 1; i < numberModels; i++){
if(distances[i] < minDistance){
minDistance = distances[i];
minIndex = i;
}
}
PixelClass[gray_tid] = static_cast<unsigned char>(histograms[FEATURES_SIZE * minIndex]);
Probability[prob_tid] = static_cast<float>((minDistance * percent)); //+sqrtf(yIndex/1000)
delete[] distances;
return;
if(xIndex > (width/3) && xIndex < 2*(width/3))
return;
//&& yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
distances = new float[numberModels];
///Check how similar the histograms are (goalkeepers and referees)
for(int i = 0; i < numberModels2; i++){
distances[i] = 0;
for (int k = 1; k < WINDOWS_NUMBER; k++){
int histogramPosition = FEATURES_SIZE * i + k * HIST_SIZE;
for (int j = 0; j < NBINS; j++){
distances[i] += sqrtf((histogramH[k][j]/(float)totalValidPixels) * histograms2[histogramPosition + j]);
distances[i] += sqrtf((histogramL[k][j]/(float)totalValidPixels) * histograms2[histogramPosition + j + (NBINS)]);
distances[i] += sqrtf((histogramS[k][j]/(float)totalValidPixels) * histograms2[histogramPosition + j + (2 * NBINS)]);
}
}
// distances[n] = distances[n]/((float)NBINS*6.0);
distances[i] = (1-(distances[i]/(3*(WINDOWS_NUMBER-1)))); //* SumaValidPixels;
}
minDistance = distances[0];
minIndex = 0;
for(int i = 1; i < numberModels2; i++){
if(distances[i] < minDistance){
minDistance = distances[i];
minIndex = i;
}
}
PixelClass2[gray_tid] = static_cast<unsigned char>(histograms2[FEATURES_SIZE * minIndex]);
Probability2[prob_tid] = static_cast<float>(minDistance * percent);
delete[] distances;
}
}
}
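// Uploads the flattened Gaussian models and the window-size parameters and allocates the device
// buffers for the field image, class map and probability map.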
void ReserveCudaMemoryBayes(std::vector<std::vector<float> > gaussians, std::vector<float> ParametersHeightForSquare, std::vector<float> ParametersWidthForSquare, cv::Mat FieldImage, cv::Mat PixelClass, cv::Mat Probability){
int ParametersForSquareBytes = 4 * sizeof(float);
int gaussiansSize = gaussians.size() * GAUSSIAN_LENGTH_W * sizeof(float);
numberOfGaussians = gaussians.size();
colorBytes = FieldImage.step * FieldImage.rows;
grayBytes = PixelClass.step * PixelClass.rows;
ProbBytes = Probability.cols * Probability.rows * sizeof(float) ;
float *h_gaussians = new float[numberOfGaussians * GAUSSIAN_LENGTH_W];
for(int i = 0; i < numberOfGaussians; i++){
for(int j = 0; j < GAUSSIAN_LENGTH_W; j++){
h_gaussians[j + i * GAUSSIAN_LENGTH_W] = gaussians[i][j];
}
}
SAFE_CALL(hipMalloc<float>(&d_gaussians, gaussiansSize),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_gaussians, h_gaussians, gaussiansSize, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMalloc<float>(&d_ParametersHeightForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_ParametersWidthForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_ParametersHeightForSquare, ParametersHeightForSquare.data(), ParametersForSquareBytes, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMemcpy(d_ParametersWidthForSquare, ParametersWidthForSquare.data(), ParametersForSquareBytes, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_FieldImage, colorBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_PixelClass, grayBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_Probability, ProbBytes),"CUDA Malloc Failed");
delete[] h_gaussians;
}
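// Overload that additionally uploads a second set of Gaussians (gaussians2) and allocates the
// second class/probability buffers used by the double-pass kernels.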
void ReserveCudaMemoryBayes(std::vector<std::vector<float> > gaussians,std::vector<std::vector<float> > gaussians2, std::vector<float> ParametersHeightForSquare, std::vector<float> ParametersWidthForSquare, cv::Mat FieldImage, cv::Mat PixelClass, cv::Mat Probability){
int ParametersForSquareBytes = 4 * sizeof(float);
int gaussiansSize = gaussians.size() * GAUSSIAN_LENGTH_W * sizeof(float);
int gaussiansSize2 = gaussians2.size() * GAUSSIAN_LENGTH_W * sizeof(float);
numberOfGaussians = gaussians.size();
numberOfGaussians2 = gaussians2.size();
colorBytes = FieldImage.step * FieldImage.rows;
grayBytes = PixelClass.step * PixelClass.rows;
ProbBytes = Probability.cols * Probability.rows * sizeof(float) ;
float *h_gaussians = new float[numberOfGaussians * GAUSSIAN_LENGTH_W];
for(int i = 0; i < numberOfGaussians; i++){
for(int j = 0; j < GAUSSIAN_LENGTH_W; j++){
h_gaussians[j + i * GAUSSIAN_LENGTH_W] = gaussians[i][j];
}
}
float *h_gaussians2 = new float[numberOfGaussians2 * GAUSSIAN_LENGTH_W];
for(int i = 0; i < numberOfGaussians2; i++){
for(int j = 0; j < GAUSSIAN_LENGTH_W; j++){
h_gaussians2[j + i * GAUSSIAN_LENGTH_W] = gaussians2[i][j];
}
}
SAFE_CALL(hipMalloc<float>(&d_gaussians, gaussiansSize),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_gaussians, h_gaussians, gaussiansSize, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMalloc<float>(&d_gaussians2, gaussiansSize2),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_gaussians2, h_gaussians2, gaussiansSize2, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMalloc<float>(&d_ParametersHeightForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_ParametersWidthForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_ParametersHeightForSquare, ParametersHeightForSquare.data(), ParametersForSquareBytes, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMemcpy(d_ParametersWidthForSquare, ParametersWidthForSquare.data(), ParametersForSquareBytes, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_FieldImage, colorBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_PixelClass, grayBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_Probability, ProbBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_PixelClass2, grayBytes),"CUDA Malloc Failed");
SAFE_CALL(hipMalloc<float>(&d_Probability2, ProbBytes),"CUDA Malloc Failed");
delete[] h_gaussians;
delete[] h_gaussians2;
}
void FreeCudaMemoryBayes(){
//Free the device memory
SAFE_CALL(hipFree(d_gaussians),"CUDA Free Failed");
SAFE_CALL(hipFree(d_ParametersHeightForSquare),"CUDA Free Failed");
SAFE_CALL(hipFree(d_ParametersWidthForSquare),"CUDA Free Failed");
SAFE_CALL(hipFree(d_FieldImage),"CUDA Free Failed");
SAFE_CALL(hipFree(d_PixelClass),"CUDA Free Failed");
SAFE_CALL(hipFree(d_Probability),"CUDA Free Failed");
}
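// Kernel: per-pixel Bayes classifier (no window): evaluates each Gaussian on the pixel's H/L/S
// values and keeps the most likely class, short-circuiting on class 1 when its likelihood is non-zero.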
__global__ void ParticleFilterPixelsBayes_kernel( unsigned char* FieldImage,
unsigned char* PixelClass,
float *Probability,
int width,
int height,
int colorWidthStep,
float *gaussians,
int gaussiansNumber
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
if(xIndex < width && yIndex < height &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const int colorIdPixel = yIndex * colorWidthStep + (3 * xIndex);
const unsigned char H = FieldImage[colorIdPixel];
const unsigned char L = FieldImage[colorIdPixel + 1];
const unsigned char S = FieldImage[colorIdPixel + 2];
if(!(H==0 && L==0 && S==0)){
int maxIndex = 0; // initialize in case every likelihood is zero
double maxProb = 0;
for (int k = 0; k < gaussiansNumber; k++){
int gausPos = GAUSSIAN_LENGTH * k;
double PH = exp( (H - gaussians[gausPos + 2])*(H-gaussians[gausPos + 2]) / (-2*gaussians[gausPos + 3])) / sqrt(2* M_PI *gaussians[gausPos + 3]);
double PL = exp( (L - gaussians[gausPos + 4])*(L-gaussians[gausPos + 4]) / (-2*gaussians[gausPos + 5])) / sqrt(2* M_PI *gaussians[gausPos + 5]);
double PS = exp( (S - gaussians[gausPos + 6])*(S-gaussians[gausPos + 6]) / (-2*gaussians[gausPos + 7])) / sqrt(2* M_PI *gaussians[gausPos + 7]);
double prob = PH * PL * PS;
//printf("%f %f %f = %f\n",PH,PL,PS,prob);
if(gaussians[gausPos] == 1 && prob > 0){
maxProb = prob;
maxIndex = k;
//printf("p: %f\n",prob);
break;
}
if(prob > maxProb){
maxProb = prob;
maxIndex = k;
}
}
//printf("prob: %f k: %d\n", maxProb, maxIndex); //gaussians[GAUSSIAN_LENGTH * maxIndex]);
//printf("%f\n",gaussians[GAUSSIAN_LENGTH * maxIndex]);
const int gray_tid = yIndex * width + xIndex;
PixelClass[gray_tid] = static_cast<unsigned char>(gaussians[GAUSSIAN_LENGTH * maxIndex]);
Probability[gray_tid] = static_cast<float>(maxProb);
}
}
}
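// Kernel: blacks out any pixel whose H/L/S color has non-zero likelihood under the class-1 model
// (presumably the grass class), so later kernels ignore it.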
__global__ void RemoveByClass_kernel( unsigned char* FieldImage,
int width,
int height,
int colorWidthStep,
float *gaussians,
int gaussiansNumber
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
if(xIndex < width && yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const unsigned char H = FieldImage[color_tid];
const unsigned char L = FieldImage[color_tid + 1];
const unsigned char S = FieldImage[color_tid + 2];
for (int k = 0; k < gaussiansNumber; k++){
int gausPos = GAUSSIAN_LENGTH_W * k;
double PH = exp( (H - gaussians[gausPos + 2])*(H-gaussians[gausPos + 2]) / (-2*gaussians[gausPos + 3])) / sqrt(2* M_PI *gaussians[gausPos + 3]);
double PL = exp( (L - gaussians[gausPos + 4])*(L-gaussians[gausPos + 4]) / (-2*gaussians[gausPos + 5])) / sqrt(2* M_PI *gaussians[gausPos + 5]);
double PS = exp( (S - gaussians[gausPos + 6])*(S-gaussians[gausPos + 6]) / (-2*gaussians[gausPos + 7])) / sqrt(2* M_PI *gaussians[gausPos + 7]);
double prob = PH * PL * PS;
if(gaussians[gausPos] == 1 && prob > 0){
FieldImage[color_tid] = 0;
FieldImage[color_tid + 1] = 0;
FieldImage[color_tid + 2] = 0;
break;
}
}
}
}
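// Kernel: keeps a detection only if no other detection inside its adaptive neighbourhood has a
// smaller stored value in Probability (the value is treated as a distance here, lower is better).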
__global__ void nonMaximumSuppression_kernel(
unsigned char* PixelClass,
float *Probability,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int gray_tid = yIndex * width + xIndex;
if(xIndex < width && yIndex < height && PixelClass[gray_tid] != 0 //&& xIndex % 4 == 0 && yIndex % 4 == 0
){
const int NeighborhoodX = (int)((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3]);
const int NeighborhoodY = (int)((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3]);
int ClassValue = PixelClass[gray_tid];
double ProbValue = Probability[gray_tid];
bool IsMaximum = true;
for (int yy = yIndex - NeighborhoodY; IsMaximum && yy <= yIndex + NeighborhoodY; yy++) {
for (int xx= xIndex - NeighborhoodX; IsMaximum && xx <= xIndex + NeighborhoodX; xx++){
if (!(yy == yIndex && xx == xIndex) && yy < height && xx < width && yy > 0 && xx > 0){
const int pixelPosition = yy * width + xx;
// if (PixelClass[pixelPosition]>0 && Probability[pixelPosition] >ProbValue){
if (PixelClass[pixelPosition]>0 && Probability[pixelPosition] < ProbValue) {
// if ((PixelClass[pixelPosition] == ClassValue && Probability[pixelPosition] < ProbValue)){
PixelClass[gray_tid] = 0;
IsMaximum = false;
return;
}
}
}
}
}
}
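// Kernel: window-averaged variant of RemoveByClass_kernel. The adaptive half-window sizes are
// currently hard-coded to 1, so the mean is taken over a small local neighbourhood.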
__global__ void RemoveByClassWindow_kernel( unsigned char* FieldImage,
unsigned char* PixelClass,
float *Probability,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height,
int colorWidthStep,
float *gaussians,
int gaussiansNumber
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
if(xIndex < width && yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const int HalfWindowWidth =1;//(int)round(((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3])/2.0f);
const int HalfWindowHeight = 1;//(int)round(((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3])/2.0f);
float H = 0;
float L = 0;
float S = 0;
float n = 0;
int limityy = min(yIndex + HalfWindowHeight, height - 1);
int limitxx = min(xIndex + HalfWindowWidth, width - 1);
for (int yy = max(yIndex - HalfWindowHeight, 0); yy < limityy; yy++){
for (int xx = max(xIndex - HalfWindowWidth, 0); xx < limitxx; xx++){
const int colorIdPixel = yy * colorWidthStep + (3 * xx);
const unsigned char h_pixel = FieldImage[colorIdPixel];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (!(h_pixel == 0 && l_pixel == 0 && s_pixel == 0)){
n++;
H += h_pixel;
L += l_pixel;
S += s_pixel;
}
}
}
H /= n;
L /= n;
S /= n;
// FieldImage[color_tid] = 0;
// FieldImage[color_tid + 1] = 0;
// FieldImage[color_tid + 2] = 0;
for (unsigned k = 0; k < gaussiansNumber; k++){
int gausPos = GAUSSIAN_LENGTH * k;
float PH = exp( (H - gaussians[gausPos + 2])*(H-gaussians[gausPos + 2]) / (-2*gaussians[gausPos + 3])) / sqrt(2* M_PI *gaussians[gausPos + 3]);
float PL = exp( (L - gaussians[gausPos + 4])*(L-gaussians[gausPos + 4]) / (-2*gaussians[gausPos + 5])) / sqrt(2* M_PI *gaussians[gausPos + 5]);
float PS = exp( (S - gaussians[gausPos + 6])*(S-gaussians[gausPos + 6]) / (-2*gaussians[gausPos + 7])) / sqrt(2* M_PI *gaussians[gausPos + 7]);
float prob = PH * PL * PS;
if(gaussians[gausPos] == 1 && prob > 0){
// FieldImage[color_tid] = 180;
// FieldImage[color_tid + 1] = 255;
// FieldImage[color_tid + 2] = 255;
FieldImage[color_tid] = 0;
FieldImage[color_tid + 1] = 0;
FieldImage[color_tid + 2] = 0;
break;
}
}
}
}
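// Host pipeline: remove class-1 pixels, run the striped-window Bayes classifier, apply
// non-maximum suppression and copy the class/probability maps back to the host.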
void ParticleFilterBayesCUDA(cv::Mat &FieldImage, cv::Mat& PixelClass, cv::Mat& Probability)
{
SAFE_CALL(hipMemset(d_PixelClass, 0, grayBytes),"CUDA Memset Failed");
SAFE_CALL(hipMemcpy(d_FieldImage, FieldImage.ptr(), colorBytes, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Specify a reasonable block size
const dim3 block(BLOCKSIZE ,BLOCKSIZE);
//Calculate grid size to cover the whole image
const dim3 grid((FieldImage.cols + block.x - 1)/block.x, (FieldImage.rows + block.y - 1)/block.y);
hipLaunchKernelGGL(( RemoveByClass_kernel), dim3(grid), dim3(block), 0, 0, d_FieldImage, FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
// RemoveByClassWindow_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability, d_ParametersHeightForSquare, d_ParametersWidthForSquare,
// FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
// cv::Mat img = FieldImage.clone();
// SAFE_CALL(hipMemcpy(img.ptr(), d_FieldImage, colorBytes, hipMemcpyDeviceToHost),"CUDA Memcpy Host To Device Failed");
// cv::cvtColor(img, img, CV_HLS2BGR);
// cv::imshow("img", img);
hipLaunchKernelGGL(( ParticleFilterWindowsBayes_kernel), dim3(grid), dim3(block), 0, 0, d_FieldImage, d_PixelClass, d_Probability, d_ParametersHeightForSquare, d_ParametersWidthForSquare,
FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
// ParticleFilterPixelsBayes_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability,
// FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
hipLaunchKernelGGL(( nonMaximumSuppression_kernel), dim3(grid), dim3(block), 0, 0, d_PixelClass, d_Probability, d_ParametersHeightForSquare, d_ParametersWidthForSquare,FieldImage.cols, FieldImage.rows);
//Synchronize to check for any kernel launch errors
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
SAFE_CALL(hipMemcpy(PixelClass.ptr(), d_PixelClass,grayBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(hipMemcpy(Probability.ptr(), d_Probability,ProbBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
}
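// Host pipeline (double version): PixelClass2 is uploaded as an edge mask for
// DoubleParticleFilterWindowsBayes_kernel, then cleared; NMS is applied to both outputs and the
// class/probability maps plus the (possibly modified) field image are copied back.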
void ParticleFilterBayesCUDA(cv::Mat& FieldImage, cv::Mat& PixelClass, cv::Mat& Probability, cv::Mat& PixelClass2, cv::Mat& Probability2)
{
SAFE_CALL(hipMemcpy(d_FieldImage, FieldImage.ptr(), colorBytes, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMemset(d_PixelClass, 0, grayBytes),"CUDA Memset Failed");
SAFE_CALL(hipMemcpy(d_PixelClass2, PixelClass2.ptr(), grayBytes, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Specify a reasonable block size
const dim3 block(BLOCKSIZE ,BLOCKSIZE);
//Calculate grid size to cover the whole image
const dim3 grid((FieldImage.cols + block.x - 1)/block.x, (FieldImage.rows + block.y - 1)/block.y);
hipLaunchKernelGGL(( RemoveByClass_kernel), dim3(grid), dim3(block), 0, 0, d_FieldImage, FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
// RemoveByClassWindow_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability, d_ParametersHeightForSquare, d_ParametersWidthForSquare,
// FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
// cv::Mat img = FieldImage.clone();
// SAFE_CALL(hipMemcpy(img.ptr(), d_FieldImage, colorBytes, hipMemcpyDeviceToHost),"CUDA Memcpy Host To Device Failed");
// cv::cvtColor(img, img, CV_HLS2BGR);
// cv::imshow("img", img);
hipLaunchKernelGGL(( DoubleParticleFilterWindowsBayes_kernel), dim3(grid), dim3(block), 0, 0, d_FieldImage, d_PixelClass, d_Probability, d_PixelClass2, d_Probability2, d_ParametersHeightForSquare, d_ParametersWidthForSquare,
FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians, d_gaussians2, numberOfGaussians2);
// ParticleFilterPixelsBayes_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability,
// FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
SAFE_CALL(hipMemset(d_PixelClass2, 0, grayBytes),"CUDA Memset Failed");
hipLaunchKernelGGL(( nonMaximumSuppression_kernel), dim3(grid), dim3(block), 0, 0, d_PixelClass, d_Probability, d_ParametersHeightForSquare, d_ParametersWidthForSquare,FieldImage.cols, FieldImage.rows);
//Synchronize to check for any kernel launch errors
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
hipLaunchKernelGGL(( nonMaximumSuppression_kernel), dim3(grid), dim3(block), 0, 0, d_PixelClass2, d_Probability2, d_ParametersHeightForSquare, d_ParametersWidthForSquare,FieldImage.cols, FieldImage.rows);
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
SAFE_CALL(hipMemcpy(PixelClass.ptr(), d_PixelClass,grayBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(hipMemcpy(Probability.ptr(), d_Probability,ProbBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(hipMemcpy(PixelClass2.ptr(), d_PixelClass2,grayBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(hipMemcpy(Probability2.ptr(), d_Probability2,ProbBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(hipMemcpy(FieldImage.ptr(), d_FieldImage, colorBytes, hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
}
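// Host pipeline (histogram version): remove class-1 pixels, run the striped-window histogram
// classifier against both model sets, apply NMS to the first output and copy everything back.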
void ParticleFilterHistCUDA(cv::Mat& FieldImage, cv::Mat& PixelClass, cv::Mat& Probability, cv::Mat& PixelClass2, cv::Mat& Probability2)
{
SAFE_CALL(hipMemcpy(d_FieldImage, FieldImage.ptr(), colorBytes, hipMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMemset(d_PixelClass, 0, grayBytes),"CUDA Memset Failed");
SAFE_CALL(hipMemset(d_PixelClass2, 0, grayBytes),"CUDA Memset Failed");
//Specify a reasonable block size
const dim3 block(BLOCKSIZE ,BLOCKSIZE);
//Calculate grid size to cover the whole image
const dim3 grid((FieldImage.cols + block.x - 1)/block.x, (FieldImage.rows + block.y - 1)/block.y);
hipLaunchKernelGGL(( RemoveByClass_kernel), dim3(grid), dim3(block), 0, 0, d_FieldImage, FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
// RemoveByClassWindow_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability, d_ParametersHeightForSquare, d_ParametersWidthForSquare,
// FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
//cv::Mat img = FieldImage.clone();
//SAFE_CALL(hipMemcpy(img.ptr(), d_FieldImage, colorBytes, hipMemcpyDeviceToHost),"CUDA Memcpy Host To Device Failed");
//cv::cvtColor(img, img, CV_HLS2BGR);
//cv::imshow("img", img);
hipLaunchKernelGGL(( DoubleParticleFilterWindowsHistogram_kernel), dim3(grid), dim3(block), 0, 0, d_FieldImage, d_PixelClass, d_Probability, d_PixelClass2, d_Probability2,
numberHistograms, d_histograms, numberHistograms2, d_histograms2,
d_ParametersHeightForSquare, d_ParametersWidthForSquare,
FieldImage.cols, FieldImage.rows, FieldImage.step);
// ParticleFilterPixelsBayes_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability,
// FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
hipLaunchKernelGGL(( nonMaximumSuppression_kernel), dim3(grid), dim3(block), 0, 0, d_PixelClass, d_Probability, d_ParametersHeightForSquare, d_ParametersWidthForSquare,FieldImage.cols, FieldImage.rows);
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
// hipLaunchKernelGGL(( nonMaximumSuppression_kernel), dim3(grid), dim3(block), 0, 0, d_PixelClass2, d_Probability2, d_ParametersHeightForSquare, d_ParametersWidthForSquare,FieldImage.cols, FieldImage.rows);
// SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
SAFE_CALL(hipMemcpy(PixelClass.ptr(), d_PixelClass,grayBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(hipMemcpy(Probability.ptr(), d_Probability,ProbBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(hipMemcpy(PixelClass2.ptr(), d_PixelClass2,grayBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(hipMemcpy(Probability2.ptr(), d_Probability2,ProbBytes,hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(hipMemcpy(FieldImage.ptr(), d_FieldImage, colorBytes, hipMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
}
| 370c72991414ce5937931d0a83f6f0a96eecf4a1.cu | #include <iostream>
#include <cstdio>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml.hpp>
#include <opencv2/imgproc.hpp>
#include <cuda_runtime.h>
#define NBINS 100
#define BLOCKSIZE 16
#define RANGE_H 255. // 179.
#define RANGE_L 255.
#define RANGE_S 255.
const int WINDOWS_NUMBER = 4;
const int HIST_SIZE = (NBINS * 3);
const int FEATURES_SIZE = HIST_SIZE * WINDOWS_NUMBER + 1;
#define GAUSSIAN_LENGTH 8
#define GAUSSIAN_LENGTH_W 26 // 2 + 6 (parameters) * 4 (sections)
// 0 - class
// 1 - prob treshold
// 2 - h median
// 3 - h desv
// 4 - l median
// 5 - l desv
// 6 - s median
// 7 - s desv
int colorBytes;
int grayBytes;
int ProbBytes;
float *d_ParametersHeightForSquare, *d_ParametersWidthForSquare;
unsigned char *d_FieldImage;
unsigned char *d_PixelClass;
float *d_Probability;
unsigned char *d_PixelClass2;
float *d_Probability2;
float *d_gaussians;
int numberOfGaussians;
float *d_gaussians2;
int numberOfGaussians2;
int *d_numberOfClasses;
int *d_kPerClass;
float *d_Histogram;
float *d_maxDistances;
int *d_numberOfClasses_ball;
int *d_kPerClass_ball;
float *d_Histogram_ball;
float *d_maxDistances_ball;
int NumberOfClasses_ball;
int nHistogram_ball;
int NumberOfClasses;
int nHistogram;
int numberHistograms;
float *d_histograms;
int numberHistograms2;
float *d_histograms2;
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number)
{
if(err!=cudaSuccess)
{
fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,cudaGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
__global__ void ParticleFilterNClassCUDA_kernel( unsigned char* FieldImage,
float *Histogram,
float *distances,
int nHistogram,
int *kPerClass,
int numberOfClasses,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height,
int colorWidthStep,
int grayWidthStep,
int ProbWidthStep,
unsigned char* PixelClass,
float *Probability)
{
//2D Index of current thread
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
//Only valid threads perform memory I/O
if(xIndex < width && yIndex < height && xIndex % 4 == 0 && yIndex % 4 == 0 &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const int gray_tid = yIndex * width + xIndex;
const int prob_tid = gray_tid; //yIndex * width + xIndex;
//Compute the window for this point
const int HalfWindowWidth = (int)round(((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3])/2.0f);
const int HalfWindowHeight = (int)round(((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3])/2.0);
int HHistogramTop[NBINS];
int LHistogramTop[NBINS];
int SHistogramTop[NBINS];
int HHistogrambutton[NBINS];
int LHistogrambutton[NBINS];
int SHistogrambutton[NBINS];
int bytesNbins = NBINS * sizeof(int);
memset(HHistogramTop, 0, bytesNbins);
memset(LHistogramTop, 0, bytesNbins);
memset(SHistogramTop, 0, bytesNbins);
memset(HHistogrambutton, 0, bytesNbins);
memset(LHistogrambutton, 0, bytesNbins);
memset(SHistogrambutton, 0, bytesNbins);
int SumaValidPixels;
int ValidTop = 0;
int ValidButton = 0;
int limitii = min(yIndex + HalfWindowHeight, height - 1);
int limitjj = min(xIndex + HalfWindowWidth, width - 1);
for (int ii = max(yIndex - HalfWindowHeight, 0); ii < limitii; ii++){
for (int jj = max(xIndex - HalfWindowWidth, 0); jj < limitjj; jj++){
//Get the color image values
const int colorIdPixel = ii * colorWidthStep + (3 * jj);
const unsigned char h_pixel = FieldImage[colorIdPixel ];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (!(h_pixel == 0 && l_pixel == 0 && s_pixel == 0))
{
if (ii < yIndex){
ValidTop++;
//Make the TOP histogram
HHistogramTop[(int)(h_pixel * NBINS/RANGE_H)]++;
LHistogramTop[(int)(l_pixel * NBINS/RANGE_L)]++;
SHistogramTop[(int)(s_pixel * NBINS/RANGE_S)]++;
}
else{
ValidButton++;
//Make the BUTTON histogram
HHistogrambutton[(int)(h_pixel * NBINS/RANGE_H)]++;
LHistogrambutton[(int)(l_pixel * NBINS/RANGE_L)]++;
SHistogrambutton[(int)(s_pixel * NBINS/RANGE_S)]++;
}
}
}
}
SumaValidPixels = ValidButton + ValidTop;
double a = fabs((double)ValidTop/SumaValidPixels - (double)ValidButton/SumaValidPixels);
if (true) //SumaValidPixels > HalfWindowWidth * HalfWindowHeight * 1) //&& a < .3)
{
//Check how similar the histograms are
float* Distance = new float[nHistogram];
for(int n = 0; n < nHistogram; n++){
Distance[n] = 0;
for (int K=0;K<NBINS;K++){
Distance[n] += sqrtf((HHistogramTop[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K]);
Distance[n] += sqrtf((LHistogramTop[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K + (NBINS)]);
Distance[n] += sqrtf((SHistogramTop[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K + (2 * NBINS)]);
Distance[n] += sqrtf((HHistogrambutton[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K + (3 * NBINS)]);
Distance[n] += sqrtf((LHistogrambutton[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K + (4 * NBINS)]);
Distance[n] += sqrtf((SHistogrambutton[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K + (5 * NBINS)]);
}
// Distance[n]=Distance[n]/((float)NBINS*6.0);
Distance[n] = (1- (Distance[n]/6.0)) ;//* SumaValidPixels;
}
float minDistance = Distance[0];
int minIndex = 0;
for(int n = 1; n < nHistogram; n++){
if(Distance[n] < minDistance){
minDistance = Distance[n];
minIndex = n;
}
}
delete[] Distance;
int kNum = 0;
for (int n = 0; n < numberOfClasses; n++) {
kNum += kPerClass[n];
if(minIndex < kNum ) //&& minDistance) // < distances[n])
{
PixelClass[gray_tid] = static_cast<unsigned char>(n + 1);
Probability[prob_tid] = static_cast<float>(minDistance);
break;
}
}
}
}
}
void ReserveCudaMemory(std::vector<std::vector<float> > Histogram, std::vector<float> maxDistances, int _nHistogram, std::vector<int> kPerClasses,
int SizeHistograms, int _NumberOfClasses, float *ParametersHeightForSquare, float *ParametersWidthForSquare){
//Calculate total number of bytes of input and output image
int ParametersForSquareBytes = 4 * sizeof(float);
int HistogramsBytes = SizeHistograms * sizeof(float);
NumberOfClasses =_NumberOfClasses;
nHistogram = _nHistogram;
float *h_histogram = new float[nHistogram*SizeHistograms];
for(int i = 0; i < nHistogram; i++){
for(int j = 0; j < SizeHistograms; j++){
h_histogram[j+i*SizeHistograms] = Histogram[i][j];
}
}
SAFE_CALL(cudaMalloc<int>(&d_kPerClass, sizeof(int) * kPerClasses.size()),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_Histogram, nHistogram * HistogramsBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_maxDistances, sizeof(float) * maxDistances.size()),"CUDA Malloc Failed");
//SAFE_CALL(cudaMemcpy(&d_numberOfClasses, &NumberOfClasses, sizeof(int), cudaMemcpyHostToDevice),"CUDA Malloc Failed");
SAFE_CALL(cudaMemcpy(d_kPerClass, kPerClasses.data(), sizeof(int) * kPerClasses.size(), cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMemcpy(d_Histogram, h_histogram, HistogramsBytes * nHistogram, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMemcpy(d_maxDistances, maxDistances.data(), sizeof(float) * maxDistances.size(), cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Allocate device memory
SAFE_CALL(cudaMalloc<float>(&d_ParametersHeightForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_ParametersWidthForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMemcpy(d_ParametersHeightForSquare, ParametersHeightForSquare, ParametersForSquareBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMemcpy(d_ParametersWidthForSquare, ParametersWidthForSquare, ParametersForSquareBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
delete[] h_histogram;
}
void ReserveCudaMemoryTexture(std::vector<std::vector<float> > histograms, std::vector<std::vector<float> > histograms2, std::vector<std::vector<float> > gaussians, std::vector<float> ParametersHeightForSquare, std::vector<float> ParametersWidthForSquare,
cv::Mat FieldImage, cv::Mat PixelClass, cv::Mat Probability){
colorBytes = FieldImage.step * FieldImage.rows;
grayBytes = PixelClass.step * PixelClass.rows;
ProbBytes = Probability.cols * Probability.rows * sizeof(float) ;
int ParametersForSquareBytes = 4 * sizeof(float);
numberHistograms = histograms.size();
std::cout<<"Resevando memoria de cuda para "<< numberHistograms <<" histogramas";
int HistogramsBytes = numberHistograms * FEATURES_SIZE * sizeof(float);
float *h_histogram = new float[histograms.size() * histograms[0].size()];
for(unsigned i = 0; i < numberHistograms; i++){
for(unsigned j = 0; j < histograms[i].size(); j++){
h_histogram[j + i * FEATURES_SIZE] = histograms[i][j];
}
}
numberHistograms2 = histograms2.size();
std::cout<<"Resevando memoria de cuda para "<<numberHistograms2 <<" histogramas";
int HistogramsBytes2 = numberHistograms2 * FEATURES_SIZE * sizeof(float);
float *h_histogram2 = new float[histograms2.size() * histograms2[0].size()];
for(unsigned i = 0; i < numberHistograms2; i++){
for(unsigned j = 0; j < histograms2[i].size(); j++){
h_histogram2[j + i * FEATURES_SIZE] = histograms2[i][j];
}
}
numberOfGaussians = gaussians.size();
int gaussiansSize = gaussians.size() * GAUSSIAN_LENGTH_W * sizeof(float);
float *h_gaussians = new float[numberOfGaussians * GAUSSIAN_LENGTH_W];
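// Flatten the Gaussian models row-major: model i occupies GAUSSIAN_LENGTH_W consecutive floats.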
for(int i = 0; i < numberOfGaussians; i++){
for(int j = 0; j < GAUSSIAN_LENGTH_W; j++){
h_gaussians[j + i * GAUSSIAN_LENGTH_W] = gaussians[i][j];
}
}
SAFE_CALL(cudaMalloc<float>(&d_gaussians, gaussiansSize),"CUDA Malloc Failed");
SAFE_CALL(cudaMemcpy(d_gaussians, h_gaussians, gaussiansSize, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMalloc<float>(&d_histograms, HistogramsBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMemcpy(d_histograms, h_histogram, HistogramsBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMalloc<float>(&d_histograms2, HistogramsBytes2),"CUDA Malloc Failed");
SAFE_CALL(cudaMemcpy(d_histograms2, h_histogram2, HistogramsBytes2, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMalloc<float>(&d_ParametersHeightForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_ParametersWidthForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMemcpy(d_ParametersHeightForSquare, ParametersHeightForSquare.data(), ParametersForSquareBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMemcpy(d_ParametersWidthForSquare, ParametersWidthForSquare.data(), ParametersForSquareBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_FieldImage, colorBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_PixelClass, grayBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_Probability, ProbBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_PixelClass2, grayBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_Probability2, ProbBytes),"CUDA Malloc Failed");
delete[] h_histogram;
delete[] h_histogram2;
delete[] h_gaussians;
}
void ReserveCudaMemoryBall(std::vector<std::vector<float> > Histogram, std::vector<float> maxDistances, int _nHistogram, std::vector<int> kPerClasses,
int SizeHistograms, int _NumberOfClasses){
int HistogramsBytes = SizeHistograms * sizeof(float);
NumberOfClasses_ball =_NumberOfClasses;
nHistogram_ball = _nHistogram;
float *h_histogram = new float[nHistogram_ball * SizeHistograms];
for(int i = 0; i < nHistogram_ball; i++){
for(int j = 0; j < SizeHistograms; j++){
h_histogram[j+i*SizeHistograms] = Histogram[i][j];
}
}
SAFE_CALL(cudaMalloc<int>(&d_kPerClass_ball, sizeof(int) * kPerClasses.size()),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_Histogram_ball, nHistogram_ball * HistogramsBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_maxDistances_ball, sizeof(float) * maxDistances.size()),"CUDA Malloc Failed");
SAFE_CALL(cudaMemcpy(d_kPerClass_ball, kPerClasses.data(), sizeof(int) * kPerClasses.size(), cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMemcpy(d_Histogram_ball, h_histogram, HistogramsBytes * nHistogram_ball, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMemcpy(d_maxDistances_ball, maxDistances.data(), sizeof(float) * maxDistances.size(), cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
delete[] h_histogram;
}
void FreeCudaMemory(){
//Free the device memory
SAFE_CALL(cudaFree(d_gaussians),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_histograms),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_histograms2),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_ParametersHeightForSquare),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_ParametersWidthForSquare),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_FieldImage),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_PixelClass),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_Probability),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_PixelClass2),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_Probability2),"CUDA Free Failed");
// SAFE_CALL(cudaFree(d_kPerClass),"CUDA Malloc Failed");
// SAFE_CALL(cudaFree(d_maxDistances),"CUDA Malloc Failed");
}
void ParticleFilterNClassCUDA(const cv::Mat& FieldImage, cv::Mat& PixelClass, cv::Mat& Probability)
{
// cudaEvent_t start_hd, stop_hd;
// cudaEvent_t start_dh, stop_dh;
// cudaEvent_t start_k, stop_k;
// cudaEventCreate(&start_hd); cudaEventCreate(&stop_hd);
// cudaEventCreate(&start_dh); cudaEventCreate(&stop_dh);
// cudaEventCreate(&start_k); cudaEventCreate(&stop_k);
colorBytes = FieldImage.step * FieldImage.rows;
grayBytes = PixelClass.step * PixelClass.rows;
ProbBytes = Probability.cols * Probability.rows * sizeof(float) ;
// cudaEventRecord(start_hd, 0);
SAFE_CALL(cudaMalloc<unsigned char>(&d_FieldImage, colorBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_PixelClass, grayBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_Probability, ProbBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMemset(d_PixelClass, 0, grayBytes),"CUDA Memset Failed");
SAFE_CALL(cudaMemcpy(d_FieldImage, FieldImage.ptr(), colorBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
// cudaEventRecord(stop_hd, 0); cudaEventSynchronize(stop_hd);
//Specify a reasonable block size
const dim3 block(BLOCKSIZE ,BLOCKSIZE);
//Calculate grid size to cover the whole image
const dim3 grid((FieldImage.cols + block.x - 1)/block.x, (FieldImage.rows + block.y - 1)/block.y);
// cudaEventRecord(start_k, 0);
//Launch the classification kernel
ParticleFilterNClassCUDA_kernel<<<grid, block>>>(d_FieldImage, d_Histogram, d_maxDistances, nHistogram,
d_kPerClass, NumberOfClasses,
d_ParametersHeightForSquare,d_ParametersWidthForSquare, FieldImage.cols, FieldImage.rows, FieldImage.step,
PixelClass.step, Probability.step, d_PixelClass, d_Probability);
//Synchronize to check for any kernel launch errors
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
// cudaEventRecord(stop_k, 0); cudaEventSynchronize(stop_k);
// cudaEventRecord(start_dh, 0);
//Copy back data from destination device memory to OpenCV output image
SAFE_CALL(cudaMemcpy(PixelClass.ptr(), d_PixelClass,grayBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(cudaMemcpy(Probability.ptr(), d_Probability,ProbBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(cudaFree(d_FieldImage),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_PixelClass),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_Probability),"CUDA Free Failed");
// cudaEventRecord(stop_dh, 0); cudaEventSynchronize(stop_dh);
// float hostToDeviceTime, deviceToHostTime, kernelTime;
// cudaEventElapsedTime(&hostToDeviceTime, start_hd, stop_hd);
// cudaEventElapsedTime(&deviceToHostTime, start_dh, stop_dh);
// cudaEventElapsedTime(&kernelTime, start_k, stop_k);
// printf("Tiempo de copiar datos de host to device %f \n", hostToDeviceTime);
// printf("Tiempo de copiar datos de device to host %f \n", deviceToHostTime);
// printf("Tiempo de kernel %f en milisegundos\n", kernelTime);
// cudaEventDestroy(start_hd); cudaEventDestroy(stop_hd);
// cudaEventDestroy(start_dh); cudaEventDestroy(stop_dh);
// cudaEventDestroy(start_k); cudaEventDestroy(stop_k);
}
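// A minimal host-side usage sketch (hypothetical names and sizes; assumes the model
// histograms and per-class model counts were trained elsewhere and the input cv::Mat
// is a 3-channel HLS image):
// ReserveCudaMemory(modelHistograms, maxDistances, nModels, kPerClass,
// (int)modelHistograms[0].size(), nClasses, heightParams, widthParams);
// cv::Mat classes = cv::Mat::zeros(image.size(), CV_8UC1);
// cv::Mat prob = cv::Mat::zeros(image.size(), CV_32FC1);
// ParticleFilterNClassCUDA(image, classes, prob);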
void ParticleFilterBallCUDA(const cv::Mat& FieldImage, cv::Mat& PixelClass, cv::Mat& Probability)
{
colorBytes = FieldImage.step * FieldImage.rows;
grayBytes = PixelClass.step * PixelClass.rows;
ProbBytes = Probability.cols * Probability.rows * sizeof(float) ;
SAFE_CALL(cudaMalloc<unsigned char>(&d_FieldImage, colorBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_PixelClass, grayBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_Probability, ProbBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMemset(d_PixelClass, 0, grayBytes),"CUDA Memset Failed");
SAFE_CALL(cudaMemcpy(d_FieldImage, FieldImage.ptr(), colorBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Specify a reasonable block size
const dim3 block(BLOCKSIZE ,BLOCKSIZE);
//Calculate grid size to cover the whole image
const dim3 grid((FieldImage.cols + block.x - 1)/block.x, (FieldImage.rows + block.y - 1)/block.y);
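// Note: this ball wrapper launches ParticleFilterNClassCUDA_kernel with the field-player
// models (d_Histogram, d_kPerClass, nHistogram); the *_ball buffers allocated in
// ReserveCudaMemoryBall and ParticleFilterBallCUDA_kernel below are left unused here.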
ParticleFilterNClassCUDA_kernel<<<grid, block>>>(d_FieldImage, d_Histogram, d_maxDistances, nHistogram,
d_kPerClass, NumberOfClasses,
d_ParametersHeightForSquare,d_ParametersWidthForSquare, FieldImage.cols, FieldImage.rows, FieldImage.step,
PixelClass.step, Probability.step, d_PixelClass, d_Probability);
//Synchronize to check for any kernel launch errors
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
SAFE_CALL(cudaMemcpy(PixelClass.ptr(), d_PixelClass, grayBytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
SAFE_CALL(cudaMemcpy(Probability.ptr(), d_Probability, ProbBytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
SAFE_CALL(cudaFree(d_FieldImage),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_PixelClass),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_Probability),"CUDA Free Failed");
}
__global__ void ParticleFilterBallCUDA_kernel( unsigned char* FieldImage,
float *Histogram,
float *distances,
int nHistogram,
int *kPerClass,
int numberOfClasses,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height,
int colorWidthStep,
int grayWidthStep,
int ProbWidthStep,
unsigned char* PixelClass,
float *Probability)
{
//2D Index of current thread
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
//Only valid threads perform memory I/O
if(xIndex < width && yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
FieldImage[color_tid] > 0 && FieldImage[color_tid + 1] > 0 && FieldImage[color_tid + 2] > 0){
const int gray_tid = yIndex * width + xIndex;
const int prob_tid = gray_tid; //yIndex * width + xIndex;
//Compute the window for this point
const int HalfWindowWidth = (int)round(((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3])/4.0f);
const int HalfWindowHeight = (int)round(((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3])/4.0);
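// The half-window sizes grow linearly with the row index, which likely models perspective
// (objects lower in the frame appear larger); the /4 here makes the ball window smaller
// than the /2 used by the player kernels.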
int HHistogram[NBINS];
int LHistogram[NBINS];
int SHistogram[NBINS];
int bytesNbins = NBINS * sizeof(int);
memset(HHistogram, 0, bytesNbins);
memset(LHistogram, 0, bytesNbins);
memset(SHistogram, 0, bytesNbins);
int SumaValidPixels = 0; // must start at zero; it is only incremented below
int limitii = min(yIndex + HalfWindowHeight, height - 1);
int limitjj = min(xIndex + HalfWindowWidth, width - 1);
for (int ii = max(yIndex - HalfWindowHeight, 0); ii < limitii; ii++){
for (int jj = max(xIndex - HalfWindowWidth, 0); jj < limitjj; jj++){
//Get the color image values
const int colorIdPixel = ii * colorWidthStep + (3 * jj);
const unsigned char h_pixel = FieldImage[colorIdPixel];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (h_pixel > 0 && l_pixel > 0 && s_pixel > 0)
{
SumaValidPixels++;
HHistogram[(int)(h_pixel * NBINS/RANGE_H)]++;
LHistogram[(int)(l_pixel * NBINS/RANGE_L)]++;
SHistogram[(int)(s_pixel * NBINS/RANGE_S)]++;
}
}
}
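// Bhattacharyya comparison of the window histograms against each stored model;
// the Distance scratch array lives on the per-thread device heap (freed below).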
float* Distance = new float[nHistogram];
for(int n = 0; n < nHistogram; n++){
Distance[n] = 0;
for (int K=0;K<NBINS;K++){
Distance[n] += sqrtf((HHistogram[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K]);
Distance[n] += sqrtf((LHistogram[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K + (NBINS)]);
Distance[n] += sqrtf((SHistogram[K]/(float)SumaValidPixels) * Histogram[HIST_SIZE * n + K + (2 * NBINS)]);
}
// Distance[n]=Distance[n]/((float)NBINS*6.0);
float Decay = 1.0; // /(float)SumaValidPixels;
Distance[n] = (1 - (Distance[n]/6.0)) * Decay;
}
float minDistance = Distance[0];
int minIndex = 0;
for(int n = 1; n < nHistogram; n++){
if(Distance[n] < minDistance){
minDistance = Distance[n];
minIndex = n;
}
}
delete[] Distance;
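// Map the winning flat model index back to its class: models are grouped per class,
// with kPerClass[n] models belonging to class n + 1.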
int kNum = 0;
for (int n = 0; n < numberOfClasses; n++) {
kNum += kPerClass[n];
if(minIndex < kNum) // && minDistance < distances[n])
{
PixelClass[gray_tid] = static_cast<unsigned char>(n + 1);
Probability[prob_tid] = static_cast<float>(minDistance);
break;
}
}
}
}
__global__ void ParticleFilterBayes_kernel( unsigned char* FieldImage,
unsigned char* PixelClass,
float *Probability,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height,
int colorWidthStep,
float *gaussians,
int gaussiansNumber
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
if(xIndex < width && yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const int HalfWindowWidth = (int)round(((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3])/2.0f);
const int HalfWindowHeight = (int)round(((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3])/2.0f);
float H = 0;
float L = 0;
float S = 0;
float n = 0;
int limityy = min(yIndex + HalfWindowHeight, height - 1);
int limitxx = min(xIndex + HalfWindowWidth, width - 1);
for (int yy = max(yIndex - HalfWindowHeight, 0); yy < limityy; yy++){
for (int xx = max(xIndex - HalfWindowWidth, 0); xx < limitxx; xx++){
const int colorIdPixel = yy * colorWidthStep + (3 * xx);
const unsigned char h_pixel = FieldImage[colorIdPixel];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (!(h_pixel == 0 && l_pixel == 0 && s_pixel == 0)){
n++;
H += h_pixel;
L += l_pixel;
S += s_pixel;
}
}
}
if(n > HalfWindowHeight * HalfWindowWidth *.2){
float percent = n * 100 / (4 * HalfWindowHeight * HalfWindowWidth) * 1000;
H /= n;
L /= n;
S /= n;
int maxIndex = 0;
float maxProb = 0;
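// Apparent per-model layout (inferred from the indices used below):
// [class label, probability threshold, meanH, varH, meanL, varL, meanS, varS].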
for (unsigned k = 0; k < gaussiansNumber; k++){
int gausPos = GAUSSIAN_LENGTH * k;
float PH = exp( (H - gaussians[gausPos + 2])*(H-gaussians[gausPos + 2]) / (-2*gaussians[gausPos + 3])) / sqrt(2* M_PI *gaussians[gausPos + 3]);
float PL = exp( (L - gaussians[gausPos + 4])*(L-gaussians[gausPos + 4]) / (-2*gaussians[gausPos + 5])) / sqrt(2* M_PI *gaussians[gausPos + 5]);
float PS = exp( (S - gaussians[gausPos + 6])*(S-gaussians[gausPos + 6]) / (-2*gaussians[gausPos + 7])) / sqrt(2* M_PI *gaussians[gausPos + 7]);
float prob = PH * PL * PS;
if(prob > maxProb){
maxProb = prob;
maxIndex = k;
}
}
if(maxProb > gaussians[GAUSSIAN_LENGTH * maxIndex + 1])
{
const int gray_tid = yIndex * width + xIndex;
PixelClass[gray_tid] = static_cast<unsigned char>(gaussians[GAUSSIAN_LENGTH * maxIndex]);
Probability[gray_tid] = static_cast<float>(maxProb*percent);
}
}
}
}
__global__ void ParticleFilterWindowsBayes_kernel( unsigned char* FieldImage,
unsigned char* PixelClass,
float *Probability,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height,
int colorWidthStep,
float *gaussians,
int gaussiansNumber
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
if(xIndex < width && yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const int HalfWindowWidth = (int)round(((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3])/2.0f);
const int HalfWindowHeight = (int)round(((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3])/2.0f);
int limityy = min(yIndex + HalfWindowHeight, height - 1);
int limitxx = min(xIndex + HalfWindowWidth, width - 1);
int yy = max(yIndex - HalfWindowHeight, 0);
int step = (limityy - yy)/4;
int limitStep = yy + step;
float H[4];
float L[4];
float S[4];
float n[4];
bool validWindow = true;
int totalValidPixels = 0;
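// Split the search window into 4 horizontal stripes and accumulate mean H/L/S per stripe;
// the topmost stripe is skipped by the validation and scoring loops below (they start at i = 1).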
for (int i = 0; i < 4; i++, limitStep+=step){
H[i] = 0;
L[i] = 0;
S[i] = 0;
n[i] = 0;
for (; yy < limitStep; yy++){
for (int xx = max(xIndex - HalfWindowWidth, 0); xx < limitxx; xx++){
const int colorIdPixel = yy * colorWidthStep + (3 * xx);
const unsigned char h_pixel = FieldImage[colorIdPixel];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (!(h_pixel == 0 && l_pixel == 0 && s_pixel == 0)){
n[i]++;
H[i] += h_pixel;
L[i] += l_pixel;
S[i] += s_pixel;
}
}
}
H[i] /= n[i];
L[i] /= n[i];
S[i] /= n[i];
totalValidPixels += n[i];
}
// if(n[0] < (2 * HalfWindowWidth * step) * 0.1 ){
// validWindow = false;
// }
for (int i = 1; i < 4; ++i) {
if(n[i] < (2 * HalfWindowWidth * step) * 0.2 ){
validWindow = false;
break;
}
}
if(validWindow){
float percent = totalValidPixels * 100 / (4 * HalfWindowHeight * HalfWindowWidth) * 100000;
int maxIndex = 0;
float maxProb = 0;
for (unsigned k = 0; k < gaussiansNumber; k++){
int gausPos = GAUSSIAN_LENGTH_W * k + 2;
float prob = 1;
for (int i = 1; i < 4; ++i) {
int kernelPos = gausPos + 6 * i;
float PH = exp( (H[i] - gaussians[kernelPos])*(H[i]-gaussians[kernelPos]) / (-2*gaussians[kernelPos + 1])) / sqrt(2* M_PI *gaussians[kernelPos + 1]);
float PL = exp( (L[i] - gaussians[kernelPos + 2])*(L[i]-gaussians[kernelPos + 2]) / (-2*gaussians[kernelPos + 3])) / sqrt(2* M_PI *gaussians[kernelPos + 3]);
float PS = exp( (S[i] - gaussians[kernelPos + 4])*(S[i]-gaussians[kernelPos + 4]) / (-2*gaussians[kernelPos + 5])) / sqrt(2* M_PI *gaussians[kernelPos + 5]);
prob += PH * PL * PS;
}
if(prob == 1)
printf("uno\n");
if(prob > maxProb){
maxProb = prob;
maxIndex = k;
}
}
if(maxProb > gaussians[GAUSSIAN_LENGTH_W * maxIndex + 1])
{
const int gray_tid = yIndex * width + xIndex;
PixelClass[gray_tid] = static_cast<unsigned char>(gaussians[GAUSSIAN_LENGTH_W * maxIndex]);
Probability[gray_tid] = static_cast<float>(maxProb*percent);
}
}
}
}
__global__ void DoubleParticleFilterWindowsBayes_kernel( unsigned char* FieldImage,
unsigned char* PixelClass,
float *Probability,
unsigned char* PixelClass2,
float *Probability2,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height,
int colorWidthStep,
float *gaussians, // Team 1, team 2 and grass
int gaussiansNumber,
float *gaussians2, // For goalkeepers and referees
int gaussiansNumber2
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
if(xIndex < width && yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const int HalfWindowWidth = (int)round(((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3])/2.0f);
const int HalfWindowHeight = (int)round(((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3])/2.0f);
int limityy = min(yIndex + HalfWindowHeight, height - 1);
int limitxx = min(xIndex + HalfWindowWidth, width - 1);
int yy = max(yIndex - HalfWindowHeight, 0);
int step = (limityy - yy)/4;
int limitStep = yy + step;
float H[4];
float L[4];
float S[4];
float n[4];
float e[4];
bool validWindow = true;
int totalValidPixels = 0;
int totalEdgePixels=0;
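// PixelClass2 is used as an input edge mask here: e[i] counts its non-zero pixels per
// stripe, and the calling wrapper copies an edge image into it before launching this kernel.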
for (int i = 0; i < 4; i++, limitStep+=step){
H[i] = 0;
L[i] = 0;
S[i] = 0;
n[i] = 0;
e[i] = 0;
for (; yy < limitStep; yy++){
for (int xx = max(xIndex - HalfWindowWidth, 0); xx < limitxx; xx++){
const int edgepixelid = yy * width + xx;
const unsigned char edge_pixel=PixelClass2[edgepixelid];
if(edge_pixel>0){
e[i]++;
totalEdgePixels++;
}
const int colorIdPixel = yy * colorWidthStep + (3 * xx);
const unsigned char h_pixel = FieldImage[colorIdPixel];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (!(h_pixel == 0 && l_pixel == 0 && s_pixel == 0)){
n[i]++;
H[i] += h_pixel;
L[i] += l_pixel;
S[i] += s_pixel;
}
}
}
H[i] /= n[i];
L[i] /= n[i];
S[i] /= n[i];
totalValidPixels += n[i];
}
for (int i = 1; i < 4; ++i) {
if(n[i] < (2 * HalfWindowWidth * step) * 0.2 ){
validWindow = false;
break;
}
}
for (int i = 1; i < 4; ++i) {
if(e[i] < 1 ){
validWindow = false;
break;
}
}
float percentEdge = totalEdgePixels * 100 / (WINDOWS_NUMBER * HalfWindowHeight * HalfWindowWidth);
if(percentEdge<5){
validWindow=false;
}
if(validWindow){
float percent = totalValidPixels * 100 / (WINDOWS_NUMBER * HalfWindowHeight * HalfWindowWidth);
if (percent>40 && percent<70)
percent=80;
else if(percent>=70)
percent=100-percent;
// printf("p: %f\n",percent);
int maxIndex = 0;
float maxProb = 0;
float minDist;
#if 0
if (xIndex == 2251 && yIndex == 582) {
printf("--- Point\n");
printf("x = %d y = %d\n", xIndex, yIndex);
printf("--- Window dimensions\n");
printf("lx = %d ly = %d\n", HalfWindowWidth*2, HalfWindowHeight*2);
printf("--- Window\n");
printf("X: start = %d end = %d\n", max(xIndex - HalfWindowWidth, 0), limitxx);
printf("Y: start = %d end = %d\n", max(yIndex - HalfWindowHeight, 0), max(yIndex - HalfWindowHeight, 0) + 4*step);
printf("--- Gaussians\n");
printf("Number of gaussians = %d\n", gaussiansNumber);
printf("Gaussian length = %d\n", GAUSSIAN_LENGTH);
for (unsigned k = 0; k < gaussiansNumber; k++){
int gausPos = (GAUSSIAN_LENGTH_W * k) + 2;
for (int i = 1; i < 4; ++i) {
int kernelPos = gausPos + (6 * i);
for (unsigned int p = 0; p < 6; p+=2)
printf("%f %f ", gaussians[kernelPos+p], gaussians[kernelPos+1+p]);
}
}
printf("--- Color\n");
printf("Color width step = %d\n", colorWidthStep);
printf("\n");
printf("--- Colors in window\n");
yy = max(yIndex - HalfWindowHeight, 0);
limitStep = yy + step;
for (int i = 0; i < 4; i++, limitStep+=step) {
for (; yy < limitStep; yy++){
for (int xx = max(xIndex - HalfWindowWidth, 0); xx < limitxx; xx++) {
const int colorIdPixel = yy * colorWidthStep + (3 * xx);
const unsigned char h_pixel = FieldImage[colorIdPixel];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (!(h_pixel == 0 && l_pixel == 0 && s_pixel == 0)) {
printf("h = %u l = %u s = %u\n", h_pixel, l_pixel, s_pixel);
}
}
}
}
}
#endif
for (unsigned k = 0; k < gaussiansNumber; k++){
int gausPos = (GAUSSIAN_LENGTH_W * k) + 2;
float prob = 0;
for (int i = 1; i < 4; ++i) {
int kernelPos = gausPos + (6 * i);
/*
float PH = exp((-0.5 * (H[i] - gaussians[kernelPos ])*(H[i]-gaussians[kernelPos ])) / (gaussians[kernelPos + 1])) / (gaussians[kernelPos + 1]*2.506628);
float PL = exp((-0.5 * (L[i] - gaussians[kernelPos + 2])*(L[i]-gaussians[kernelPos + 2])) / (gaussians[kernelPos + 3])) / (gaussians[kernelPos + 3]*2.506628);
float PS = exp((-0.5 * (S[i] - gaussians[kernelPos + 4])*(S[i]-gaussians[kernelPos + 4])) / (gaussians[kernelPos + 5])) / (gaussians[kernelPos + 5]*2.506628);
//*/
// Distances to the mean
float PH = (H[i] - gaussians[kernelPos ])*(H[i] - gaussians[kernelPos ]);
float PL = (L[i] - gaussians[kernelPos + 2])*(L[i] - gaussians[kernelPos + 2]);
float PS = (S[i] - gaussians[kernelPos + 4])*(S[i] - gaussians[kernelPos + 4]);
//prob += log(1+PH) + log(1+PL) + log(1+PS);
prob += PH + PL + PS;
//printf("p: %f, ", prob);
}
prob=sqrt(prob);
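// With the Gaussian densities commented out above, prob is the Euclidean distance between
// the stripe means and the model means (variances ignored); the closest model wins below.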
// if(prob == 1)
// printf("uno\n");
// if(prob > maxProb){
// maxProb = prob;
// maxIndex = k;
// // printf("aquillegamos");
// }
if(k==0){
minDist=prob;
maxIndex = k;
}
else if(prob<minDist){
minDist=prob;
maxIndex = k;
}
}
// if(maxProb > gaussians[GAUSSIAN_LENGTH_W * maxIndex + 1])
// {
const int gray_tid = yIndex * width + xIndex;
PixelClass[gray_tid] = static_cast<unsigned char>(gaussians[GAUSSIAN_LENGTH_W * maxIndex]);
Probability[gray_tid] = static_cast<float>(minDist);//maxProb);
// }
return;
maxIndex = maxProb = 0;
for (unsigned k = 0; k < gaussiansNumber2; k++){
int gausPos = GAUSSIAN_LENGTH_W * k + 2;
float prob = 0;
for (int i = 1; i < 4; ++i) {
int kernelPos = gausPos + 6 * i;
float PH = exp( (H[i] - gaussians2[kernelPos])*(H[i]-gaussians2[kernelPos]) / (-2*gaussians2[kernelPos + 1])) / sqrt(2* M_PI *gaussians2[kernelPos + 1]);
float PL = exp( (L[i] - gaussians2[kernelPos + 2])*(L[i]-gaussians2[kernelPos + 2]) / (-2*gaussians2[kernelPos + 3])) / sqrt(2* M_PI *gaussians2[kernelPos + 3]);
float PS = exp( (S[i] - gaussians2[kernelPos + 4])*(S[i]-gaussians2[kernelPos + 4]) / (-2*gaussians2[kernelPos + 5])) / sqrt(2* M_PI *gaussians2[kernelPos + 5]);
prob += PH * PL * PS;
}
if(prob == 1)
printf("uno\n");
if(prob > maxProb){
maxProb = prob;
maxIndex = k;
}
}
if(maxProb > gaussians2[GAUSSIAN_LENGTH_W * maxIndex + 1])
{
const int gray_tid = yIndex * width + xIndex;
PixelClass2[gray_tid] = static_cast<unsigned char>(gaussians2[GAUSSIAN_LENGTH_W * maxIndex]);
Probability2[gray_tid] = static_cast<float>(maxProb*percent);
}
}
}
}
__global__ void DoubleParticleFilterWindowsHistogram_kernel( unsigned char* FieldImage,
unsigned char* PixelClass,
float *Probability,
unsigned char* PixelClass2,
float *Probability2,
int numberModels,
float* histograms,
int numberModels2,
float* histograms2,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height,
int colorWidthStep
// unsigned char* PixelClass2,
// float *Probability2,
// float *gaussians,
// int gaussiansNumber,
// float *gaussians2,
// int gaussiansNumber2
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
if(xIndex < width && yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const int gray_tid = yIndex * width + xIndex;
const int prob_tid = gray_tid; //yIndex * width + xIndex;
const int HalfWindowWidth = (int)round(((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3])/2.0f);
const int HalfWindowHeight = (int)round(((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3])/2.0f);
int limityy = min(yIndex + HalfWindowHeight, height - 1);
int limitxx = min(xIndex + HalfWindowWidth, width - 1);
int yy = max(yIndex - HalfWindowHeight, 0);
int step = (limityy - yy)/WINDOWS_NUMBER;
int limitStep = yy + step;
int validNumber[WINDOWS_NUMBER];
int histogramH[WINDOWS_NUMBER][NBINS];
int histogramL[WINDOWS_NUMBER][NBINS];
int histogramS[WINDOWS_NUMBER][NBINS];
bool validWindow = true;
int totalValidPixels = 0;
const int BYTES_HIST = NBINS * sizeof(int);
///Compute this particle's histogram
for (int i = 0; i < WINDOWS_NUMBER; i++, limitStep+=step){
memset(histogramH[i], 0, BYTES_HIST);
memset(histogramL[i], 0, BYTES_HIST);
memset(histogramS[i], 0, BYTES_HIST);
validNumber[i] = 0;
for (; yy < limitStep; yy++){
for (int xx = max(xIndex - HalfWindowWidth, 0); xx < limitxx; xx++){
const int colorIdPixel = yy * colorWidthStep + (3 * xx);
const unsigned char h_pixel = FieldImage[colorIdPixel];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (!(h_pixel == 0 && l_pixel == 0 && s_pixel == 0))
{
validNumber[i]++;
histogramH[i][(int)(h_pixel * NBINS/RANGE_H)]++;
histogramL[i][(int)(l_pixel * NBINS/RANGE_L)]++;
histogramS[i][(int)(s_pixel * NBINS/RANGE_S)]++;
}
}
}
totalValidPixels += validNumber[i];
}
/// Validate the window by valid-pixel count (this check may be removed)
// if(validNumber[0] < (2 * HalfWindowWidth * step) * 0.1 ){
// validWindow = false;
// }
for (int i = 1; i < WINDOWS_NUMBER; ++i) {
if(validNumber[i] < (2 * HalfWindowWidth * step) * 0.20 ){
validWindow = false;
break;
}
}
if(validWindow){
float percent = totalValidPixels * 100 / (WINDOWS_NUMBER * HalfWindowHeight * HalfWindowWidth);
if (percent>40 && percent<70)
percent=80;
else if(percent>=70)
percent=100-percent;
// printf("p: %f\n",percent);
float* distances = new float[numberModels];
///Check how similar the histograms are
for(int i = 0; i < numberModels; i++){
distances[i] = 0;
for (int k = 1; k < WINDOWS_NUMBER; k++){
int histogramPosition = (FEATURES_SIZE * i) + (k * HIST_SIZE);
for (int j = 0; j < NBINS; j++){
distances[i] += sqrtf((histogramH[k][j]/(float)validNumber[k]) * histograms[histogramPosition + j]);
distances[i] += sqrtf((histogramL[k][j]/(float)validNumber[k]) * histograms[histogramPosition + j + (NBINS)]);
distances[i] += sqrtf((histogramS[k][j]/(float)validNumber[k]) * histograms[histogramPosition + j + (2 * NBINS)]);
}
}
// distances[n] = distances[n]/((float)NBINS*6.0);
distances[i] = (1-(distances[i]/(3*(WINDOWS_NUMBER-1)))); //* SumaValidPixels;
// distances[i] = (3*(WINDOWS_NUMBER-1))-(distances[i]); //* SumaValidPixels;
}
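// distances[i] is 1 - BC/(3*(WINDOWS_NUMBER-1)): three channels and WINDOWS_NUMBER-1
// stripes (stripe 0 is skipped), so 0 means identical histograms; the smallest wins below.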
float minDistance = distances[0];
int minIndex = 0;
for(int i = 1; i < numberModels; i++){
if(distances[i] < minDistance){
minDistance = distances[i];
minIndex = i;
}
}
PixelClass[gray_tid] = static_cast<unsigned char>(histograms[FEATURES_SIZE * minIndex]);
Probability[prob_tid] = static_cast<float>((minDistance * percent)); //+sqrtf(yIndex/1000)
delete[] distances;
return;
if(xIndex > (width/3) && xIndex < 2*(width/3))
return;
//&& yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
distances = new float[numberModels];
///Check how similar the histograms are (goalkeepers and referees)
for(int i = 0; i < numberModels2; i++){
distances[i] = 0;
for (int k = 1; k < WINDOWS_NUMBER; k++){
int histogramPosition = FEATURES_SIZE * i + k * HIST_SIZE;
for (int j = 0; j < NBINS; j++){
distances[i] += sqrtf((histogramH[k][j]/(float)totalValidPixels) * histograms2[histogramPosition + j]);
distances[i] += sqrtf((histogramL[k][j]/(float)totalValidPixels) * histograms2[histogramPosition + j + (NBINS)]);
distances[i] += sqrtf((histogramS[k][j]/(float)totalValidPixels) * histograms2[histogramPosition + j + (2 * NBINS)]);
}
}
// distances[n] = distances[n]/((float)NBINS*6.0);
distances[i] = (1-(distances[i]/(3*(WINDOWS_NUMBER-1)))); //* SumaValidPixels;
}
minDistance = distances[0];
minIndex = 0;
for(int i = 1; i < numberModels2; i++){
if(distances[i] < minDistance){
minDistance = distances[i];
minIndex = i;
}
}
PixelClass2[gray_tid] = static_cast<unsigned char>(histograms2[FEATURES_SIZE * minIndex]);
Probability2[prob_tid] = static_cast<float>(minDistance * percent);
delete[] distances;
}
}
}
void ReserveCudaMemoryBayes(std::vector<std::vector<float> > gaussians, std::vector<float> ParametersHeightForSquare, std::vector<float> ParametersWidthForSquare, cv::Mat FieldImage, cv::Mat PixelClass, cv::Mat Probability){
int ParametersForSquareBytes = 4 * sizeof(float);
int gaussiansSize = gaussians.size() * GAUSSIAN_LENGTH_W * sizeof(float);
numberOfGaussians = gaussians.size();
colorBytes = FieldImage.step * FieldImage.rows;
grayBytes = PixelClass.step * PixelClass.rows;
ProbBytes = Probability.cols * Probability.rows * sizeof(float) ;
float *h_gaussians = new float[numberOfGaussians * GAUSSIAN_LENGTH_W];
for(int i = 0; i < numberOfGaussians; i++){
for(int j = 0; j < GAUSSIAN_LENGTH_W; j++){
h_gaussians[j + i * GAUSSIAN_LENGTH_W] = gaussians[i][j];
}
}
SAFE_CALL(cudaMalloc<float>(&d_gaussians, gaussiansSize),"CUDA Malloc Failed");
SAFE_CALL(cudaMemcpy(d_gaussians, h_gaussians, gaussiansSize, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMalloc<float>(&d_ParametersHeightForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_ParametersWidthForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMemcpy(d_ParametersHeightForSquare, ParametersHeightForSquare.data(), ParametersForSquareBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMemcpy(d_ParametersWidthForSquare, ParametersWidthForSquare.data(), ParametersForSquareBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_FieldImage, colorBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_PixelClass, grayBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_Probability, ProbBytes),"CUDA Malloc Failed");
delete[] h_gaussians;
}
void ReserveCudaMemoryBayes(std::vector<std::vector<float> > gaussians,std::vector<std::vector<float> > gaussians2, std::vector<float> ParametersHeightForSquare, std::vector<float> ParametersWidthForSquare, cv::Mat FieldImage, cv::Mat PixelClass, cv::Mat Probability){
int ParametersForSquareBytes = 4 * sizeof(float);
int gaussiansSize = gaussians.size() * GAUSSIAN_LENGTH_W * sizeof(float);
int gaussiansSize2 = gaussians2.size() * GAUSSIAN_LENGTH_W * sizeof(float);
numberOfGaussians = gaussians.size();
numberOfGaussians2 = gaussians2.size();
colorBytes = FieldImage.step * FieldImage.rows;
grayBytes = PixelClass.step * PixelClass.rows;
ProbBytes = Probability.cols * Probability.rows * sizeof(float) ;
float *h_gaussians = new float[numberOfGaussians * GAUSSIAN_LENGTH_W];
for(int i = 0; i < numberOfGaussians; i++){
for(int j = 0; j < GAUSSIAN_LENGTH_W; j++){
h_gaussians[j + i * GAUSSIAN_LENGTH_W] = gaussians[i][j];
}
}
float *h_gaussians2 = new float[numberOfGaussians2 * GAUSSIAN_LENGTH_W];
for(int i = 0; i < numberOfGaussians2; i++){
for(int j = 0; j < GAUSSIAN_LENGTH_W; j++){
h_gaussians2[j + i * GAUSSIAN_LENGTH_W] = gaussians2[i][j];
}
}
SAFE_CALL(cudaMalloc<float>(&d_gaussians, gaussiansSize),"CUDA Malloc Failed");
SAFE_CALL(cudaMemcpy(d_gaussians, h_gaussians, gaussiansSize, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMalloc<float>(&d_gaussians2, gaussiansSize2),"CUDA Malloc Failed");
SAFE_CALL(cudaMemcpy(d_gaussians2, h_gaussians2, gaussiansSize2, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMalloc<float>(&d_ParametersHeightForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_ParametersWidthForSquare, ParametersForSquareBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMemcpy(d_ParametersHeightForSquare, ParametersHeightForSquare.data(), ParametersForSquareBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMemcpy(d_ParametersWidthForSquare, ParametersWidthForSquare.data(), ParametersForSquareBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_FieldImage, colorBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_PixelClass, grayBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_Probability, ProbBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_PixelClass2, grayBytes),"CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<float>(&d_Probability2, ProbBytes),"CUDA Malloc Failed");
delete[] h_gaussians;
delete[] h_gaussians2;
}
void FreeCudaMemoryBayes(){
//Free the device memory
SAFE_CALL(cudaFree(d_gaussians),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_ParametersHeightForSquare),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_ParametersWidthForSquare),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_FieldImage),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_PixelClass),"CUDA Free Failed");
SAFE_CALL(cudaFree(d_Probability),"CUDA Free Failed");
}
__global__ void ParticleFilterPixelsBayes_kernel( unsigned char* FieldImage,
unsigned char* PixelClass,
float *Probability,
int width,
int height,
int colorWidthStep,
float *gaussians,
int gaussiansNumber
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
if(xIndex < width && yIndex < height &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const int colorIdPixel = yIndex * colorWidthStep + (3 * xIndex);
const unsigned char H = FieldImage[colorIdPixel];
const unsigned char L = FieldImage[colorIdPixel + 1];
const unsigned char S = FieldImage[colorIdPixel + 2];
if(!(H==0 && L==0 && S==0)){
int maxIndex = 0;
double maxProb = 0;
for (int k = 0; k < gaussiansNumber; k++){
int gausPos = GAUSSIAN_LENGTH * k;
double PH = exp( (H - gaussians[gausPos + 2])*(H-gaussians[gausPos + 2]) / (-2*gaussians[gausPos + 3])) / sqrt(2* M_PI *gaussians[gausPos + 3]);
double PL = exp( (L - gaussians[gausPos + 4])*(L-gaussians[gausPos + 4]) / (-2*gaussians[gausPos + 5])) / sqrt(2* M_PI *gaussians[gausPos + 5]);
double PS = exp( (S - gaussians[gausPos + 6])*(S-gaussians[gausPos + 6]) / (-2*gaussians[gausPos + 7])) / sqrt(2* M_PI *gaussians[gausPos + 7]);
double prob = PH * PL * PS;
//printf("%f %f %f = %f\n",PH,PL,PS,prob);
if(gaussians[gausPos] == 1 && prob > 0){
maxProb = prob;
maxIndex = k;
//printf("p: %f\n",prob);
break;
}
if(prob > maxProb){
maxProb = prob;
maxIndex = k;
}
}
//printf("prob: %f k: %d\n", maxProb, maxIndex); //gaussians[GAUSSIAN_LENGTH * maxIndex]);
//printf("%f\n",gaussians[GAUSSIAN_LENGTH * maxIndex]);
const int gray_tid = yIndex * width + xIndex;
PixelClass[gray_tid] = static_cast<unsigned char>(gaussians[GAUSSIAN_LENGTH * maxIndex]);
Probability[gray_tid] = static_cast<float>(maxProb);
}
}
}
__global__ void RemoveByClass_kernel( unsigned char* FieldImage,
int width,
int height,
int colorWidthStep,
float *gaussians,
int gaussiansNumber
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
if(xIndex < width && yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const unsigned char H = FieldImage[color_tid];
const unsigned char L = FieldImage[color_tid + 1];
const unsigned char S = FieldImage[color_tid + 2];
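// Blank out any pixel whose color gets non-zero likelihood under a class-1 Gaussian
// (presumably the field/grass model), so the later kernels skip it.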
for (int k = 0; k < gaussiansNumber; k++){
int gausPos = GAUSSIAN_LENGTH_W * k;
double PH = exp( (H - gaussians[gausPos + 2])*(H-gaussians[gausPos + 2]) / (-2*gaussians[gausPos + 3])) / sqrt(2* M_PI *gaussians[gausPos + 3]);
double PL = exp( (L - gaussians[gausPos + 4])*(L-gaussians[gausPos + 4]) / (-2*gaussians[gausPos + 5])) / sqrt(2* M_PI *gaussians[gausPos + 5]);
double PS = exp( (S - gaussians[gausPos + 6])*(S-gaussians[gausPos + 6]) / (-2*gaussians[gausPos + 7])) / sqrt(2* M_PI *gaussians[gausPos + 7]);
double prob = PH * PL * PS;
if(gaussians[gausPos] == 1 && prob > 0){
FieldImage[color_tid] = 0;
FieldImage[color_tid + 1] = 0;
FieldImage[color_tid + 2] = 0;
break;
}
}
}
}
__global__ void nonMaximumSuppression_kernel(
unsigned char* PixelClass,
float *Probability,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int gray_tid = yIndex * width + xIndex;
if(xIndex < width && yIndex < height && PixelClass[gray_tid] != 0 //&& xIndex % 4 == 0 && yIndex % 4 == 0
){
const int NeighborhoodX = (int)((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3]);
const int NeighborhoodY = (int)((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3]);
int ClassValue = PixelClass[gray_tid];
double ProbValue = Probability[gray_tid];
bool IsMaximum = true;
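// A detection is suppressed if any other detection in its neighborhood has a strictly
// smaller score, i.e. local minima survive (the commented-out comparison inside the loop
// keeps maxima instead).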
for (int yy = yIndex - NeighborhoodY; IsMaximum && yy <= yIndex + NeighborhoodY; yy++) {
for (int xx= xIndex - NeighborhoodX; IsMaximum && xx <= xIndex + NeighborhoodX; xx++){
if (!(yy == yIndex && xx == xIndex) && yy < height && xx < width && yy > 0 && xx > 0){
const int pixelPosition = yy * width + xx;
// if (PixelClass[pixelPosition]>0 && Probability[pixelPosition] >ProbValue){
if (PixelClass[pixelPosition]>0 && Probability[pixelPosition] < ProbValue) {
// if ((PixelClass[pixelPosition] == ClassValue && Probability[pixelPosition] < ProbValue)){
PixelClass[gray_tid] = 0;
IsMaximum = false;
return;
}
}
}
}
}
}
__global__ void RemoveByClassWindow_kernel( unsigned char* FieldImage,
unsigned char* PixelClass,
float *Probability,
float *ParametersHeightforSquare,
float *ParametersWidthforSquare,
int width,
int height,
int colorWidthStep,
float *gaussians,
int gaussiansNumber
)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = yIndex * colorWidthStep + (3 * xIndex);
if(xIndex < width && yIndex < height && //xIndex % 4 == 0 && yIndex % 4 == 0 &&
!(FieldImage[color_tid] == 0 && FieldImage[color_tid + 1] == 0 && FieldImage[color_tid + 2] == 0)){
const int HalfWindowWidth =1;//(int)round(((((float)yIndex-ParametersWidthforSquare[2])*(ParametersWidthforSquare[1]/ParametersWidthforSquare[0]))+ParametersWidthforSquare[3])/2.0f);
const int HalfWindowHeight = 1;//(int)round(((((float)yIndex-ParametersHeightforSquare[2])*(ParametersHeightforSquare[1]/ParametersHeightforSquare[0]))+ParametersHeightforSquare[3])/2.0f);
float H = 0;
float L = 0;
float S = 0;
float n = 0;
int limityy = min(yIndex + HalfWindowHeight, height - 1);
int limitxx = min(xIndex + HalfWindowWidth, width - 1);
for (int yy = max(yIndex - HalfWindowHeight, 0); yy < limityy; yy++){
for (int xx = max(xIndex - HalfWindowWidth, 0); xx < limitxx; xx++){
const int colorIdPixel = yy * colorWidthStep + (3 * xx);
const unsigned char h_pixel = FieldImage[colorIdPixel];
const unsigned char l_pixel = FieldImage[colorIdPixel + 1];
const unsigned char s_pixel = FieldImage[colorIdPixel + 2];
if (!(h_pixel == 0 && l_pixel == 0 && s_pixel == 0)){
n++;
H += h_pixel;
L += l_pixel;
S += s_pixel;
}
}
}
H /= n;
L /= n;
S /= n;
// FieldImage[color_tid] = 0;
// FieldImage[color_tid + 1] = 0;
// FieldImage[color_tid + 2] = 0;
for (unsigned k = 0; k < gaussiansNumber; k++){
int gausPos = GAUSSIAN_LENGTH * k;
float PH = exp( (H - gaussians[gausPos + 2])*(H-gaussians[gausPos + 2]) / (-2*gaussians[gausPos + 3])) / sqrt(2* M_PI *gaussians[gausPos + 3]);
float PL = exp( (L - gaussians[gausPos + 4])*(L-gaussians[gausPos + 4]) / (-2*gaussians[gausPos + 5])) / sqrt(2* M_PI *gaussians[gausPos + 5]);
float PS = exp( (S - gaussians[gausPos + 6])*(S-gaussians[gausPos + 6]) / (-2*gaussians[gausPos + 7])) / sqrt(2* M_PI *gaussians[gausPos + 7]);
float prob = PH * PL * PS;
if(gaussians[gausPos] == 1 && prob > 0){
// FieldImage[color_tid] = 180;
// FieldImage[color_tid + 1] = 255;
// FieldImage[color_tid + 2] = 255;
FieldImage[color_tid] = 0;
FieldImage[color_tid + 1] = 0;
FieldImage[color_tid + 2] = 0;
break;
}
}
}
}
void ParticleFilterBayesCUDA(cv::Mat &FieldImage, cv::Mat& PixelClass, cv::Mat& Probability)
{
SAFE_CALL(cudaMemset(d_PixelClass, 0, grayBytes),"CUDA Memset Failed");
SAFE_CALL(cudaMemcpy(d_FieldImage, FieldImage.ptr(), colorBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Specify a reasonable block size
const dim3 block(BLOCKSIZE ,BLOCKSIZE);
//Calculate grid size to cover the whole image
const dim3 grid((FieldImage.cols + block.x - 1)/block.x, (FieldImage.rows + block.y - 1)/block.y);
RemoveByClass_kernel<<<grid, block>>>(d_FieldImage, FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
// RemoveByClassWindow_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability, d_ParametersHeightForSquare, d_ParametersWidthForSquare,
// FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
// cv::Mat img = FieldImage.clone();
// SAFE_CALL(cudaMemcpy(img.ptr(), d_FieldImage, colorBytes, cudaMemcpyDeviceToHost),"CUDA Memcpy Host To Device Failed");
// cv::cvtColor(img, img, CV_HLS2BGR);
// cv::imshow("img", img);
ParticleFilterWindowsBayes_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability, d_ParametersHeightForSquare, d_ParametersWidthForSquare,
FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
// ParticleFilterPixelsBayes_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability,
// FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
nonMaximumSuppression_kernel<<<grid, block>>>(d_PixelClass, d_Probability, d_ParametersHeightForSquare, d_ParametersWidthForSquare,FieldImage.cols, FieldImage.rows);
//Synchronize to check for any kernel launch errors
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
SAFE_CALL(cudaMemcpy(PixelClass.ptr(), d_PixelClass,grayBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(cudaMemcpy(Probability.ptr(), d_Probability,ProbBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
}
void ParticleFilterBayesCUDA(cv::Mat& FieldImage, cv::Mat& PixelClass, cv::Mat& Probability, cv::Mat& PixelClass2, cv::Mat& Probability2)
{
SAFE_CALL(cudaMemcpy(d_FieldImage, FieldImage.ptr(), colorBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMemset(d_PixelClass, 0, grayBytes),"CUDA Memset Failed");
SAFE_CALL(cudaMemcpy(d_PixelClass2, PixelClass2.ptr(), grayBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
//Specify a reasonable block size
const dim3 block(BLOCKSIZE ,BLOCKSIZE);
//Calculate grid size to cover the whole image
const dim3 grid((FieldImage.cols + block.x - 1)/block.x, (FieldImage.rows + block.y - 1)/block.y);
RemoveByClass_kernel<<<grid, block>>>(d_FieldImage, FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
// RemoveByClassWindow_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability, d_ParametersHeightForSquare, d_ParametersWidthForSquare,
// FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
// cv::Mat img = FieldImage.clone();
// SAFE_CALL(cudaMemcpy(img.ptr(), d_FieldImage, colorBytes, cudaMemcpyDeviceToHost),"CUDA Memcpy Host To Device Failed");
// cv::cvtColor(img, img, CV_HLS2BGR);
// cv::imshow("img", img);
DoubleParticleFilterWindowsBayes_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability, d_PixelClass2, d_Probability2, d_ParametersHeightForSquare, d_ParametersWidthForSquare,
FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians, d_gaussians2, numberOfGaussians2);
// ParticleFilterPixelsBayes_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability,
// FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
SAFE_CALL(cudaMemset(d_PixelClass2, 0, grayBytes),"CUDA Memset Failed");
nonMaximumSuppression_kernel<<<grid, block>>>(d_PixelClass, d_Probability, d_ParametersHeightForSquare, d_ParametersWidthForSquare,FieldImage.cols, FieldImage.rows);
//Synchronize to check for any kernel launch errors
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
nonMaximumSuppression_kernel<<<grid, block>>>(d_PixelClass2, d_Probability2, d_ParametersHeightForSquare, d_ParametersWidthForSquare,FieldImage.cols, FieldImage.rows);
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
SAFE_CALL(cudaMemcpy(PixelClass.ptr(), d_PixelClass,grayBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(cudaMemcpy(Probability.ptr(), d_Probability,ProbBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(cudaMemcpy(PixelClass2.ptr(), d_PixelClass2,grayBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(cudaMemcpy(Probability2.ptr(), d_Probability2,ProbBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(cudaMemcpy(FieldImage.ptr(), d_FieldImage, colorBytes, cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
}
void ParticleFilterHistCUDA(cv::Mat& FieldImage, cv::Mat& PixelClass, cv::Mat& Probability, cv::Mat& PixelClass2, cv::Mat& Probability2)
{
SAFE_CALL(cudaMemcpy(d_FieldImage, FieldImage.ptr(), colorBytes, cudaMemcpyHostToDevice),"CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMemset(d_PixelClass, 0, grayBytes),"CUDA Memset Failed");
SAFE_CALL(cudaMemset(d_PixelClass2, 0, grayBytes),"CUDA Memset Failed");
//Specify a reasonable block size
const dim3 block(BLOCKSIZE ,BLOCKSIZE);
//Calculate grid size to cover the whole image
const dim3 grid((FieldImage.cols + block.x - 1)/block.x, (FieldImage.rows + block.y - 1)/block.y);
RemoveByClass_kernel<<<grid, block>>>(d_FieldImage, FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
// RemoveByClassWindow_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability, d_ParametersHeightForSquare, d_ParametersWidthForSquare,
// FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
//cv::Mat img = FieldImage.clone();
//SAFE_CALL(cudaMemcpy(img.ptr(), d_FieldImage, colorBytes, cudaMemcpyDeviceToHost),"CUDA Memcpy Host To Device Failed");
//cv::cvtColor(img, img, CV_HLS2BGR);
//cv::imshow("img", img);
DoubleParticleFilterWindowsHistogram_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability, d_PixelClass2, d_Probability2,
numberHistograms, d_histograms, numberHistograms2, d_histograms2,
d_ParametersHeightForSquare, d_ParametersWidthForSquare,
FieldImage.cols, FieldImage.rows, FieldImage.step);
// ParticleFilterPixelsBayes_kernel<<<grid, block>>>(d_FieldImage, d_PixelClass, d_Probability,
// FieldImage.cols, FieldImage.rows, FieldImage.step, d_gaussians, numberOfGaussians);
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
nonMaximumSuppression_kernel<<<grid, block>>>(d_PixelClass, d_Probability, d_ParametersHeightForSquare, d_ParametersWidthForSquare,FieldImage.cols, FieldImage.rows);
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
// nonMaximumSuppression_kernel<<<grid, block>>>(d_PixelClass2, d_Probability2, d_ParametersHeightForSquare, d_ParametersWidthForSquare,FieldImage.cols, FieldImage.rows);
// SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
SAFE_CALL(cudaMemcpy(PixelClass.ptr(), d_PixelClass,grayBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(cudaMemcpy(Probability.ptr(), d_Probability,ProbBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(cudaMemcpy(PixelClass2.ptr(), d_PixelClass2,grayBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(cudaMemcpy(Probability2.ptr(), d_Probability2,ProbBytes,cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
SAFE_CALL(cudaMemcpy(FieldImage.ptr(), d_FieldImage, colorBytes, cudaMemcpyDeviceToHost),"CUDA Memcpy Device To Host Failed");
}
|
96798c3afed0556a1278755e08886ec63ee55530.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
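// One thread per output row: thread id computes C[id][i] = sum_j A[id][j] * B[j][i].
// The host launches a single block of ha threads, so ha must not exceed the device's
// per-block thread limit.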
__global__ void mult(int *a,int *b,int *c,int wa,int wb) {
int id = threadIdx.x;
int sum = 0;
for(int i=0; i<wb; i++) {
sum = 0;
for(int j=0; j<wa; j++)
sum = sum + a[id*wa+j]*b[j*wb+i];
c[id*wb+i] = sum;
}
}
int main() {
int ha,wa,wb;
printf("Enter ha,wa,wb: ");
scanf("%d %d %d",&ha,&wa,&wb);
int a[ha][wa],b[wa][wb];
int c[ha][wb];
printf("Enter A:\n");
for(int i=0; i<ha; i++){
for(int j = 0; j < wa; j++) {
scanf("%d",&a[i][j]);
}
}
printf("Enter B:\n");
for(int i=0; i<wa; i++){
for(int j = 0; j < wb; j++) {
scanf("%d",&b[i][j]);
}
}
int *d_a,*d_b,*d_c;
int size = sizeof(int);
hipMalloc((void**)&d_a,size*ha*wa);
hipMalloc((void**)&d_b,size*wa*wb);
hipMalloc((void**)&d_c,size*ha*wb);
hipMemcpy(d_a,&a,size*ha*wa,hipMemcpyHostToDevice);
hipMemcpy(d_b,&b,size*wa*wb,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mult), dim3(1),dim3(ha), 0, 0, d_a,d_b,d_c,wa,wb);
hipMemcpy(&c,d_c,size*ha*wb,hipMemcpyDeviceToHost);
printf("C:\n");
for(int i=0; i<ha; i++) {
for(int j=0; j<wb; j++)
printf("%d ",c[i][j]);
printf("\n");
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
| 96798c3afed0556a1278755e08886ec63ee55530.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
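// One thread per output row: thread id computes C[id][i] = sum_j A[id][j] * B[j][i].
// The host launches a single block of ha threads, so ha must not exceed the device's
// per-block thread limit.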
__global__ void mult(int *a,int *b,int *c,int wa,int wb) {
int id = threadIdx.x;
int sum = 0;
for(int i=0; i<wb; i++) {
sum = 0;
for(int j=0; j<wa; j++)
sum = sum + a[id*wa+j]*b[j*wb+i];
c[id*wb+i] = sum;
}
}
int main() {
int ha,wa,wb;
printf("Enter ha,wa,wb: ");
scanf("%d %d %d",&ha,&wa,&wb);
int a[ha][wa],b[wa][wb];
int c[ha][wb];
printf("Enter A:\n");
for(int i=0; i<ha; i++){
for(int j = 0; j < wa; j++) {
scanf("%d",&a[i][j]);
}
}
printf("Enter B:\n");
for(int i=0; i<wa; i++){
for(int j = 0; j < wb; j++) {
scanf("%d",&b[i][j]);
}
}
int *d_a,*d_b,*d_c;
int size = sizeof(int);
cudaMalloc((void**)&d_a,size*ha*wa);
cudaMalloc((void**)&d_b,size*wa*wb);
cudaMalloc((void**)&d_c,size*ha*wb);
cudaMemcpy(d_a,&a,size*ha*wa,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,&b,size*wa*wb,cudaMemcpyHostToDevice);
mult<<<1,ha>>>(d_a,d_b,d_c,wa,wb);
cudaMemcpy(&c,d_c,size*ha*wb,cudaMemcpyDeviceToHost);
printf("C:\n");
for(int i=0; i<ha; i++) {
for(int j=0; j<wb; j++)
printf("%d ",c[i][j]);
printf("\n");
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
|
b403442a2ffa92e501854db144cf043f7704fb72.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2020-2022 by XGBoost contributors
*/
#include <gtest/gtest.h>
#include "../../../../src/tree/gpu_hist/evaluate_splits.cuh"
#include "../../helpers.h"
#include "../../histogram_helpers.h"
#include "../test_evaluate_splits.h" // TestPartitionBasedSplit
namespace xgboost {
namespace tree {
namespace {
auto ZeroParam() {
auto args = Args{{"min_child_weight", "0"},
{"lambda", "0"}};
TrainParam tparam;
tparam.UpdateAllowUnknown(args);
return tparam;
}
} // anonymous namespace
TEST_F(TestCategoricalSplitWithMissing, GPUHistEvaluator) {
thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0};
GPUTrainingParam param{param_};
cuts_.cut_ptrs_.SetDevice(0);
cuts_.cut_values_.SetDevice(0);
cuts_.min_vals_.SetDevice(0);
thrust::device_vector<GradientPairPrecise> feature_histogram{feature_histogram_};
dh::device_vector<FeatureType> feature_types(feature_set.size(), FeatureType::kCategorical);
auto d_feature_types = dh::ToSpan(feature_types);
EvaluateSplitInputs input{1, 0, parent_sum_, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
d_feature_types,
cuts_.cut_ptrs_.ConstDeviceSpan(),
cuts_.cut_values_.ConstDeviceSpan(),
cuts_.min_vals_.ConstDeviceSpan(),
};
GPUHistEvaluator evaluator{param_, static_cast<bst_feature_t>(feature_set.size()), 0};
evaluator.Reset(cuts_, dh::ToSpan(feature_types), feature_set.size(), param_, 0);
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
ASSERT_EQ(result.thresh, 1);
this->CheckResult(result.loss_chg, result.findex, result.fvalue, result.is_cat,
result.dir == kLeftDir, result.left_sum, result.right_sum);
}
TEST(GpuHist, PartitionBasic) {
TrainParam tparam = ZeroParam();
tparam.max_cat_to_onehot = 0;
GPUTrainingParam param{tparam};
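// The cuts below describe a single categorical feature with three categories (cut values 0, 1, 2).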
common::HistogramCuts cuts;
cuts.cut_values_.HostVector() = std::vector<float>{0.0, 1.0, 2.0};
cuts.cut_ptrs_.HostVector() = std::vector<uint32_t>{0, 3};
cuts.min_vals_.HostVector() = std::vector<float>{0.0};
cuts.cut_ptrs_.SetDevice(0);
cuts.cut_values_.SetDevice(0);
cuts.min_vals_.SetDevice(0);
thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0};
thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0);
dh::device_vector<FeatureType> feature_types(feature_set.size(), FeatureType::kCategorical);
common::Span<FeatureType> d_feature_types;
auto max_cat =
*std::max_element(cuts.cut_values_.HostVector().begin(), cuts.cut_values_.HostVector().end());
cuts.SetCategorical(true, max_cat);
d_feature_types = dh::ToSpan(feature_types);
EvaluateSplitSharedInputs shared_inputs{
param,
d_feature_types,
cuts.cut_ptrs_.ConstDeviceSpan(),
cuts.cut_values_.ConstDeviceSpan(),
cuts.min_vals_.ConstDeviceSpan(),
};
GPUHistEvaluator evaluator{tparam, static_cast<bst_feature_t>(feature_set.size()), 0};
evaluator.Reset(cuts, dh::ToSpan(feature_types), feature_set.size(), tparam, 0);
{
// -1.0s go right
// -3.0s go left
GradientPairPrecise parent_sum(-5.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-1.0, 1.0}, {-3.0, 1.0}};
EvaluateSplitInputs input{0, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
auto cats = std::bitset<32>(evaluator.GetHostNodeCats(input.nidx)[0]);
EXPECT_EQ(result.dir, kLeftDir);
EXPECT_EQ(cats, std::bitset<32>("11000000000000000000000000000000"));
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
{
// -1.0s go right
// -3.0s go left
GradientPairPrecise parent_sum(-7.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-3.0, 1.0}, {-3.0, 1.0}};
EvaluateSplitInputs input{1, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
auto cats = std::bitset<32>(evaluator.GetHostNodeCats(input.nidx)[0]);
EXPECT_EQ(result.dir, kLeftDir);
EXPECT_EQ(cats, std::bitset<32>("10000000000000000000000000000000"));
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
{
// All -1.0, gain from splitting should be 0.0
GradientPairPrecise parent_sum(-3.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}};
EvaluateSplitInputs input{2, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
EXPECT_EQ(result.dir, kLeftDir);
EXPECT_FLOAT_EQ(result.loss_chg, 0.0f);
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
// With 3.0/3.0 missing values
  // Forward, the first 2 categories are selected, while the last one goes to the left along with the missing value
{
GradientPairPrecise parent_sum(0.0, 6.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}};
EvaluateSplitInputs input{3, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
auto cats = std::bitset<32>(evaluator.GetHostNodeCats(input.nidx)[0]);
EXPECT_EQ(cats, std::bitset<32>("11000000000000000000000000000000"));
EXPECT_EQ(result.dir, kLeftDir);
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
{
// -1.0s go right
// -3.0s go left
GradientPairPrecise parent_sum(-5.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-3.0, 1.0}, {-1.0, 1.0}};
EvaluateSplitInputs input{4, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
auto cats = std::bitset<32>(evaluator.GetHostNodeCats(input.nidx)[0]);
EXPECT_EQ(result.dir, kLeftDir);
EXPECT_EQ(cats, std::bitset<32>("10100000000000000000000000000000"));
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
{
// -1.0s go right
// -3.0s go left
GradientPairPrecise parent_sum(-5.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-3.0, 1.0}, {-1.0, 1.0}, {-3.0, 1.0}};
EvaluateSplitInputs input{5, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
auto cats = std::bitset<32>(evaluator.GetHostNodeCats(input.nidx)[0]);
EXPECT_EQ(cats, std::bitset<32>("01000000000000000000000000000000"));
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
}
TEST(GpuHist, PartitionTwoFeatures) {
TrainParam tparam = ZeroParam();
tparam.max_cat_to_onehot = 0;
GPUTrainingParam param{tparam};
common::HistogramCuts cuts;
cuts.cut_values_.HostVector() = std::vector<float>{0.0, 1.0, 2.0, 0.0, 1.0, 2.0};
cuts.cut_ptrs_.HostVector() = std::vector<uint32_t>{0, 3, 6};
cuts.min_vals_.HostVector() = std::vector<float>{0.0, 0.0};
cuts.cut_ptrs_.SetDevice(0);
cuts.cut_values_.SetDevice(0);
cuts.min_vals_.SetDevice(0);
thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0, 1};
thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0);
dh::device_vector<FeatureType> feature_types(feature_set.size(), FeatureType::kCategorical);
common::Span<FeatureType> d_feature_types(dh::ToSpan(feature_types));
auto max_cat =
*std::max_element(cuts.cut_values_.HostVector().begin(), cuts.cut_values_.HostVector().end());
cuts.SetCategorical(true, max_cat);
EvaluateSplitSharedInputs shared_inputs{
param,
d_feature_types,
cuts.cut_ptrs_.ConstDeviceSpan(),
cuts.cut_values_.ConstDeviceSpan(),
cuts.min_vals_.ConstDeviceSpan(),
};
GPUHistEvaluator evaluator{tparam, static_cast<bst_feature_t>(feature_set.size()), 0};
evaluator.Reset(cuts, dh::ToSpan(feature_types), feature_set.size(), tparam, 0);
{
GradientPairPrecise parent_sum(-6.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram = std::vector<GradientPairPrecise>{
{-2.0, 1.0}, {-2.0, 1.0}, {-2.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}, {-4.0, 1.0}};
EvaluateSplitInputs input{0, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
auto cats = std::bitset<32>(evaluator.GetHostNodeCats(input.nidx)[0]);
EXPECT_EQ(result.findex, 1);
EXPECT_EQ(cats, std::bitset<32>("11000000000000000000000000000000"));
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
{
GradientPairPrecise parent_sum(-6.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram = std::vector<GradientPairPrecise>{
{-2.0, 1.0}, {-2.0, 1.0}, {-2.0, 1.0}, {-1.0, 1.0}, {-2.5, 1.0}, {-2.5, 1.0}};
EvaluateSplitInputs input{1, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
auto cats = std::bitset<32>(evaluator.GetHostNodeCats(input.nidx)[0]);
EXPECT_EQ(result.findex, 1);
EXPECT_EQ(cats, std::bitset<32>("10000000000000000000000000000000"));
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
}
TEST(GpuHist, PartitionTwoNodes) {
TrainParam tparam = ZeroParam();
tparam.max_cat_to_onehot = 0;
GPUTrainingParam param{tparam};
common::HistogramCuts cuts;
cuts.cut_values_.HostVector() = std::vector<float>{0.0, 1.0, 2.0};
cuts.cut_ptrs_.HostVector() = std::vector<uint32_t>{0, 3};
cuts.min_vals_.HostVector() = std::vector<float>{0.0};
cuts.cut_ptrs_.SetDevice(0);
cuts.cut_values_.SetDevice(0);
cuts.min_vals_.SetDevice(0);
thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0};
thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0);
dh::device_vector<FeatureType> feature_types(feature_set.size(), FeatureType::kCategorical);
common::Span<FeatureType> d_feature_types(dh::ToSpan(feature_types));
auto max_cat =
*std::max_element(cuts.cut_values_.HostVector().begin(), cuts.cut_values_.HostVector().end());
cuts.SetCategorical(true, max_cat);
EvaluateSplitSharedInputs shared_inputs{
param,
d_feature_types,
cuts.cut_ptrs_.ConstDeviceSpan(),
cuts.cut_values_.ConstDeviceSpan(),
cuts.min_vals_.ConstDeviceSpan(),
};
GPUHistEvaluator evaluator{tparam, static_cast<bst_feature_t>(feature_set.size()), 0};
evaluator.Reset(cuts, dh::ToSpan(feature_types), feature_set.size(), tparam, 0);
{
GradientPairPrecise parent_sum(-6.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram_a =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-2.5, 1.0}, {-2.5, 1.0},
{-1.0, 1.0}, {-1.0, 1.0}, {-4.0, 1.0}};
thrust::device_vector<EvaluateSplitInputs> inputs(2);
inputs[0] = EvaluateSplitInputs{0, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram_a)};
thrust::device_vector<GradientPairPrecise> feature_histogram_b =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-1.0, 1.0}, {-4.0, 1.0}};
inputs[1] = EvaluateSplitInputs{1, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram_b)};
thrust::device_vector<GPUExpandEntry> results(2);
evaluator.EvaluateSplits({0, 1}, 1, dh::ToSpan(inputs), shared_inputs, dh::ToSpan(results));
GPUExpandEntry result_a = results[0];
GPUExpandEntry result_b = results[1];
EXPECT_EQ(std::bitset<32>(evaluator.GetHostNodeCats(0)[0]),
std::bitset<32>("10000000000000000000000000000000"));
EXPECT_EQ(std::bitset<32>(evaluator.GetHostNodeCats(1)[0]),
std::bitset<32>("11000000000000000000000000000000"));
}
}
void TestEvaluateSingleSplit(bool is_categorical) {
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
GPUTrainingParam param{tparam};
common::HistogramCuts cuts{MakeCutsForTest({1.0, 2.0, 11.0, 12.0}, {0, 2, 4}, {0.0, 0.0}, 0)};
thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0, 1};
// Setup gradients so that second feature gets higher gain
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{
{-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}};
dh::device_vector<FeatureType> feature_types(feature_set.size(),
FeatureType::kCategorical);
common::Span<FeatureType> d_feature_types;
if (is_categorical) {
auto max_cat = *std::max_element(cuts.cut_values_.HostVector().begin(),
cuts.cut_values_.HostVector().end());
cuts.SetCategorical(true, max_cat);
d_feature_types = dh::ToSpan(feature_types);
}
EvaluateSplitInputs input{1, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
d_feature_types,
cuts.cut_ptrs_.ConstDeviceSpan(),
cuts.cut_values_.ConstDeviceSpan(),
cuts.min_vals_.ConstDeviceSpan(),
};
GPUHistEvaluator evaluator{
tparam, static_cast<bst_feature_t>(feature_set.size()), 0};
evaluator.Reset(cuts, dh::ToSpan(feature_types), feature_set.size(), tparam, 0);
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
EXPECT_EQ(result.findex, 1);
if (is_categorical) {
ASSERT_TRUE(std::isnan(result.fvalue));
} else {
EXPECT_EQ(result.fvalue, 11.0);
}
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(),
parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(),
parent_sum.GetHess());
}
TEST(GpuHist, EvaluateSingleSplit) {
TestEvaluateSingleSplit(false);
}
TEST(GpuHist, EvaluateSingleCategoricalSplit) {
TestEvaluateSingleSplit(true);
}
TEST(GpuHist, EvaluateSingleSplitMissing) {
GradientPairPrecise parent_sum(1.0, 1.5);
TrainParam tparam = ZeroParam();
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{0};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2};
thrust::device_vector<float> feature_values = std::vector<float>{1.0, 2.0};
thrust::device_vector<float> feature_min_values = std::vector<float>{0.0};
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-0.5, 0.5}, {0.5, 0.5}};
EvaluateSplitInputs input{1,0,
parent_sum,
dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
};
GPUHistEvaluator evaluator(tparam, feature_set.size(), 0);
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
EXPECT_EQ(result.findex, 0);
EXPECT_EQ(result.fvalue, 1.0);
EXPECT_EQ(result.dir, kRightDir);
EXPECT_EQ(result.left_sum, GradientPairPrecise(-0.5, 0.5));
EXPECT_EQ(result.right_sum, GradientPairPrecise(1.5, 1.0));
}
TEST(GpuHist, EvaluateSingleSplitEmpty) {
TrainParam tparam = ZeroParam();
GPUHistEvaluator evaluator(tparam, 1, 0);
DeviceSplitCandidate result =
evaluator.EvaluateSingleSplit(EvaluateSplitInputs{}, EvaluateSplitSharedInputs{}).split;
EXPECT_EQ(result.findex, -1);
EXPECT_LT(result.loss_chg, 0.0f);
}
// Feature 0 has a better split, but the algorithm must select feature 1
TEST(GpuHist, EvaluateSingleSplitFeatureSampling) {
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
tparam.UpdateAllowUnknown(Args{});
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{1};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2, 4};
thrust::device_vector<float> feature_values =
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 10.0};
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{
{-10.0, 0.5}, {10.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
EvaluateSplitInputs input{1,0,
parent_sum,
dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
};
GPUHistEvaluator evaluator(tparam, feature_min_values.size(), 0);
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
EXPECT_EQ(result.findex, 1);
EXPECT_EQ(result.fvalue, 11.0);
EXPECT_EQ(result.left_sum, GradientPairPrecise(-0.5, 0.5));
EXPECT_EQ(result.right_sum, GradientPairPrecise(0.5, 0.5));
}
// Features 0 and 1 have identical gain; the algorithm must select 0
TEST(GpuHist, EvaluateSingleSplitBreakTies) {
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
tparam.UpdateAllowUnknown(Args{});
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{0, 1};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2, 4};
thrust::device_vector<float> feature_values =
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 10.0};
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{
{-0.5, 0.5}, {0.5, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
EvaluateSplitInputs input{1,0,
parent_sum,
dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
};
GPUHistEvaluator evaluator(tparam, feature_min_values.size(), 0);
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input,shared_inputs).split;
EXPECT_EQ(result.findex, 0);
EXPECT_EQ(result.fvalue, 1.0);
}
TEST(GpuHist, EvaluateSplits) {
thrust::device_vector<DeviceSplitCandidate> out_splits(2);
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
tparam.UpdateAllowUnknown(Args{});
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{0, 1};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2, 4};
thrust::device_vector<float> feature_values =
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 0.0};
thrust::device_vector<GradientPairPrecise> feature_histogram_left =
std::vector<GradientPairPrecise>{
{-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}};
thrust::device_vector<GradientPairPrecise> feature_histogram_right =
std::vector<GradientPairPrecise>{
{-1.0, 0.5}, {1.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
EvaluateSplitInputs input_left{
1,0,
parent_sum,
dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram_left)};
EvaluateSplitInputs input_right{
2,0,
parent_sum,
dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram_right)};
EvaluateSplitSharedInputs shared_inputs{
param,
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
};
GPUHistEvaluator evaluator{
tparam, static_cast<bst_feature_t>(feature_min_values.size()), 0};
dh::device_vector<EvaluateSplitInputs> inputs = std::vector<EvaluateSplitInputs>{input_left,input_right};
evaluator.LaunchEvaluateSplits(input_left.feature_set.size(),dh::ToSpan(inputs),shared_inputs, evaluator.GetEvaluator(),
dh::ToSpan(out_splits));
DeviceSplitCandidate result_left = out_splits[0];
EXPECT_EQ(result_left.findex, 1);
EXPECT_EQ(result_left.fvalue, 11.0);
DeviceSplitCandidate result_right = out_splits[1];
EXPECT_EQ(result_right.findex, 0);
EXPECT_EQ(result_right.fvalue, 1.0);
}
TEST_F(TestPartitionBasedSplit, GpuHist) {
dh::device_vector<FeatureType> ft{std::vector<FeatureType>{FeatureType::kCategorical}};
GPUHistEvaluator evaluator{param_, static_cast<bst_feature_t>(info_.num_col_), 0};
cuts_.cut_ptrs_.SetDevice(0);
cuts_.cut_values_.SetDevice(0);
cuts_.min_vals_.SetDevice(0);
evaluator.Reset(cuts_, dh::ToSpan(ft), info_.num_col_, param_, 0);
dh::device_vector<GradientPairPrecise> d_hist(hist_[0].size());
auto node_hist = hist_[0];
dh::safe_cuda(hipMemcpy(d_hist.data().get(), node_hist.data(), node_hist.size_bytes(),
hipMemcpyHostToDevice));
dh::device_vector<bst_feature_t> feature_set{std::vector<bst_feature_t>{0}};
EvaluateSplitInputs input{0, 0, total_gpair_, dh::ToSpan(feature_set), dh::ToSpan(d_hist)};
EvaluateSplitSharedInputs shared_inputs{
GPUTrainingParam{param_}, dh::ToSpan(ft),
cuts_.cut_ptrs_.ConstDeviceSpan(), cuts_.cut_values_.ConstDeviceSpan(),
cuts_.min_vals_.ConstDeviceSpan(),
};
auto split = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
ASSERT_NEAR(split.loss_chg, best_score_, 1e-16);
}
} // namespace tree
} // namespace xgboost
| b403442a2ffa92e501854db144cf043f7704fb72.cu | /*!
* Copyright 2020-2022 by XGBoost contributors
*/
#include <gtest/gtest.h>
#include "../../../../src/tree/gpu_hist/evaluate_splits.cuh"
#include "../../helpers.h"
#include "../../histogram_helpers.h"
#include "../test_evaluate_splits.h" // TestPartitionBasedSplit
namespace xgboost {
namespace tree {
namespace {
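// ZeroParam() returns a TrainParam with min_child_weight and lambda set to zero, so neither the
// minimum-hessian constraint nor L2 regularisation affects the split gains exercised below.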
auto ZeroParam() {
auto args = Args{{"min_child_weight", "0"},
{"lambda", "0"}};
TrainParam tparam;
tparam.UpdateAllowUnknown(args);
return tparam;
}
} // anonymous namespace
TEST_F(TestCategoricalSplitWithMissing, GPUHistEvaluator) {
thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0};
GPUTrainingParam param{param_};
cuts_.cut_ptrs_.SetDevice(0);
cuts_.cut_values_.SetDevice(0);
cuts_.min_vals_.SetDevice(0);
thrust::device_vector<GradientPairPrecise> feature_histogram{feature_histogram_};
dh::device_vector<FeatureType> feature_types(feature_set.size(), FeatureType::kCategorical);
auto d_feature_types = dh::ToSpan(feature_types);
EvaluateSplitInputs input{1, 0, parent_sum_, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
d_feature_types,
cuts_.cut_ptrs_.ConstDeviceSpan(),
cuts_.cut_values_.ConstDeviceSpan(),
cuts_.min_vals_.ConstDeviceSpan(),
};
GPUHistEvaluator evaluator{param_, static_cast<bst_feature_t>(feature_set.size()), 0};
evaluator.Reset(cuts_, dh::ToSpan(feature_types), feature_set.size(), param_, 0);
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
ASSERT_EQ(result.thresh, 1);
this->CheckResult(result.loss_chg, result.findex, result.fvalue, result.is_cat,
result.dir == kLeftDir, result.left_sum, result.right_sum);
}
TEST(GpuHist, PartitionBasic) {
TrainParam tparam = ZeroParam();
tparam.max_cat_to_onehot = 0;
GPUTrainingParam param{tparam};
common::HistogramCuts cuts;
cuts.cut_values_.HostVector() = std::vector<float>{0.0, 1.0, 2.0};
cuts.cut_ptrs_.HostVector() = std::vector<uint32_t>{0, 3};
cuts.min_vals_.HostVector() = std::vector<float>{0.0};
cuts.cut_ptrs_.SetDevice(0);
cuts.cut_values_.SetDevice(0);
cuts.min_vals_.SetDevice(0);
thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0};
thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0);
dh::device_vector<FeatureType> feature_types(feature_set.size(), FeatureType::kCategorical);
common::Span<FeatureType> d_feature_types;
auto max_cat =
*std::max_element(cuts.cut_values_.HostVector().begin(), cuts.cut_values_.HostVector().end());
cuts.SetCategorical(true, max_cat);
d_feature_types = dh::ToSpan(feature_types);
EvaluateSplitSharedInputs shared_inputs{
param,
d_feature_types,
cuts.cut_ptrs_.ConstDeviceSpan(),
cuts.cut_values_.ConstDeviceSpan(),
cuts.min_vals_.ConstDeviceSpan(),
};
GPUHistEvaluator evaluator{tparam, static_cast<bst_feature_t>(feature_set.size()), 0};
evaluator.Reset(cuts, dh::ToSpan(feature_types), feature_set.size(), tparam, 0);
{
// -1.0s go right
// -3.0s go left
GradientPairPrecise parent_sum(-5.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-1.0, 1.0}, {-3.0, 1.0}};
EvaluateSplitInputs input{0, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
auto cats = std::bitset<32>(evaluator.GetHostNodeCats(input.nidx)[0]);
EXPECT_EQ(result.dir, kLeftDir);
EXPECT_EQ(cats, std::bitset<32>("11000000000000000000000000000000"));
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
{
// -1.0s go right
// -3.0s go left
GradientPairPrecise parent_sum(-7.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-3.0, 1.0}, {-3.0, 1.0}};
EvaluateSplitInputs input{1, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
auto cats = std::bitset<32>(evaluator.GetHostNodeCats(input.nidx)[0]);
EXPECT_EQ(result.dir, kLeftDir);
EXPECT_EQ(cats, std::bitset<32>("10000000000000000000000000000000"));
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
{
// All -1.0, gain from splitting should be 0.0
GradientPairPrecise parent_sum(-3.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}};
EvaluateSplitInputs input{2, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
EXPECT_EQ(result.dir, kLeftDir);
EXPECT_FLOAT_EQ(result.loss_chg, 0.0f);
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
// With 3.0/3.0 missing values
  // Forward, the first 2 categories are selected, while the last one goes to the left along with the missing value
{
GradientPairPrecise parent_sum(0.0, 6.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}};
EvaluateSplitInputs input{3, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
auto cats = std::bitset<32>(evaluator.GetHostNodeCats(input.nidx)[0]);
EXPECT_EQ(cats, std::bitset<32>("11000000000000000000000000000000"));
EXPECT_EQ(result.dir, kLeftDir);
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
{
// -1.0s go right
// -3.0s go left
GradientPairPrecise parent_sum(-5.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-3.0, 1.0}, {-1.0, 1.0}};
EvaluateSplitInputs input{4, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
auto cats = std::bitset<32>(evaluator.GetHostNodeCats(input.nidx)[0]);
EXPECT_EQ(result.dir, kLeftDir);
EXPECT_EQ(cats, std::bitset<32>("10100000000000000000000000000000"));
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
{
// -1.0s go right
// -3.0s go left
GradientPairPrecise parent_sum(-5.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-3.0, 1.0}, {-1.0, 1.0}, {-3.0, 1.0}};
EvaluateSplitInputs input{5, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
auto cats = std::bitset<32>(evaluator.GetHostNodeCats(input.nidx)[0]);
EXPECT_EQ(cats, std::bitset<32>("01000000000000000000000000000000"));
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
}
TEST(GpuHist, PartitionTwoFeatures) {
TrainParam tparam = ZeroParam();
tparam.max_cat_to_onehot = 0;
GPUTrainingParam param{tparam};
common::HistogramCuts cuts;
cuts.cut_values_.HostVector() = std::vector<float>{0.0, 1.0, 2.0, 0.0, 1.0, 2.0};
cuts.cut_ptrs_.HostVector() = std::vector<uint32_t>{0, 3, 6};
cuts.min_vals_.HostVector() = std::vector<float>{0.0, 0.0};
cuts.cut_ptrs_.SetDevice(0);
cuts.cut_values_.SetDevice(0);
cuts.min_vals_.SetDevice(0);
thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0, 1};
thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0);
dh::device_vector<FeatureType> feature_types(feature_set.size(), FeatureType::kCategorical);
common::Span<FeatureType> d_feature_types(dh::ToSpan(feature_types));
auto max_cat =
*std::max_element(cuts.cut_values_.HostVector().begin(), cuts.cut_values_.HostVector().end());
cuts.SetCategorical(true, max_cat);
EvaluateSplitSharedInputs shared_inputs{
param,
d_feature_types,
cuts.cut_ptrs_.ConstDeviceSpan(),
cuts.cut_values_.ConstDeviceSpan(),
cuts.min_vals_.ConstDeviceSpan(),
};
GPUHistEvaluator evaluator{tparam, static_cast<bst_feature_t>(feature_set.size()), 0};
evaluator.Reset(cuts, dh::ToSpan(feature_types), feature_set.size(), tparam, 0);
{
GradientPairPrecise parent_sum(-6.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram = std::vector<GradientPairPrecise>{
{-2.0, 1.0}, {-2.0, 1.0}, {-2.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}, {-4.0, 1.0}};
EvaluateSplitInputs input{0, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
auto cats = std::bitset<32>(evaluator.GetHostNodeCats(input.nidx)[0]);
EXPECT_EQ(result.findex, 1);
EXPECT_EQ(cats, std::bitset<32>("11000000000000000000000000000000"));
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
{
GradientPairPrecise parent_sum(-6.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram = std::vector<GradientPairPrecise>{
{-2.0, 1.0}, {-2.0, 1.0}, {-2.0, 1.0}, {-1.0, 1.0}, {-2.5, 1.0}, {-2.5, 1.0}};
EvaluateSplitInputs input{1, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
auto cats = std::bitset<32>(evaluator.GetHostNodeCats(input.nidx)[0]);
EXPECT_EQ(result.findex, 1);
EXPECT_EQ(cats, std::bitset<32>("10000000000000000000000000000000"));
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess());
}
}
TEST(GpuHist, PartitionTwoNodes) {
TrainParam tparam = ZeroParam();
tparam.max_cat_to_onehot = 0;
GPUTrainingParam param{tparam};
common::HistogramCuts cuts;
cuts.cut_values_.HostVector() = std::vector<float>{0.0, 1.0, 2.0};
cuts.cut_ptrs_.HostVector() = std::vector<uint32_t>{0, 3};
cuts.min_vals_.HostVector() = std::vector<float>{0.0};
cuts.cut_ptrs_.SetDevice(0);
cuts.cut_values_.SetDevice(0);
cuts.min_vals_.SetDevice(0);
thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0};
thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0);
dh::device_vector<FeatureType> feature_types(feature_set.size(), FeatureType::kCategorical);
common::Span<FeatureType> d_feature_types(dh::ToSpan(feature_types));
auto max_cat =
*std::max_element(cuts.cut_values_.HostVector().begin(), cuts.cut_values_.HostVector().end());
cuts.SetCategorical(true, max_cat);
EvaluateSplitSharedInputs shared_inputs{
param,
d_feature_types,
cuts.cut_ptrs_.ConstDeviceSpan(),
cuts.cut_values_.ConstDeviceSpan(),
cuts.min_vals_.ConstDeviceSpan(),
};
GPUHistEvaluator evaluator{tparam, static_cast<bst_feature_t>(feature_set.size()), 0};
evaluator.Reset(cuts, dh::ToSpan(feature_types), feature_set.size(), tparam, 0);
{
GradientPairPrecise parent_sum(-6.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram_a =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-2.5, 1.0}, {-2.5, 1.0},
{-1.0, 1.0}, {-1.0, 1.0}, {-4.0, 1.0}};
thrust::device_vector<EvaluateSplitInputs> inputs(2);
inputs[0] = EvaluateSplitInputs{0, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram_a)};
thrust::device_vector<GradientPairPrecise> feature_histogram_b =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-1.0, 1.0}, {-4.0, 1.0}};
inputs[1] = EvaluateSplitInputs{1, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram_b)};
thrust::device_vector<GPUExpandEntry> results(2);
evaluator.EvaluateSplits({0, 1}, 1, dh::ToSpan(inputs), shared_inputs, dh::ToSpan(results));
GPUExpandEntry result_a = results[0];
GPUExpandEntry result_b = results[1];
EXPECT_EQ(std::bitset<32>(evaluator.GetHostNodeCats(0)[0]),
std::bitset<32>("10000000000000000000000000000000"));
EXPECT_EQ(std::bitset<32>(evaluator.GetHostNodeCats(1)[0]),
std::bitset<32>("11000000000000000000000000000000"));
}
}
void TestEvaluateSingleSplit(bool is_categorical) {
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
GPUTrainingParam param{tparam};
common::HistogramCuts cuts{MakeCutsForTest({1.0, 2.0, 11.0, 12.0}, {0, 2, 4}, {0.0, 0.0}, 0)};
thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0, 1};
// Setup gradients so that second feature gets higher gain
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{
{-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}};
dh::device_vector<FeatureType> feature_types(feature_set.size(),
FeatureType::kCategorical);
common::Span<FeatureType> d_feature_types;
if (is_categorical) {
auto max_cat = *std::max_element(cuts.cut_values_.HostVector().begin(),
cuts.cut_values_.HostVector().end());
cuts.SetCategorical(true, max_cat);
d_feature_types = dh::ToSpan(feature_types);
}
EvaluateSplitInputs input{1, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
d_feature_types,
cuts.cut_ptrs_.ConstDeviceSpan(),
cuts.cut_values_.ConstDeviceSpan(),
cuts.min_vals_.ConstDeviceSpan(),
};
GPUHistEvaluator evaluator{
tparam, static_cast<bst_feature_t>(feature_set.size()), 0};
evaluator.Reset(cuts, dh::ToSpan(feature_types), feature_set.size(), tparam, 0);
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
EXPECT_EQ(result.findex, 1);
if (is_categorical) {
ASSERT_TRUE(std::isnan(result.fvalue));
} else {
EXPECT_EQ(result.fvalue, 11.0);
}
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(),
parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(),
parent_sum.GetHess());
}
TEST(GpuHist, EvaluateSingleSplit) {
TestEvaluateSingleSplit(false);
}
TEST(GpuHist, EvaluateSingleCategoricalSplit) {
TestEvaluateSingleSplit(true);
}
TEST(GpuHist, EvaluateSingleSplitMissing) {
GradientPairPrecise parent_sum(1.0, 1.5);
TrainParam tparam = ZeroParam();
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{0};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2};
thrust::device_vector<float> feature_values = std::vector<float>{1.0, 2.0};
thrust::device_vector<float> feature_min_values = std::vector<float>{0.0};
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-0.5, 0.5}, {0.5, 0.5}};
EvaluateSplitInputs input{1,0,
parent_sum,
dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
};
GPUHistEvaluator evaluator(tparam, feature_set.size(), 0);
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
EXPECT_EQ(result.findex, 0);
EXPECT_EQ(result.fvalue, 1.0);
EXPECT_EQ(result.dir, kRightDir);
EXPECT_EQ(result.left_sum, GradientPairPrecise(-0.5, 0.5));
EXPECT_EQ(result.right_sum, GradientPairPrecise(1.5, 1.0));
}
TEST(GpuHist, EvaluateSingleSplitEmpty) {
TrainParam tparam = ZeroParam();
GPUHistEvaluator evaluator(tparam, 1, 0);
DeviceSplitCandidate result =
evaluator.EvaluateSingleSplit(EvaluateSplitInputs{}, EvaluateSplitSharedInputs{}).split;
EXPECT_EQ(result.findex, -1);
EXPECT_LT(result.loss_chg, 0.0f);
}
// Feature 0 has a better split, but the algorithm must select feature 1
TEST(GpuHist, EvaluateSingleSplitFeatureSampling) {
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
tparam.UpdateAllowUnknown(Args{});
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{1};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2, 4};
thrust::device_vector<float> feature_values =
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 10.0};
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{
{-10.0, 0.5}, {10.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
EvaluateSplitInputs input{1,0,
parent_sum,
dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
};
GPUHistEvaluator evaluator(tparam, feature_min_values.size(), 0);
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
EXPECT_EQ(result.findex, 1);
EXPECT_EQ(result.fvalue, 11.0);
EXPECT_EQ(result.left_sum, GradientPairPrecise(-0.5, 0.5));
EXPECT_EQ(result.right_sum, GradientPairPrecise(0.5, 0.5));
}
// Features 0 and 1 have identical gain; the algorithm must select 0
TEST(GpuHist, EvaluateSingleSplitBreakTies) {
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
tparam.UpdateAllowUnknown(Args{});
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{0, 1};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2, 4};
thrust::device_vector<float> feature_values =
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 10.0};
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{
{-0.5, 0.5}, {0.5, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
EvaluateSplitInputs input{1,0,
parent_sum,
dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
};
GPUHistEvaluator evaluator(tparam, feature_min_values.size(), 0);
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input,shared_inputs).split;
EXPECT_EQ(result.findex, 0);
EXPECT_EQ(result.fvalue, 1.0);
}
TEST(GpuHist, EvaluateSplits) {
thrust::device_vector<DeviceSplitCandidate> out_splits(2);
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
tparam.UpdateAllowUnknown(Args{});
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{0, 1};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2, 4};
thrust::device_vector<float> feature_values =
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 0.0};
thrust::device_vector<GradientPairPrecise> feature_histogram_left =
std::vector<GradientPairPrecise>{
{-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}};
thrust::device_vector<GradientPairPrecise> feature_histogram_right =
std::vector<GradientPairPrecise>{
{-1.0, 0.5}, {1.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
EvaluateSplitInputs input_left{
1,0,
parent_sum,
dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram_left)};
EvaluateSplitInputs input_right{
2,0,
parent_sum,
dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram_right)};
EvaluateSplitSharedInputs shared_inputs{
param,
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
};
GPUHistEvaluator evaluator{
tparam, static_cast<bst_feature_t>(feature_min_values.size()), 0};
dh::device_vector<EvaluateSplitInputs> inputs = std::vector<EvaluateSplitInputs>{input_left,input_right};
evaluator.LaunchEvaluateSplits(input_left.feature_set.size(),dh::ToSpan(inputs),shared_inputs, evaluator.GetEvaluator(),
dh::ToSpan(out_splits));
DeviceSplitCandidate result_left = out_splits[0];
EXPECT_EQ(result_left.findex, 1);
EXPECT_EQ(result_left.fvalue, 11.0);
DeviceSplitCandidate result_right = out_splits[1];
EXPECT_EQ(result_right.findex, 0);
EXPECT_EQ(result_right.fvalue, 1.0);
}
TEST_F(TestPartitionBasedSplit, GpuHist) {
dh::device_vector<FeatureType> ft{std::vector<FeatureType>{FeatureType::kCategorical}};
GPUHistEvaluator evaluator{param_, static_cast<bst_feature_t>(info_.num_col_), 0};
cuts_.cut_ptrs_.SetDevice(0);
cuts_.cut_values_.SetDevice(0);
cuts_.min_vals_.SetDevice(0);
evaluator.Reset(cuts_, dh::ToSpan(ft), info_.num_col_, param_, 0);
dh::device_vector<GradientPairPrecise> d_hist(hist_[0].size());
auto node_hist = hist_[0];
dh::safe_cuda(cudaMemcpy(d_hist.data().get(), node_hist.data(), node_hist.size_bytes(),
cudaMemcpyHostToDevice));
dh::device_vector<bst_feature_t> feature_set{std::vector<bst_feature_t>{0}};
EvaluateSplitInputs input{0, 0, total_gpair_, dh::ToSpan(feature_set), dh::ToSpan(d_hist)};
EvaluateSplitSharedInputs shared_inputs{
GPUTrainingParam{param_}, dh::ToSpan(ft),
cuts_.cut_ptrs_.ConstDeviceSpan(), cuts_.cut_values_.ConstDeviceSpan(),
cuts_.min_vals_.ConstDeviceSpan(),
};
auto split = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
ASSERT_NEAR(split.loss_chg, best_score_, 1e-16);
}
} // namespace tree
} // namespace xgboost
|
62566ce7828971b20fcd29d1c44373cde6940462.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by Yue Jiang on 2018.11.24
//
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <vector>
#include <ATen/ATen.h>
#define PI 3.14159265358979323846f
namespace {
__device__ __forceinline__ float DegToRad(const float °) { return (deg * (PI / 180.f)); }
__device__ __forceinline__ float length(
const float x,
const float y,
const float z) {
return sqrtf(powf(x, 2) + powf(y, 2) + powf(z, 2));
}
// Cross product
__device__ __forceinline__ float cross_x(
const float a_x,
const float a_y,
const float a_z,
const float b_x,
const float b_y,
const float b_z) {
return a_y * b_z - a_z * b_y;
}
__device__ __forceinline__ float cross_y(
const float a_x,
const float a_y,
const float a_z,
const float b_x,
const float b_y,
const float b_z) {
return a_z * b_x - a_x * b_z;
}
__device__ __forceinline__ float cross_z(
const float a_x,
const float a_y,
const float a_z,
const float b_x,
const float b_y,
const float b_z) {
return a_x * b_y - a_y * b_x;
}
__global__ void GenerateRay(
float* origins,
float* directions,
float* origin_image_distances,
float* pixel_distances,
const int width,
const int height,
const float eye_x,
const float eye_y,
const float eye_z) {
const float at_x = 0;
const float at_y = 0;
const float at_z = 0;
const float up_x = 0;
const float up_y = 1;
const float up_z = 0;
// Compute camera view volume
const float top = tan(DegToRad(30));
const float bottom = -top;
const float right = (__int2float_rd(width) / __int2float_rd(height)) * top;
const float left = -right;
    // Compute local basis
const float w_x = (eye_x - at_x) / length(eye_x - at_x, eye_y - at_y, eye_z - at_z);
const float w_y = (eye_y - at_y) / length(eye_x - at_x, eye_y - at_y, eye_z - at_z);
const float w_z = (eye_z - at_z) / length(eye_x - at_x, eye_y - at_y, eye_z - at_z);
const float cross_up_w_x = cross_x(up_x, up_y, up_z, w_x, w_y, w_z);
const float cross_up_w_y = cross_y(up_x, up_y, up_z, w_x, w_y, w_z);
const float cross_up_w_z = cross_z(up_x, up_y, up_z, w_x, w_y, w_z);
const float u_x = (cross_up_w_x) / length(cross_up_w_x, cross_up_w_y, cross_up_w_z);
const float u_y = (cross_up_w_y) / length(cross_up_w_x, cross_up_w_y, cross_up_w_z);
const float u_z = (cross_up_w_z) / length(cross_up_w_x, cross_up_w_y, cross_up_w_z);
const float v_x = cross_x(w_x, w_y, w_z, u_x, u_y, u_z);
const float v_y = cross_y(w_x, w_y, w_z, u_x, u_y, u_z);
const float v_z = cross_z(w_x, w_y, w_z, u_x, u_y, u_z);
const int pixel_index = blockIdx.x * blockDim.x + threadIdx.x;
if (pixel_index < width * height) {
const int x = pixel_index % width;
const int y = pixel_index / width;
const int i = 3 * pixel_index;
// Compute point on view plane
// Ray passes through the center of the pixel
const float view_plane_x = left + (right - left) * (__int2float_rd(x) + 0.5) / __int2float_rd(width);
const float view_plane_y = top - (top - bottom) * (__int2float_rd(y) + 0.5) / __int2float_rd(height);
const float s_x = view_plane_x * u_x + view_plane_y * v_x - w_x;
const float s_y = view_plane_x * u_y + view_plane_y * v_y - w_y;
const float s_z = view_plane_x * u_z + view_plane_y * v_z - w_z;
origins[i] = eye_x;
origins[i+1] = eye_y;
origins[i+2] = eye_z;
directions[i] = s_x / length(s_x, s_y, s_z);
directions[i+1] = s_y / length(s_x, s_y, s_z);
directions[i+2] = s_z / length(s_x, s_y, s_z);
origin_image_distances[pixel_index] = length(s_x, s_y, s_z);
pixel_distances[pixel_index] = (right - left) / __int2float_rd(width);
}
}
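// A minimal sketch of the normalisation used repeatedly above (the helper name is an assumption
// and nothing in this file calls it): divide each component by the vector's length.
__device__ __forceinline__ void normalize3(float &x, float &y, float &z) {
    const float len = length(x, y, z);  // Euclidean norm of (x, y, z)
    x /= len;
    y /= len;
    z /= len;
}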
// Check if a point is inside the bounding box
__device__ __forceinline__ bool InsideBoundingBox(
const float p_x,
const float p_y,
const float p_z,
const float bounding_box_min_x,
const float bounding_box_min_y,
const float bounding_box_min_z,
const float bounding_box_max_x,
const float bounding_box_max_y,
const float bounding_box_max_z) {
return (p_x >= bounding_box_min_x) && (p_x <= bounding_box_max_x) &&
(p_y >= bounding_box_min_y) && (p_y <= bounding_box_max_y) &&
(p_z >= bounding_box_min_z) && (p_z <= bounding_box_max_z);
}
// Compute the distance along the ray between the point and the bounding box
__device__ float Distance(
const float reached_point_x,
const float reached_point_y,
const float reached_point_z,
float direction_x,
float direction_y,
float direction_z,
const float bounding_box_min_x,
const float bounding_box_min_y,
const float bounding_box_min_z,
const float bounding_box_max_x,
const float bounding_box_max_y,
const float bounding_box_max_z) {
float dist = -1.f;
direction_x = direction_x / length(direction_x, direction_y, direction_z);
direction_y = direction_y / length(direction_x, direction_y, direction_z);
direction_z = direction_z / length(direction_x, direction_y, direction_z);
// For each axis count any excess distance outside box extents
float v = reached_point_x;
float d = direction_x;
if (dist == -1) {
if ((v < bounding_box_min_x) && (d > 0)) { dist = (bounding_box_min_x - v) / d; }
if ((v > bounding_box_max_x) && (d < 0)) { dist = (bounding_box_max_x - v) / d; }
} else {
if ((v < bounding_box_min_x) && (d > 0)) { dist = fmaxf(dist, (bounding_box_min_x - v) / d); }
if ((v > bounding_box_max_x) && (d < 0)) { dist = fmaxf(dist, (bounding_box_max_x - v) / d); }
}
v = reached_point_y;
d = direction_y;
if (dist == -1) {
if ((v < bounding_box_min_y) && (d > 0)) { dist = (bounding_box_min_y - v) / d; }
if ((v > bounding_box_max_y) && (d < 0)) { dist = (bounding_box_max_y - v) / d; }
} else {
if ((v < bounding_box_min_y) && (d > 0)) { dist = fmaxf(dist, (bounding_box_min_y - v) / d); }
if ((v > bounding_box_max_y) && (d < 0)) { dist = fmaxf(dist, (bounding_box_max_y - v) / d); }
}
v = reached_point_z;
d = direction_z;
if (dist == -1) {
if ((v < bounding_box_min_z) && (d > 0)) { dist = (bounding_box_min_z - v) / d; }
if ((v > bounding_box_max_z) && (d < 0)) { dist = (bounding_box_max_z - v) / d; }
} else {
if ((v < bounding_box_min_z) && (d > 0)) { dist = fmaxf(dist, (bounding_box_min_z - v) / d); }
if ((v > bounding_box_max_z) && (d < 0)) { dist = fmaxf(dist, (bounding_box_max_z - v) / d); }
}
return dist;
}
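// flat() maps an (x, y, z) voxel index to a row-major offset into the grid array, with z varying
// fastest; the float inputs are truncated to integer indices.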
__device__ __forceinline__ int flat(float const x, float const y, float const z,
int const grid_res_x, int const grid_res_y, int const grid_res_z) {
return __int2float_rd(z) + __int2float_rd(y) * grid_res_z + __int2float_rd(x) * grid_res_z * grid_res_y;
}
// Get the signed distance value at the specific point
__device__ float ValueAt(
const float* grid,
const float reached_point_x,
const float reached_point_y,
const float reached_point_z,
const float direction_x,
const float direction_y,
const float direction_z,
const float bounding_box_min_x,
const float bounding_box_min_y,
const float bounding_box_min_z,
const float bounding_box_max_x,
const float bounding_box_max_y,
const float bounding_box_max_z,
const int grid_res_x,
const int grid_res_y,
const int grid_res_z,
const bool first_time) {
// Check if we are outside the BBOX
if (!InsideBoundingBox(reached_point_x, reached_point_y, reached_point_z,
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z)) {
// If it is the first time, then the ray has not entered the grid
if (first_time) {
return Distance(reached_point_x, reached_point_y, reached_point_z,
direction_x, direction_y, direction_z,
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z) + 0.00001f;
}
// Otherwise, the ray has left the grid
else {
return -1;
}
}
// Compute voxel size
float voxel_size = (bounding_box_max_x - bounding_box_min_x) / (grid_res_x - 1);
    // Compute the minimum point of the intersecting voxel
float min_index_x = floorf((reached_point_x - bounding_box_min_x) / voxel_size);
float min_index_y = floorf((reached_point_y - bounding_box_min_y) / voxel_size);
float min_index_z = floorf((reached_point_z - bounding_box_min_z) / voxel_size);
// Check whether the ray intersects the vertex with the last index of the axis
// If so, we should record the previous index
if (min_index_x == (bounding_box_max_x - bounding_box_min_x) / voxel_size) {
min_index_x = (bounding_box_max_x - bounding_box_min_x) / voxel_size - 1;
}
if (min_index_y == (bounding_box_max_y - bounding_box_min_y) / voxel_size) {
min_index_y = (bounding_box_max_y - bounding_box_min_y) / voxel_size - 1;
}
if (min_index_z == (bounding_box_max_z - bounding_box_min_z) / voxel_size) {
min_index_z = (bounding_box_max_z - bounding_box_min_z) / voxel_size - 1;
}
// Linear interpolate along x axis the eight values
const float tx = (reached_point_x - (bounding_box_min_x + min_index_x * voxel_size)) / voxel_size;
const float c01 = (1.f - tx) * grid[flat(min_index_x, min_index_y, min_index_z, grid_res_x, grid_res_y, grid_res_z)]
+ tx * grid[flat(min_index_x+1, min_index_y, min_index_z, grid_res_x, grid_res_y, grid_res_z)];
const float c23 = (1.f - tx) * grid[flat(min_index_x, min_index_y+1, min_index_z, grid_res_x, grid_res_y, grid_res_z)]
+ tx * grid[flat(min_index_x+1, min_index_y+1, min_index_z, grid_res_x, grid_res_y, grid_res_z)];
const float c45 = (1.f - tx) * grid[flat(min_index_x, min_index_y, min_index_z+1, grid_res_x, grid_res_y, grid_res_z)]
+ tx * grid[flat(min_index_x+1, min_index_y, min_index_z+1, grid_res_x, grid_res_y, grid_res_z)];
const float c67 = (1.f - tx) * grid[flat(min_index_x, min_index_y+1, min_index_z+1, grid_res_x, grid_res_y, grid_res_z)]
+ tx * grid[flat(min_index_x+1, min_index_y+1, min_index_z+1, grid_res_x, grid_res_y, grid_res_z)];
// Linear interpolate along the y axis
const float ty = (reached_point_y - (bounding_box_min_y + min_index_y * voxel_size)) / voxel_size;
const float c0 = (1.f - ty) * c01 + ty * c23;
const float c1 = (1.f - ty) * c45 + ty * c67;
// Return final value interpolated along z
const float tz = (reached_point_z - (bounding_box_min_z + min_index_z * voxel_size)) / voxel_size;
return (1.f - tz) * c0 + tz * c1;
}
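// The blend above is a standard trilinear interpolation: three nested linear interpolations along
// x, y and z. A minimal sketch of the underlying lerp (the helper name is an assumption and
// nothing in this file calls it):
__device__ __forceinline__ float lerp1(const float a, const float b, const float t) {
    return (1.f - t) * a + t * b;  // blend a and b with weight t
}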
// Compute the intersection of the ray and the grid
// The intersection procedure uses ray marching to check if we have an interaction with the stored surface
__global__ void Intersect(
const float* grid,
const float* origins,
const float* directions,
const float* origin_image_distances,
const float* pixel_distances,
const float bounding_box_min_x,
const float bounding_box_min_y,
const float bounding_box_min_z,
const float bounding_box_max_x,
const float bounding_box_max_y,
const float bounding_box_max_z,
const int grid_res_x,
const int grid_res_y,
const int grid_res_z,
float* voxel_position,
float* intersection_pos,
const int width,
const int height) {
// Compute voxel size
const float voxel_size = (bounding_box_max_x - bounding_box_min_x) / (grid_res_x - 1);
// Define constant values
const int max_steps = 1000;
bool first_time = true;
float depth = 0;
int gotten_result = 0;
const int pixel_index = blockIdx.x * blockDim.x + threadIdx.x;
if (pixel_index < width * height) {
const int i = 3 * pixel_index;
for (int steps = 0; steps < max_steps; steps++) {
float reached_point_x = origins[i] + depth * directions[i];
float reached_point_y = origins[i+1] + depth * directions[i+1];
float reached_point_z = origins[i+2] + depth * directions[i+2];
// Get the signed distance value for the point the ray reaches
const float distance = ValueAt(grid, reached_point_x, reached_point_y, reached_point_z,
directions[i], directions[i+1], directions[i+2],
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z,
grid_res_x,
grid_res_y,
grid_res_z, first_time);
first_time = false;
            // Check if the ray is going outside the bounding box
if (distance == -1) {
voxel_position[i] = -1;
voxel_position[i+1] = -1;
voxel_position[i+2] = -1;
intersection_pos[i] = -1;
intersection_pos[i+1] = -1;
intersection_pos[i+2] = -1;
gotten_result = 1;
break;
}
// Check if we are close enough to the surface
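            // The threshold is the pixel footprint at the current depth: pixel_distances /
            // origin_image_distances is the per-pixel angular size, scaled by depth and divided
            // by 10 as a safety margin, so the march stops once the remaining signed distance is
            // small relative to what one pixel covers there.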
if (distance < pixel_distances[pixel_index] / origin_image_distances[pixel_index] * depth / 10) {
                // Compute the minimum point of the intersecting voxel
voxel_position[i] = floorf((reached_point_x - bounding_box_min_x) / voxel_size);
voxel_position[i+1] = floorf((reached_point_y - bounding_box_min_y) / voxel_size);
voxel_position[i+2] = floorf((reached_point_z - bounding_box_min_z) / voxel_size);
if (voxel_position[i] == grid_res_x - 1) {
voxel_position[i] = voxel_position[i] - 1;
}
if (voxel_position[i+1] == grid_res_x - 1) {
voxel_position[i+1] = voxel_position[i+1] - 1;
}
if (voxel_position[i+2] == grid_res_x - 1) {
voxel_position[i+2] = voxel_position[i+2] - 1;
}
intersection_pos[i] = reached_point_x;
intersection_pos[i+1] = reached_point_y;
intersection_pos[i+2] = reached_point_z;
gotten_result = 1;
break;
}
// Increase distance
depth += distance;
}
if (gotten_result == 0) {
// No intersections
voxel_position[i] = -1;
voxel_position[i+1] = -1;
voxel_position[i+2] = -1;
intersection_pos[i] = -1;
intersection_pos[i+1] = -1;
intersection_pos[i+2] = -1;
}
}
}
} // namespace
// Ray marching to get the first corner position of the voxel the ray intersects
std::vector<at::Tensor> ray_matching_cuda(
const at::Tensor w_h_3,
const at::Tensor w_h,
const at::Tensor grid,
const int width,
const int height,
const float bounding_box_min_x,
const float bounding_box_min_y,
const float bounding_box_min_z,
const float bounding_box_max_x,
const float bounding_box_max_y,
const float bounding_box_max_z,
const int grid_res_x,
const int grid_res_y,
const int grid_res_z,
const float eye_x,
const float eye_y,
const float eye_z) {
const int thread = 512;
at::Tensor origins = at::zeros_like(w_h_3);
at::Tensor directions = at::zeros_like(w_h_3);
at::Tensor origin_image_distances = at::zeros_like(w_h);
at::Tensor pixel_distances = at::zeros_like(w_h);
hipLaunchKernelGGL(( GenerateRay), dim3((width * height + thread - 1) / thread), dim3(thread), 0, 0,
origins.data<float>(),
directions.data<float>(),
origin_image_distances.data<float>(),
pixel_distances.data<float>(),
width,
height,
eye_x,
eye_y,
eye_z);
at::Tensor voxel_position = at::zeros_like(w_h_3);
at::Tensor intersection_pos = at::zeros_like(w_h_3);
hipLaunchKernelGGL(( Intersect), dim3((width * height + thread - 1) / thread), dim3(thread), 0, 0,
grid.data<float>(),
origins.data<float>(),
directions.data<float>(),
origin_image_distances.data<float>(),
pixel_distances.data<float>(),
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z,
grid_res_x,
grid_res_y,
grid_res_z,
voxel_position.data<float>(),
intersection_pos.data<float>(),
width,
height);
return {intersection_pos, voxel_position, directions};
}
| 62566ce7828971b20fcd29d1c44373cde6940462.cu | //
// Created by Yue Jiang on 2018.11.24
//
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <vector>
#include <ATen/ATen.h>
#define PI 3.14159265358979323846f
namespace {
__device__ __forceinline__ float DegToRad(const float °) { return (deg * (PI / 180.f)); }
__device__ __forceinline__ float length(
const float x,
const float y,
const float z) {
return sqrtf(powf(x, 2) + powf(y, 2) + powf(z, 2));
}
// Cross product
__device__ __forceinline__ float cross_x(
const float a_x,
const float a_y,
const float a_z,
const float b_x,
const float b_y,
const float b_z) {
return a_y * b_z - a_z * b_y;
}
__device__ __forceinline__ float cross_y(
const float a_x,
const float a_y,
const float a_z,
const float b_x,
const float b_y,
const float b_z) {
return a_z * b_x - a_x * b_z;
}
__device__ __forceinline__ float cross_z(
const float a_x,
const float a_y,
const float a_z,
const float b_x,
const float b_y,
const float b_z) {
return a_x * b_y - a_y * b_x;
}
__global__ void GenerateRay(
float* origins,
float* directions,
float* origin_image_distances,
float* pixel_distances,
const int width,
const int height,
const float eye_x,
const float eye_y,
const float eye_z) {
const float at_x = 0;
const float at_y = 0;
const float at_z = 0;
const float up_x = 0;
const float up_y = 1;
const float up_z = 0;
// Compute camera view volume
const float top = tan(DegToRad(30));
const float bottom = -top;
const float right = (__int2float_rd(width) / __int2float_rd(height)) * top;
const float left = -right;
// Compute local base
const float w_x = (eye_x - at_x) / length(eye_x - at_x, eye_y - at_y, eye_z - at_z);
const float w_y = (eye_y - at_y) / length(eye_x - at_x, eye_y - at_y, eye_z - at_z);
const float w_z = (eye_z - at_z) / length(eye_x - at_x, eye_y - at_y, eye_z - at_z);
const float cross_up_w_x = cross_x(up_x, up_y, up_z, w_x, w_y, w_z);
const float cross_up_w_y = cross_y(up_x, up_y, up_z, w_x, w_y, w_z);
const float cross_up_w_z = cross_z(up_x, up_y, up_z, w_x, w_y, w_z);
const float u_x = (cross_up_w_x) / length(cross_up_w_x, cross_up_w_y, cross_up_w_z);
const float u_y = (cross_up_w_y) / length(cross_up_w_x, cross_up_w_y, cross_up_w_z);
const float u_z = (cross_up_w_z) / length(cross_up_w_x, cross_up_w_y, cross_up_w_z);
const float v_x = cross_x(w_x, w_y, w_z, u_x, u_y, u_z);
const float v_y = cross_y(w_x, w_y, w_z, u_x, u_y, u_z);
const float v_z = cross_z(w_x, w_y, w_z, u_x, u_y, u_z);
const int pixel_index = blockIdx.x * blockDim.x + threadIdx.x;
if (pixel_index < width * height) {
const int x = pixel_index % width;
const int y = pixel_index / width;
const int i = 3 * pixel_index;
// Compute point on view plane
// Ray passes through the center of the pixel
const float view_plane_x = left + (right - left) * (__int2float_rd(x) + 0.5) / __int2float_rd(width);
const float view_plane_y = top - (top - bottom) * (__int2float_rd(y) + 0.5) / __int2float_rd(height);
const float s_x = view_plane_x * u_x + view_plane_y * v_x - w_x;
const float s_y = view_plane_x * u_y + view_plane_y * v_y - w_y;
const float s_z = view_plane_x * u_z + view_plane_y * v_z - w_z;
origins[i] = eye_x;
origins[i+1] = eye_y;
origins[i+2] = eye_z;
directions[i] = s_x / length(s_x, s_y, s_z);
directions[i+1] = s_y / length(s_x, s_y, s_z);
directions[i+2] = s_z / length(s_x, s_y, s_z);
origin_image_distances[pixel_index] = length(s_x, s_y, s_z);
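// Pixel footprint on the view plane; used later to scale the ray-marching hit threshold with depth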
pixel_distances[pixel_index] = (right - left) / __int2float_rd(width);
}
}
// Check if a point is inside
__device__ __forceinline__ bool InsideBoundingBox(
const float p_x,
const float p_y,
const float p_z,
const float bounding_box_min_x,
const float bounding_box_min_y,
const float bounding_box_min_z,
const float bounding_box_max_x,
const float bounding_box_max_y,
const float bounding_box_max_z) {
return (p_x >= bounding_box_min_x) && (p_x <= bounding_box_max_x) &&
(p_y >= bounding_box_min_y) && (p_y <= bounding_box_max_y) &&
(p_z >= bounding_box_min_z) && (p_z <= bounding_box_max_z);
}
// Compute the distance along the ray between the point and the bounding box
__device__ float Distance(
const float reached_point_x,
const float reached_point_y,
const float reached_point_z,
float direction_x,
float direction_y,
float direction_z,
const float bounding_box_min_x,
const float bounding_box_min_y,
const float bounding_box_min_z,
const float bounding_box_max_x,
const float bounding_box_max_y,
const float bounding_box_max_z) {
float dist = -1.f;
direction_x = direction_x / length(direction_x, direction_y, direction_z);
direction_y = direction_y / length(direction_x, direction_y, direction_z);
direction_z = direction_z / length(direction_x, direction_y, direction_z);
// For each axis count any excess distance outside box extents
float v = reached_point_x;
float d = direction_x;
if (dist == -1) {
if ((v < bounding_box_min_x) && (d > 0)) { dist = (bounding_box_min_x - v) / d; }
if ((v > bounding_box_max_x) && (d < 0)) { dist = (bounding_box_max_x - v) / d; }
} else {
if ((v < bounding_box_min_x) && (d > 0)) { dist = fmaxf(dist, (bounding_box_min_x - v) / d); }
if ((v > bounding_box_max_x) && (d < 0)) { dist = fmaxf(dist, (bounding_box_max_x - v) / d); }
}
v = reached_point_y;
d = direction_y;
if (dist == -1) {
if ((v < bounding_box_min_y) && (d > 0)) { dist = (bounding_box_min_y - v) / d; }
if ((v > bounding_box_max_y) && (d < 0)) { dist = (bounding_box_max_y - v) / d; }
} else {
if ((v < bounding_box_min_y) && (d > 0)) { dist = fmaxf(dist, (bounding_box_min_y - v) / d); }
if ((v > bounding_box_max_y) && (d < 0)) { dist = fmaxf(dist, (bounding_box_max_y - v) / d); }
}
v = reached_point_z;
d = direction_z;
if (dist == -1) {
if ((v < bounding_box_min_z) && (d > 0)) { dist = (bounding_box_min_z - v) / d; }
if ((v > bounding_box_max_z) && (d < 0)) { dist = (bounding_box_max_z - v) / d; }
} else {
if ((v < bounding_box_min_z) && (d > 0)) { dist = fmaxf(dist, (bounding_box_min_z - v) / d); }
if ((v > bounding_box_max_z) && (d < 0)) { dist = fmaxf(dist, (bounding_box_max_z - v) / d); }
}
return dist;
}
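// Flatten (x, y, z) voxel coordinates (already floor'd, passed as floats) into a linear grid index, with z varying fastest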
__device__ __forceinline__ int flat(float const x, float const y, float const z,
int const grid_res_x, int const grid_res_y, int const grid_res_z) {
return __int2float_rd(z) + __int2float_rd(y) * grid_res_z + __int2float_rd(x) * grid_res_z * grid_res_y;
}
// Get the signed distance value at the specific point
__device__ float ValueAt(
const float* grid,
const float reached_point_x,
const float reached_point_y,
const float reached_point_z,
const float direction_x,
const float direction_y,
const float direction_z,
const float bounding_box_min_x,
const float bounding_box_min_y,
const float bounding_box_min_z,
const float bounding_box_max_x,
const float bounding_box_max_y,
const float bounding_box_max_z,
const int grid_res_x,
const int grid_res_y,
const int grid_res_z,
const bool first_time) {
// Check if we are outside the BBOX
if (!InsideBoundingBox(reached_point_x, reached_point_y, reached_point_z,
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z)) {
// If it is the first time, then the ray has not entered the grid
if (first_time) {
return Distance(reached_point_x, reached_point_y, reached_point_z,
direction_x, direction_y, direction_z,
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z) + 0.00001f;
}
// Otherwise, the ray has left the grid
else {
return -1;
}
}
// Compute voxel size
float voxel_size = (bounding_box_max_x - bounding_box_min_x) / (grid_res_x - 1);
// Compute the minimum point of the intersecting voxel
float min_index_x = floorf((reached_point_x - bounding_box_min_x) / voxel_size);
float min_index_y = floorf((reached_point_y - bounding_box_min_y) / voxel_size);
float min_index_z = floorf((reached_point_z - bounding_box_min_z) / voxel_size);
// Check whether the ray intersects the vertex with the last index of the axis
// If so, we should record the previous index
if (min_index_x == (bounding_box_max_x - bounding_box_min_x) / voxel_size) {
min_index_x = (bounding_box_max_x - bounding_box_min_x) / voxel_size - 1;
}
if (min_index_y == (bounding_box_max_y - bounding_box_min_y) / voxel_size) {
min_index_y = (bounding_box_max_y - bounding_box_min_y) / voxel_size - 1;
}
if (min_index_z == (bounding_box_max_z - bounding_box_min_z) / voxel_size) {
min_index_z = (bounding_box_max_z - bounding_box_min_z) / voxel_size - 1;
}
// Linear interpolate along x axis the eight values
const float tx = (reached_point_x - (bounding_box_min_x + min_index_x * voxel_size)) / voxel_size;
const float c01 = (1.f - tx) * grid[flat(min_index_x, min_index_y, min_index_z, grid_res_x, grid_res_y, grid_res_z)]
+ tx * grid[flat(min_index_x+1, min_index_y, min_index_z, grid_res_x, grid_res_y, grid_res_z)];
const float c23 = (1.f - tx) * grid[flat(min_index_x, min_index_y+1, min_index_z, grid_res_x, grid_res_y, grid_res_z)]
+ tx * grid[flat(min_index_x+1, min_index_y+1, min_index_z, grid_res_x, grid_res_y, grid_res_z)];
const float c45 = (1.f - tx) * grid[flat(min_index_x, min_index_y, min_index_z+1, grid_res_x, grid_res_y, grid_res_z)]
+ tx * grid[flat(min_index_x+1, min_index_y, min_index_z+1, grid_res_x, grid_res_y, grid_res_z)];
const float c67 = (1.f - tx) * grid[flat(min_index_x, min_index_y+1, min_index_z+1, grid_res_x, grid_res_y, grid_res_z)]
+ tx * grid[flat(min_index_x+1, min_index_y+1, min_index_z+1, grid_res_x, grid_res_y, grid_res_z)];
// Linear interpolate along the y axis
const float ty = (reached_point_y - (bounding_box_min_y + min_index_y * voxel_size)) / voxel_size;
const float c0 = (1.f - ty) * c01 + ty * c23;
const float c1 = (1.f - ty) * c45 + ty * c67;
// Return final value interpolated along z
const float tz = (reached_point_z - (bounding_box_min_z + min_index_z * voxel_size)) / voxel_size;
return (1.f - tz) * c0 + tz * c1;
}
// Compute the intersection of the ray and the grid
// The intersection procedure uses ray marching to check if we have an interaction with the stored surface
__global__ void Intersect(
const float* grid,
const float* origins,
const float* directions,
const float* origin_image_distances,
const float* pixel_distances,
const float bounding_box_min_x,
const float bounding_box_min_y,
const float bounding_box_min_z,
const float bounding_box_max_x,
const float bounding_box_max_y,
const float bounding_box_max_z,
const int grid_res_x,
const int grid_res_y,
const int grid_res_z,
float* voxel_position,
float* intersection_pos,
const int width,
const int height) {
// Compute voxel size
const float voxel_size = (bounding_box_max_x - bounding_box_min_x) / (grid_res_x - 1);
// Define constant values
const int max_steps = 1000;
bool first_time = true;
float depth = 0;
int gotten_result = 0;
const int pixel_index = blockIdx.x * blockDim.x + threadIdx.x;
if (pixel_index < width * height) {
const int i = 3 * pixel_index;
for (int steps = 0; steps < max_steps; steps++) {
float reached_point_x = origins[i] + depth * directions[i];
float reached_point_y = origins[i+1] + depth * directions[i+1];
float reached_point_z = origins[i+2] + depth * directions[i+2];
// Get the signed distance value for the point the ray reaches
const float distance = ValueAt(grid, reached_point_x, reached_point_y, reached_point_z,
directions[i], directions[i+1], directions[i+2],
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z,
grid_res_x,
grid_res_y,
grid_res_z, first_time);
first_time = false;
// Check if the ray is going outside the bounding box
if (distance == -1) {
voxel_position[i] = -1;
voxel_position[i+1] = -1;
voxel_position[i+2] = -1;
intersection_pos[i] = -1;
intersection_pos[i+1] = -1;
intersection_pos[i+2] = -1;
gotten_result = 1;
break;
}
// Check if we are close enough to the surface
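// The threshold grows with depth so the tolerance roughly matches the pixel footprint projected to that distance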
if (distance < pixel_distances[pixel_index] / origin_image_distances[pixel_index] * depth / 10) {
// Compute the minimum point of the intersecting voxel
voxel_position[i] = floorf((reached_point_x - bounding_box_min_x) / voxel_size);
voxel_position[i+1] = floorf((reached_point_y - bounding_box_min_y) / voxel_size);
voxel_position[i+2] = floorf((reached_point_z - bounding_box_min_z) / voxel_size);
if (voxel_position[i] == grid_res_x - 1) {
voxel_position[i] = voxel_position[i] - 1;
}
if (voxel_position[i+1] == grid_res_x - 1) {
voxel_position[i+1] = voxel_position[i+1] - 1;
}
if (voxel_position[i+2] == grid_res_x - 1) {
voxel_position[i+2] = voxel_position[i+2] - 1;
}
intersection_pos[i] = reached_point_x;
intersection_pos[i+1] = reached_point_y;
intersection_pos[i+2] = reached_point_z;
gotten_result = 1;
break;
}
// Increase distance
depth += distance;
}
if (gotten_result == 0) {
// No intersections
voxel_position[i] = -1;
voxel_position[i+1] = -1;
voxel_position[i+2] = -1;
intersection_pos[i] = -1;
intersection_pos[i+1] = -1;
intersection_pos[i+2] = -1;
}
}
}
} // namespace
// Ray marching to get the first corner position of the voxel the ray intersects
std::vector<at::Tensor> ray_matching_cuda(
const at::Tensor w_h_3,
const at::Tensor w_h,
const at::Tensor grid,
const int width,
const int height,
const float bounding_box_min_x,
const float bounding_box_min_y,
const float bounding_box_min_z,
const float bounding_box_max_x,
const float bounding_box_max_y,
const float bounding_box_max_z,
const int grid_res_x,
const int grid_res_y,
const int grid_res_z,
const float eye_x,
const float eye_y,
const float eye_z) {
const int thread = 512;
at::Tensor origins = at::zeros_like(w_h_3);
at::Tensor directions = at::zeros_like(w_h_3);
at::Tensor origin_image_distances = at::zeros_like(w_h);
at::Tensor pixel_distances = at::zeros_like(w_h);
GenerateRay<<<(width * height + thread - 1) / thread, thread>>>(
origins.data<float>(),
directions.data<float>(),
origin_image_distances.data<float>(),
pixel_distances.data<float>(),
width,
height,
eye_x,
eye_y,
eye_z);
at::Tensor voxel_position = at::zeros_like(w_h_3);
at::Tensor intersection_pos = at::zeros_like(w_h_3);
Intersect<<<(width * height + thread - 1) / thread, thread>>>(
grid.data<float>(),
origins.data<float>(),
directions.data<float>(),
origin_image_distances.data<float>(),
pixel_distances.data<float>(),
bounding_box_min_x,
bounding_box_min_y,
bounding_box_min_z,
bounding_box_max_x,
bounding_box_max_y,
bounding_box_max_z,
grid_res_x,
grid_res_y,
grid_res_z,
voxel_position.data<float>(),
intersection_pos.data<float>(),
width,
height);
return {intersection_pos, voxel_position, directions};
}
|
79c0a7c18083f1f12bf51024ae17f56a2590197c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__
//void compute(const float* A, const float* B, const float* C, float* D, int n) {
void compute(float* D, int n, int div) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
float I1 = tid * 2.0;
int thread_id = threadIdx.x % 32;
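// Only the first 'div' lanes of each warp execute the PTX workload, controlling intra-warp divergence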
if (thread_id < div) {
__asm volatile (
" .reg .f32 %r12;\n\t"
" .reg .f32 %r13;\n\t"
" .reg .f32 %r14;\n\t"
" .reg .f32 %r15;\n\t"
" .reg .f32 %r16;\n\t"
" .reg .f32 %r17;\n\t"
" .reg .f32 %r18;\n\t"
" .reg .f32 %r19;\n\t"
" .reg .f32 %r20;\n\t"
" .reg .f32 %r21;\n\t"
" .reg .f32 %r22;\n\t"
" .reg .f32 %r23;\n\t"
" .reg .f32 %r24;\n\t"
" .reg .f32 %r25;\n\t"
" .reg .f32 %r26;\n\t"
" .reg .f32 %r27;\n\t"
" .reg .f32 %r28;\n\t"
"mov.f32 %r12, 4.4;\n\t"
"mov.f32 %r13, %r12;\n\t"
"mov.f32 %r14, 2.2;\n\t"
"mov.f32 %r15, 3.3;\n\t"
"mov.f32 %r16, 1.23;\n\t"
"mov.f32 %r17, 2.42;\n\t"
"mov.f32 %r18, 3.34;\n\t"
"mov.f32 %r19, 5.62;\n\t"
"mov.f32 %r20, 2.56;\n\t"
"mov.f32 %r21, 1.56;\n\t"
"mov.f32 %r22, 2.56;\n\t"
"mov.f32 %r23, 5.56;\n\t"
"mov.f32 %r24, 8.56;\n\t"
"mov.f32 %r25, 3.56;\n\t"
"mov.f32 %r26, 5.56;\n\t"
"mov.f32 %r27, 6.56;\n\t"
"mov.f32 %r28, 5.6;\n\t"
);
for (int k = 0; k < n; k++) {
__asm volatile (
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
*D = I1;
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
int main(int argc, char **argv)
{
if (argc != 5) {
usage();
exit(1);
}
int num_blocks = atoi(argv[1]);
int num_threads_per_block = atoi(argv[2]);
int iterations = atoi(argv[3]);
int divergence = atoi(argv[4]);
// h_A = new float(2.0);
// h_B = new float(3.0);
// h_C = new float(4.0);
// hipMalloc((void**)&d_A, sizeof(float));
// hipMalloc((void**)&d_B, sizeof(float));
// hipMalloc((void**)&d_C, sizeof(float));
hipMalloc((void**)&d_res, sizeof(float));
// hipMemcpy(d_A, h_A, sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(d_B, h_B, sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(d_C, h_C, sizeof(float), hipMemcpyHostToDevice);
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipProfilerStart();
// hipLaunchKernelGGL(( compute), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_A, d_B, d_C, d_res, iterations);
hipLaunchKernelGGL(( compute), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_res, iterations, divergence);
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
std::cout << "GPU Elapsed Time = " << time << std::endl;
hipEventDestroy(start);
hipEventDestroy(stop);
hipDeviceSynchronize();
h_res = new float; // allocate the host result buffer before copying back
hipMemcpy(h_res, d_res, sizeof(float), hipMemcpyDeviceToHost);
return 0;
}
| 79c0a7c18083f1f12bf51024ae17f56a2590197c.cu | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__
//void compute(const float* A, const float* B, const float* C, float* D, int n) {
void compute(float* D, int n, int div) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
float I1 = tid * 2.0;
int thread_id = threadIdx.x % 32;
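// Only the first 'div' lanes of each warp execute the PTX workload, controlling intra-warp divergence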
if (thread_id < div) {
__asm volatile (
" .reg .f32 %r12;\n\t"
" .reg .f32 %r13;\n\t"
" .reg .f32 %r14;\n\t"
" .reg .f32 %r15;\n\t"
" .reg .f32 %r16;\n\t"
" .reg .f32 %r17;\n\t"
" .reg .f32 %r18;\n\t"
" .reg .f32 %r19;\n\t"
" .reg .f32 %r20;\n\t"
" .reg .f32 %r21;\n\t"
" .reg .f32 %r22;\n\t"
" .reg .f32 %r23;\n\t"
" .reg .f32 %r24;\n\t"
" .reg .f32 %r25;\n\t"
" .reg .f32 %r26;\n\t"
" .reg .f32 %r27;\n\t"
" .reg .f32 %r28;\n\t"
"mov.f32 %r12, 4.4;\n\t"
"mov.f32 %r13, %r12;\n\t"
"mov.f32 %r14, 2.2;\n\t"
"mov.f32 %r15, 3.3;\n\t"
"mov.f32 %r16, 1.23;\n\t"
"mov.f32 %r17, 2.42;\n\t"
"mov.f32 %r18, 3.34;\n\t"
"mov.f32 %r19, 5.62;\n\t"
"mov.f32 %r20, 2.56;\n\t"
"mov.f32 %r21, 1.56;\n\t"
"mov.f32 %r22, 2.56;\n\t"
"mov.f32 %r23, 5.56;\n\t"
"mov.f32 %r24, 8.56;\n\t"
"mov.f32 %r25, 3.56;\n\t"
"mov.f32 %r26, 5.56;\n\t"
"mov.f32 %r27, 6.56;\n\t"
"mov.f32 %r28, 5.6;\n\t"
);
for (int k = 0; k < n; k++) {
__asm volatile (
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
"rsqrt.approx.f32 %r13, %r13;\n\t"
"rsqrt.approx.f32 %r14, %r14;\n\t"
"rsqrt.approx.f32 %r15, %r15;\n\t"
"rsqrt.approx.f32 %r16, %r16;\n\t"
"rsqrt.approx.f32 %r17, %r17;\n\t"
"rsqrt.approx.f32 %r18, %r18;\n\t"
"rsqrt.approx.f32 %r19, %r19;\n\t"
"rsqrt.approx.f32 %r20, %r20;\n\t"
"rsqrt.approx.f32 %r21, %r21;\n\t"
"rsqrt.approx.f32 %r22, %r22;\n\t"
"rsqrt.approx.f32 %r23, %r23;\n\t"
"rsqrt.approx.f32 %r24, %r24;\n\t"
"rsqrt.approx.f32 %r25, %r25;\n\t"
"rsqrt.approx.f32 %r26, %r26;\n\t"
"rsqrt.approx.f32 %r27, %r27;\n\t"
"rsqrt.approx.f32 %r28, %r28;\n\t"
);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
*D = I1;
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
int main(int argc, char **argv)
{
if (argc != 5) {
usage();
exit(1);
}
int num_blocks = atoi(argv[1]);
int num_threads_per_block = atoi(argv[2]);
int iterations = atoi(argv[3]);
int divergence = atoi(argv[4]);
// h_A = new float(2.0);
// h_B = new float(3.0);
// h_C = new float(4.0);
// cudaMalloc((void**)&d_A, sizeof(float));
// cudaMalloc((void**)&d_B, sizeof(float));
// cudaMalloc((void**)&d_C, sizeof(float));
cudaMalloc((void**)&d_res, sizeof(float));
// cudaMemcpy(d_A, h_A, sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_B, h_B, sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_C, h_C, sizeof(float), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaProfilerStart();
// compute<<<num_blocks, num_threads_per_block>>>(d_A, d_B, d_C, d_res, iterations);
compute<<<num_blocks, num_threads_per_block>>>(d_res, iterations, divergence);
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::cout << "GPU Elapsed Time = " << time << std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaDeviceSynchronize();
h_res = new float; // allocate the host result buffer before copying back
cudaMemcpy(h_res, d_res, sizeof(float), cudaMemcpyDeviceToHost);
return 0;
}
|
546d49b4ec607bb10f71b82fcb7eb9c8ae4d15fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Find BLANK and replace it with your own code.
* And submit a report explaining why you replaced the blank that way.
*/
#include<stdlib.h>
#include<iostream>
#include<fstream>
#include<vector>
#include<string>
#include "gputimer.h"
#define TILE_WIDTH 16 /* set TILE_WIDTH 16 for the evaluation! */
#define MAXPOOL_INPUT_FILENAME "input.txt"
#define A_FILENAME "a.txt"
#define B_FILENAME "b.txt"
#define C_FILENAME "c.txt"
using namespace std;
__global__ void gemm(float *a, float *b, float *c, const float alpha, const float beta, float *output, const int input_size) {
// a, b, c : input matrix address
// alpha, beta : input constant
// output : output buffer address
// input_size : width, height of input matrix
// all input, output matrices are vectorized
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
int i = blockIdx.x * TILE_WIDTH, j = blockIdx.y * TILE_WIDTH;
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
// allocate 2D tiles in __shared__ memory
__shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_b[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_c[TILE_WIDTH][TILE_WIDTH];
float sum = 0.0f;
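// Walk over the shared dimension one TILE_WIDTH-wide tile at a time, staging tiles of A and B in shared memory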
for (int i = 0; i < ceilf(input_size/TILE_WIDTH) + 1; i++) {
s_a[ty][tx] = 0.0f; // to ignore unaffected values
// boundary check
if (row < input_size && (TILE_WIDTH * i + tx) < input_size) {
s_a[ty][tx] = a[row * input_size + TILE_WIDTH * i + tx];
}
s_b[ty][tx] = 0.0f; // to ignore unaffected values
// boundary check
if (col < input_size && (i * TILE_WIDTH + ty) < input_size) {
s_b[ty][tx] = b[(i * TILE_WIDTH + ty) * input_size + col];
}
__syncthreads(); // barrier
for (int j = 0; j<TILE_WIDTH; j++) {
sum += s_a[ty][j] * s_b[j][tx]; // get tile sum for block
}
__syncthreads(); // barrier
}
if (row < input_size && col < input_size) {
int index = (i + tx) + (j + ty)*input_size;
s_c[ty][tx] = c[index];
output[index] = alpha * sum + beta * s_c[ty][tx];
}
}
int main(int argc, char **argv) {
GpuTimer timer;
if(argc < 4) {
cout << "usage : " << argv[0] << " input_size alpha beta\n" << "example : " << argv[0] << " 100 0.5 0.8\n";
return 1;
}
const int input_size = stoi(argv[1]);
const float alpha = stof(argv[2]);
const float beta = stof(argv[3]);
// check input_size is power of 2
if(input_size == 0 && (input_size & (input_size-1))){
cout << "input_size must be power of 2\n";
return 1;
}
float* a = new float[input_size*input_size];
float* b = new float[input_size*input_size];
float* c = new float[input_size*input_size];
// read input matrices
ifstream a_in(A_FILENAME);
ifstream b_in(B_FILENAME);
ifstream c_in(C_FILENAME);
for (int i = 0; i < input_size*input_size; ++i) {
a[i] = rand() % 10;
b[i] = rand() % 10;
c[i] = rand() % 10;
}
// prints inputs for debugging.
cout<<"\n\nalpha : "<<alpha<<'\n';
cout<<"========== A ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<a[i]<<", ";
if (input_size > 100) {
cout << "\n.......";
break;
}
}
cout<<"\n========== B ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<b[i]<<", ";
if (input_size > 100) {
cout << "\n.......";
break;
}
}
cout<<"\n\nbeta : "<<beta<<'\n';
cout<<"========== C ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<c[i]<<", ";
if (input_size > 100) {
cout << "\n.......";
break;
}
}
cout<<'\n';
// set thread, block dimensions
const dim3 block_size(TILE_WIDTH, TILE_WIDTH);
const dim3 num_of_blocks(input_size/block_size.x + 1, input_size/block_size.y + 1);
// memory allocation for the device
float *dev_mem_a, *dev_mem_b, *dev_mem_c, *gemm_output;
hipMalloc(&dev_mem_a, sizeof(float) * input_size * input_size);
hipMalloc(&dev_mem_b, sizeof(float) * input_size * input_size);
hipMalloc(&dev_mem_c, sizeof(float) * input_size * input_size);
hipMalloc(&gemm_output, sizeof(float) * input_size * input_size);
// copy variable to device memory
hipMemcpy(dev_mem_a, a, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice);
hipMemcpy(dev_mem_b, b, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice);
hipMemcpy(dev_mem_c, c, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice);
timer.Start();
// launch CUDA kernels
// First launch gemm kernel
hipLaunchKernelGGL(( gemm), dim3(num_of_blocks), dim3(block_size), 0, 0, dev_mem_a, dev_mem_b, dev_mem_c, alpha, beta, gemm_output, input_size);
timer.Stop();
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if(error!=hipSuccess) {
fprintf(stderr, "ERROR %s\n", hipGetErrorString(error));
return 1;
}
// allocate output buf in main memory
float *gemm_output_buf = (float*) malloc (sizeof(float)*input_size*input_size);
// copy results from device to host
hipMemcpy(gemm_output_buf, gemm_output, sizeof(float)*input_size*input_size, hipMemcpyDeviceToHost);
// prints the results
cout<<"\n========== GEMM OUTPUT ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<gemm_output_buf[i]<<" ";
if (input_size > 100) {
cout << "\n.......";
break;
}
}
cout<<'\n';
cout << "\n ===> Time elapsed = " << timer.Elapsed() << " ms\n";
hipFree(dev_mem_a);
hipFree(dev_mem_b);
hipFree(dev_mem_c);
hipFree(gemm_output);
free(gemm_output_buf);
delete[] a;
delete[] b;
delete[] c;
return 0;
}
| 546d49b4ec607bb10f71b82fcb7eb9c8ae4d15fc.cu | /*
* Find BLANK and replace it with your own code.
* And submit a report explaining why you replaced the blank that way.
*/
#include<stdlib.h>
#include<iostream>
#include<fstream>
#include<vector>
#include<string>
#include "gputimer.h"
#define TILE_WIDTH 16 /* set TILE_WIDTH 16 for the evaluation! */
#define MAXPOOL_INPUT_FILENAME "input.txt"
#define A_FILENAME "a.txt"
#define B_FILENAME "b.txt"
#define C_FILENAME "c.txt"
using namespace std;
__global__ void gemm(float *a, float *b, float *c, const float alpha, const float beta, float *output, const int input_size) {
// a, b, c : input matrix address
// alpha, beta : input constant
// output : output buffer address
// input_size : width, height of input matrix
// all input, output matrices are vectorized
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
int i = blockIdx.x * TILE_WIDTH, j = blockIdx.y * TILE_WIDTH;
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
// allocate 2D tiles in __shared__ memory
__shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_b[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_c[TILE_WIDTH][TILE_WIDTH];
float sum = 0.0f;
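// Walk over the shared dimension one TILE_WIDTH-wide tile at a time, staging tiles of A and B in shared memory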
for (int i = 0; i < ceilf(input_size/TILE_WIDTH) + 1; i++) {
s_a[ty][tx] = 0.0f; // to ignore unaffected values
// boundary check
if (row < input_size && (TILE_WIDTH * i + tx) < input_size) {
s_a[ty][tx] = a[row * input_size + TILE_WIDTH * i + tx];
}
s_b[ty][tx] = 0.0f; // to ignore unaffected values
// boundary check
if (col < input_size && (i * TILE_WIDTH + ty) < input_size) {
s_b[ty][tx] = b[(i * TILE_WIDTH + ty) * input_size + col];
}
__syncthreads(); // barrier
for (int j = 0; j<TILE_WIDTH; j++) {
sum += s_a[ty][j] * s_b[j][tx]; // get tile sum for block
}
__syncthreads(); // barrier
}
if (row < input_size && col < input_size) {
int index = (i + tx) + (j + ty)*input_size;
s_c[ty][tx] = c[index];
output[index] = alpha * sum + beta * s_c[ty][tx];
}
}
int main(int argc, char **argv) {
GpuTimer timer;
if(argc < 4) {
cout << "usage : " << argv[0] << " input_size alpha beta\n" << "example : " << argv[0] << " 100 0.5 0.8\n";
return 1;
}
const int input_size = stoi(argv[1]);
const float alpha = stof(argv[2]);
const float beta = stof(argv[3]);
// check input_size is power of 2
if(input_size == 0 && (input_size & (input_size-1))){
cout << "input_size must be power of 2\n";
return 1;
}
float* a = new float[input_size*input_size];
float* b = new float[input_size*input_size];
float* c = new float[input_size*input_size];
// read input matrices
ifstream a_in(A_FILENAME);
ifstream b_in(B_FILENAME);
ifstream c_in(C_FILENAME);
for (int i = 0; i < input_size*input_size; ++i) {
a[i] = rand() % 10;
b[i] = rand() % 10;
c[i] = rand() % 10;
}
// prints inputs for debugging.
cout<<"\n\nalpha : "<<alpha<<'\n';
cout<<"========== A ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<a[i]<<", ";
if (input_size > 100) {
cout << "\n.......";
break;
}
}
cout<<"\n========== B ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<b[i]<<", ";
if (input_size > 100) {
cout << "\n.......";
break;
}
}
cout<<"\n\nbeta : "<<beta<<'\n';
cout<<"========== C ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<c[i]<<", ";
if (input_size > 100) {
cout << "\n.......";
break;
}
}
cout<<'\n';
// set thread, block dimensions
const dim3 block_size(TILE_WIDTH, TILE_WIDTH);
const dim3 num_of_blocks(input_size/block_size.x + 1, input_size/block_size.y + 1);
// memory allocation for the device
float *dev_mem_a, *dev_mem_b, *dev_mem_c, *gemm_output;
cudaMalloc(&dev_mem_a, sizeof(float) * input_size * input_size);
cudaMalloc(&dev_mem_b, sizeof(float) * input_size * input_size);
cudaMalloc(&dev_mem_c, sizeof(float) * input_size * input_size);
cudaMalloc(&gemm_output, sizeof(float) * input_size * input_size);
// copy variable to device memory
cudaMemcpy(dev_mem_a, a, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_mem_b, b, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_mem_c, c, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);
timer.Start();
// launch CUDA kernels
// First launch gemm kernel
gemm<<<num_of_blocks, block_size>>>(dev_mem_a, dev_mem_b, dev_mem_c, alpha, beta, gemm_output, input_size);
timer.Stop();
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess) {
fprintf(stderr, "ERROR %s\n", cudaGetErrorString(error));
return 1;
}
// allocate output buf in main memory
float *gemm_output_buf = (float*) malloc (sizeof(float)*input_size*input_size);
// copy results from device to host
cudaMemcpy(gemm_output_buf, gemm_output, sizeof(float)*input_size*input_size, cudaMemcpyDeviceToHost);
// prints the results
cout<<"\n========== GEMM OUTPUT ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<gemm_output_buf[i]<<" ";
if (input_size > 100) {
cout << "\n.......";
break;
}
}
cout<<'\n';
cout << "\n ===> Time elapsed = " << timer.Elapsed() << " ms\n";
cudaFree(dev_mem_a);
cudaFree(dev_mem_b);
cudaFree(dev_mem_c);
cudaFree(gemm_output);
free(gemm_output_buf);
delete[] a;
delete[] b;
delete[] c;
return 0;
}
|
5dd976da23193503b577ab351cfcb2ab346371bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include <math.h>
#define MAX_THREADS_PER_BLOCK 1024
#define DRAW_GRADIENT_MAP true
__global__ void ColorBufferFillKernel(uchar3 *dary, float t, int DIMX, int DIMY, int numBlocksWithSameColorForH, int numBlocksWithSameColorForW)
{
/* Insert your kernel here */
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// Ignore threads outside the canvas range (imperfect division in number of blocks)
if (i >= DIMX)
return;
if (j >= DIMY)
return;
// Since the array is ordered in WHC format
int offset = (i) + (j * DIMY);
uchar3 color;
#if DRAW_GRADIENT_MAP
// color = make_uchar3(((float)i / DIMX) * 256, ((float)j / DIMY) * 256 , 0);
// Distinct color for each block
// color = make_uchar3(((float)i / DIMX) * 256, ((float)j / DIMY) * 256, 0);
// color = make_uchar3(((float)blockIdx.x / gridDim.x) * 255, ((float)blockIdx.y / gridDim.y) * 255, 0);
int blockColorIdxX = blockIdx.x / numBlocksWithSameColorForW + 1;
int normalizerX = gridDim.x / numBlocksWithSameColorForW + 1;
float xProportion = (float)((blockIdx.x % numBlocksWithSameColorForW) * blockDim.x + threadIdx.x) / (numBlocksWithSameColorForW * blockDim.x);
int blockColorIdxY = blockIdx.y / numBlocksWithSameColorForH + 1;
int normalizerY = gridDim.y / numBlocksWithSameColorForH + 1;
float yProportion = (float)((blockIdx.y % numBlocksWithSameColorForH) * blockDim.y + threadIdx.y) / (numBlocksWithSameColorForH * blockDim.y);
int currentBlockColorX = (((float)blockColorIdxX / normalizerX) * 255);
int currentBlockColorY = (((float)blockColorIdxY / normalizerY) * 255);
// Get last block colors
int lastBlockXColor = 0, lastBlockYColor = 0;
if (blockColorIdxX > 0)
lastBlockXColor = ((float)(blockColorIdxX - 1) / normalizerX) * 255;
if (blockColorIdxY > 0)
lastBlockYColor = ((float)(blockColorIdxY - 1) / normalizerX) * 255;
// color = make_uchar3(((float)blockColorIdxX / normalizerX) * 255, ((float)blockColorIdxY / normalizerY) * 255, 0);
color = make_uchar3((xProportion) * currentBlockColorX + (1.0 - xProportion) * lastBlockXColor,
(yProportion) * currentBlockColorY + (1.0 - yProportion) * lastBlockYColor, 0);
#else
int blockColorIdxX = blockIdx.x / numBlocksWithSameColorForW;
int normalizerX = gridDim.x / numBlocksWithSameColorForW;
int blockColorIdxY = blockIdx.y / numBlocksWithSameColorForH;
int normalizerY = gridDim.y / numBlocksWithSameColorForH;
color = make_uchar3(((float)blockColorIdxX / normalizerX) * 255, ((float)blockColorIdxY / normalizerY) * 255, 0);
#endif
dary[offset] = color;
}
void simulate(uchar3 *ptr, int tick, int w, int h)
{
/* ptr is a pointer to an array of size w*h*sizeof(uchar3).
uchar3 is a structure with x,y,z coordinates to contain
red,yellow,blue - values for a pixel (Range [0,255])
*/
hipError_t err=hipSuccess;
hipEvent_t start,stop;
float elapsedtime;
hipEventCreate ( &start);
hipEventCreate ( &stop);
hipEventRecord(start);
/* Space for
Your kernel
*/
int divisions = 3; // 9 blocks
int blockDim = 25;
// Pick the ideal dimensions of kernel
dim3 dimBlock(blockDim, blockDim);
// dim3 dimBlock((int)(w / divisions), (int)(h / divisions));
dim3 dimGrid((h + dimBlock.y - 1) / dimBlock.y, (w + dimBlock.x - 1) / dimBlock.x);
printf("Grid dims: (%d, %d)\n", dimGrid.x, dimGrid.y);
// Determine the number of kernels to be colored the same
int numBlocksWithSameColorForH = floor(h / (divisions * blockDim));
int numBlocksWithSameColorForW = floor(w / (divisions * blockDim));
printf("Number of blocks with same color for H: %d and for W: %d\n", numBlocksWithSameColorForH, numBlocksWithSameColorForW);
// Start the kernel
hipLaunchKernelGGL(( ColorBufferFillKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ptr, tick, w, h, numBlocksWithSameColorForH, numBlocksWithSameColorForW);
err=hipGetLastError();
if(err!=hipSuccess) {
fprintf(stderr,"Error executing the kernel - %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedtime, start, stop);
printf("Time used: %.1f (ms)\n",elapsedtime);
hipEventDestroy ( start);
hipEventDestroy ( stop);
printf("Please type ESC in graphics and afterwards RETURN in cmd-screen to finish\n");
}
| 5dd976da23193503b577ab351cfcb2ab346371bc.cu | #include <stdio.h>
#include <assert.h>
#include <math.h>
#define MAX_THREADS_PER_BLOCK 1024
#define DRAW_GRADIENT_MAP true
__global__ void ColorBufferFillKernel(uchar3 *dary, float t, int DIMX, int DIMY, int numBlocksWithSameColorForH, int numBlocksWithSameColorForW)
{
/* Insert your kernel here */
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// Ignore threads outside the canvas range (imperfect division in number of blocks)
if (i >= DIMX)
return;
if (j >= DIMY)
return;
// Since the array is ordered in WHC format
int offset = (i) + (j * DIMY);
uchar3 color;
#if DRAW_GRADIENT_MAP
// color = make_uchar3(((float)i / DIMX) * 256, ((float)j / DIMY) * 256 , 0);
// Distinct color for each block
// color = make_uchar3(((float)i / DIMX) * 256, ((float)j / DIMY) * 256, 0);
// color = make_uchar3(((float)blockIdx.x / gridDim.x) * 255, ((float)blockIdx.y / gridDim.y) * 255, 0);
int blockColorIdxX = blockIdx.x / numBlocksWithSameColorForW + 1;
int normalizerX = gridDim.x / numBlocksWithSameColorForW + 1;
float xProportion = (float)((blockIdx.x % numBlocksWithSameColorForW) * blockDim.x + threadIdx.x) / (numBlocksWithSameColorForW * blockDim.x);
int blockColorIdxY = blockIdx.y / numBlocksWithSameColorForH + 1;
int normalizerY = gridDim.y / numBlocksWithSameColorForH + 1;
float yProportion = (float)((blockIdx.y % numBlocksWithSameColorForH) * blockDim.y + threadIdx.y) / (numBlocksWithSameColorForH * blockDim.y);
int currentBlockColorX = (((float)blockColorIdxX / normalizerX) * 255);
int currentBlockColorY = (((float)blockColorIdxY / normalizerY) * 255);
// Get last block colors
int lastBlockXColor = 0, lastBlockYColor = 0;
if (blockColorIdxX > 0)
lastBlockXColor = ((float)(blockColorIdxX - 1) / normalizerX) * 255;
if (blockColorIdxY > 0)
lastBlockYColor = ((float)(blockColorIdxY - 1) / normalizerX) * 255;
// color = make_uchar3(((float)blockColorIdxX / normalizerX) * 255, ((float)blockColorIdxY / normalizerY) * 255, 0);
color = make_uchar3((xProportion) * currentBlockColorX + (1.0 - xProportion) * lastBlockXColor,
(yProportion) * currentBlockColorY + (1.0 - yProportion) * lastBlockYColor, 0);
#else
int blockColorIdxX = blockIdx.x / numBlocksWithSameColorForW;
int normalizerX = gridDim.x / numBlocksWithSameColorForW;
int blockColorIdxY = blockIdx.y / numBlocksWithSameColorForH;
int normalizerY = gridDim.y / numBlocksWithSameColorForH;
color = make_uchar3(((float)blockColorIdxX / normalizerX) * 255, ((float)blockColorIdxY / normalizerY) * 255, 0);
#endif
dary[offset] = color;
}
void simulate(uchar3 *ptr, int tick, int w, int h)
{
/* ptr is a pointer to an array of size w*h*sizeof(uchar3).
uchar3 is a structure with x,y,z coordinates to contain
red,yellow,blue - values for a pixel (Range [0,255])
*/
cudaError_t err=cudaSuccess;
cudaEvent_t start,stop;
float elapsedtime;
cudaEventCreate ( &start);
cudaEventCreate ( &stop);
cudaEventRecord(start);
/* Space for
Your kernel
*/
int divisions = 3; // 9 blocks
int blockDim = 25;
// Pick the ideal dimensions of kernel
dim3 dimBlock(blockDim, blockDim);
// dim3 dimBlock((int)(w / divisions), (int)(h / divisions));
dim3 dimGrid((h + dimBlock.y - 1) / dimBlock.y, (w + dimBlock.x - 1) / dimBlock.x);
printf("Grid dims: (%d, %d)\n", dimGrid.x, dimGrid.y);
// Determine the number of kernels to be colored the same
int numBlocksWithSameColorForH = floor(h / (divisions * blockDim));
int numBlocksWithSameColorForW = floor(w / (divisions * blockDim));
printf("Number of blocks with same color for H: %d and for W: %d\n", numBlocksWithSameColorForH, numBlocksWithSameColorForW);
// Start the kernel
ColorBufferFillKernel<<<dimGrid, dimBlock>>>(ptr, tick, w, h, numBlocksWithSameColorForH, numBlocksWithSameColorForW);
err=cudaGetLastError();
if(err!=cudaSuccess) {
fprintf(stderr,"Error executing the kernel - %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedtime, start, stop);
printf("Time used: %.1f (ms)\n",elapsedtime);
cudaEventDestroy ( start);
cudaEventDestroy ( stop);
printf("Please type ESC in graphics and afterwards RETURN in cmd-screen to finish\n");
}
|
67e7ba68e38835ec780d4ea15fb8651eb7bad7a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// example of using CUDA streams
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <chrono>
#include <math.h>
#include <fstream>
using namespace std::chrono;
#define BLOCK_SIZE 256
void initWithNoStream(float num, float *a, int N)
{
for(int i = 0; i < N; ++i)
{
a[i] = num;
}
}
__global__
void reduceVector(float *g_data, int N) {
__shared__ float sdata[2*BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * 2 * blockDim.x + threadIdx.x;
unsigned int gridStride = 2 * blockDim.x * gridDim.x;
sdata[tid] = 0;
while (i < N) {
sdata[tid] += g_data[i] + g_data[i + blockDim.x];
i += gridStride;
}
for(unsigned j = blockDim.x / 2; j > 32; j >>= 1) {
__syncthreads();
if (tid < j) {
sdata[tid] += sdata[tid + j];
}
}
//printf("tid %d i %d blockIdx.x %d sdata[%d] %f\n", tid, i, blockIdx.x, tid, sdata[tid]);
__syncthreads();
//
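// NOTE: the unrolled final-warp stage below assumes implicit warp-synchronous
// execution; on GPUs with independent thread scheduling it needs a volatile
// view of sdata (or __syncwarp() between steps) to be strictly correct.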
if(tid < 32) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
if (tid == 0) g_data[blockIdx.x] = sdata[0];
}
__global__
void reduceVector_naive(float *g_data, int N) {
__shared__ float sdata[2*BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * 2 * blockDim.x + threadIdx.x;
sdata[tid] = g_data[i];
sdata[blockDim.x+tid] = g_data[i + blockDim.x];
for(unsigned int j = 1; j <= blockDim.x; j *= 2) {
__syncthreads();
if (tid % j == 0) {
sdata[2*tid] += sdata[2*tid + j];
}
}
__syncthreads();
if (tid == 0) g_data[blockIdx.x] = sdata[tid];
}
int main(int argc, char** argv)
{
int in_s = 14;
char* pEnd;
if (!in_s && argc < 2) {
printf("Podaj liczbe ktra bdzie przesuniciem 2 ktre jest rozmiarem\n");
return 1;
} else if(!in_s) {
in_s = strtol(argv[1], &pEnd, 10);
}
int deviceId;
int numberOfSMs;
hipGetDevice(&deviceId);
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId);
std::ofstream host_file, device_file, device_naive_file;
host_file.open ("reduction_host.txt");
device_file.open ("reduction_device.txt");
device_naive_file.open ("reduction__device_naive.txt");
for(int in_s = 14; in_s < 27; in_s++) {
const int N = 2<<in_s;
host_file << N;
device_file << N;
device_naive_file << N;
size_t size = N * sizeof(float);
size_t threads;
size_t blocks;
threads = BLOCK_SIZE;
blocks = N / threads / 2;
printf("threads %d blocks %d\n", threads, blocks);
hipError_t addVectorsErr;
hipError_t asyncErr;
for(int k = 0; k < 10; k++) {
float *a;
float *b;
float *c;
hipMallocManaged(&b, size);
hipMallocManaged(&a, size);
hipMallocManaged(&c, size);
initWithNoStream(1, a, N);
initWithNoStream(1, b, N);
initWithNoStream(1, c, N);
hipMemPrefetchAsync(a, size, deviceId);
hipMemPrefetchAsync(b, size, deviceId);
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Device sync Error: %s\n", hipGetErrorString(asyncErr));
auto start = high_resolution_clock::now();
hipLaunchKernelGGL(( reduceVector), dim3(32 * numberOfSMs), dim3(threads), 0, 0, a, N);
hipMemPrefetchAsync(a, size, hipCpuDeviceId);
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Device sync Error: %s\n", hipGetErrorString(asyncErr));
for(int i = 1; i < 32 * numberOfSMs; i++) {
a[0] += a[i];
}
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
std::cout<< "Reduction time in us: " << duration.count()<< std::endl;
device_file << "\t" << duration.count();
addVectorsErr = hipGetLastError();
if(addVectorsErr != hipSuccess) printf("Reduction Error: %s\n", hipGetErrorString(addVectorsErr));
start = high_resolution_clock::now();
hipLaunchKernelGGL(( reduceVector_naive), dim3(blocks), dim3(threads), 0, 0, b, N);
hipMemPrefetchAsync(b, size, hipCpuDeviceId);
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Device sync Error: %s\n", hipGetErrorString(asyncErr));
for(int i = 1; i < blocks; i++) {
b[0] += b[i];
}
stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(stop - start);
std::cout<< "Reduction (naive) time in us: " << duration.count()<< std::endl;
device_naive_file << "\t" << duration.count();
addVectorsErr = hipGetLastError();
if(addVectorsErr != hipSuccess) printf("Reduction Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Device sync Error: %s\n", hipGetErrorString(asyncErr));
double result = 0;
start = high_resolution_clock::now();
for(long i = 0; i < N; i++) {
result += c[i];
}
stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(stop - start);
std::cout<< "Cpu add rest time in us: " << duration.count() << std::endl;
host_file << "\t" << duration.count();
printf("%f %f %f %d\n", a[0], b[0], result, N);
hipFree(a);
hipFree(b);
hipFree(c);
}
}
}
| 67e7ba68e38835ec780d4ea15fb8651eb7bad7a8.cu | // example of using CUDA streams
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <chrono>
#include <math.h>
#include <fstream>
using namespace std::chrono;
#define BLOCK_SIZE 256
void initWithNoStream(float num, float *a, int N)
{
for(int i = 0; i < N; ++i)
{
a[i] = num;
}
}
__global__
void reduceVector(float *g_data, int N) {
__shared__ float sdata[2*BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * 2 * blockDim.x + threadIdx.x;
unsigned int gridStride = 2 * blockDim.x * gridDim.x;
sdata[tid] = 0;
while (i < N) {
sdata[tid] += g_data[i] + g_data[i + blockDim.x];
i += gridStride;
}
for(unsigned j = blockDim.x / 2; j > 32; j >>= 1) {
__syncthreads();
if (tid < j) {
sdata[tid] += sdata[tid + j];
}
}
//printf("tid %d i %d blockIdx.x %d sdata[%d] %f\n", tid, i, blockIdx.x, tid, sdata[tid]);
__syncthreads();
//
if(tid < 32) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
if (tid == 0) g_data[blockIdx.x] = sdata[0];
}
__global__
void reduceVector_naive(float *g_data, int N) {
__shared__ float sdata[2*BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * 2 * blockDim.x + threadIdx.x;
sdata[tid] = g_data[i];
sdata[blockDim.x+tid] = g_data[i + blockDim.x];
for(unsigned int j = 1; j <= blockDim.x; j *= 2) {
__syncthreads();
if (tid % j == 0) {
sdata[2*tid] += sdata[2*tid + j];
}
}
__syncthreads();
if (tid == 0) g_data[blockIdx.x] = sdata[tid];
}
int main(int argc, char** argv)
{
int in_s = 14;
char* pEnd;
if (!in_s && argc < 2) {
printf("Podaj liczbe która będzie przesunięciem 2 które jest rozmiarem\n");
return 1;
} else if(!in_s) {
in_s = strtol(argv[1], &pEnd, 10);
}
int deviceId;
int numberOfSMs;
cudaGetDevice(&deviceId);
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
std::ofstream host_file, device_file, device_naive_file;
host_file.open ("reduction_host.txt");
device_file.open ("reduction_device.txt");
device_naive_file.open ("reduction__device_naive.txt");
for(int in_s = 14; in_s < 27; in_s++) {
const int N = 2<<in_s;
host_file << N;
device_file << N;
device_naive_file << N;
size_t size = N * sizeof(float);
size_t threads;
size_t blocks;
threads = BLOCK_SIZE;
blocks = N / threads / 2;
printf("threads %d blocks %d\n", threads, blocks);
cudaError_t addVectorsErr;
cudaError_t asyncErr;
for(int k = 0; k < 10; k++) {
float *a;
float *b;
float *c;
cudaMallocManaged(&b, size);
cudaMallocManaged(&a, size);
cudaMallocManaged(&c, size);
initWithNoStream(1, a, N);
initWithNoStream(1, b, N);
initWithNoStream(1, c, N);
cudaMemPrefetchAsync(a, size, deviceId);
cudaMemPrefetchAsync(b, size, deviceId);
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Device sync Error: %s\n", cudaGetErrorString(asyncErr));
auto start = high_resolution_clock::now();
reduceVector<<<32 * numberOfSMs, threads>>>(a, N);
cudaMemPrefetchAsync(a, size, cudaCpuDeviceId);
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Device sync Error: %s\n", cudaGetErrorString(asyncErr));
for(int i = 1; i < 32 * numberOfSMs; i++) {
a[0] += a[i];
}
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
std::cout<< "Reduction time in us: " << duration.count()<< std::endl;
device_file << "\t" << duration.count();
addVectorsErr = cudaGetLastError();
if(addVectorsErr != cudaSuccess) printf("Reduction Error: %s\n", cudaGetErrorString(addVectorsErr));
start = high_resolution_clock::now();
reduceVector_naive<<<blocks, threads>>>(b, N);
cudaMemPrefetchAsync(b, size, cudaCpuDeviceId);
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Device sync Error: %s\n", cudaGetErrorString(asyncErr));
for(int i = 1; i < blocks; i++) {
b[0] += b[i];
}
stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(stop - start);
std::cout<< "Reduction (naive) time in us: " << duration.count()<< std::endl;
device_naive_file << "\t" << duration.count();
addVectorsErr = cudaGetLastError();
if(addVectorsErr != cudaSuccess) printf("Reduction Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Device sync Error: %s\n", cudaGetErrorString(asyncErr));
double result = 0;
start = high_resolution_clock::now();
for(long i = 0; i < N; i++) {
result += c[i];
}
stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(stop - start);
std::cout<< "Cpu add rest time in us: " << duration.count() << std::endl;
host_file << "\t" << duration.count();
printf("%f %f %f %d\n", a[0], b[0], result, N);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
}
}
|
dad91aea65e67d9bb2eef16b1e7e17a386c12668.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! \file cooling_cuda.cu
* \brief Functions to calculate cooling rate for a given rho, P, dt. */
#ifdef CUDA
#ifdef COOLING_GPU
#include<cuda.h>
#include<math.h>
#include"global.h"
#include"global_cuda.h"
#include"cooling_cuda.h"
extern texture<float, 2, hipReadModeElementType> coolTexObj;
extern texture<float, 2, hipReadModeElementType> heatTexObj;
/*! \fn void cooling_kernel(Real *dev_conserved, int nx, int ny, int nz, int n_ghost, int n_fields, Real dt, Real gamma)
* \brief When passed an array of conserved variables and a timestep, adjust the value
of the total energy for each cell according to the specified cooling function. */
__global__ void cooling_kernel(Real *dev_conserved, int nx, int ny, int nz, int n_ghost, int n_fields, Real dt, Real gamma, Real *dt_array)
{
__shared__ Real min_dt[TPB];
int n_cells = nx*ny*nz;
int is, ie, js, je, ks, ke;
is = n_ghost;
ie = nx-n_ghost;
if (ny == 1) {
js = 0;
je = 1;
}
else {
js = n_ghost;
je = ny-n_ghost;
}
if (nz == 1) {
ks = 0;
ke = 1;
}
else {
ks = n_ghost;
ke = nz-n_ghost;
}
Real d, E;
Real n, T, T_init;
Real del_T, dt_sub;
Real mu; // mean molecular weight
Real cool; //cooling rate per volume, erg/s/cm^3
//#ifndef DE
Real vx, vy, vz, p;
//#endif
#ifdef DE
Real ge;
#endif
Real T_min = 1.0e4; // minimum temperature allowed
mu = 0.6;
//mu = 1.27;
// get a global thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
int id = threadIdx.x + blockId * blockDim.x;
int zid = id / (nx*ny);
int yid = (id - zid*nx*ny) / nx;
int xid = id - zid*nx*ny - yid*nx;
// and a thread id within the block
int tid = threadIdx.x;
// set min dt to a high number
min_dt[tid] = 1e10;
__syncthreads();
// only threads corresponding to real cells do the calculation
if (xid >= is && xid < ie && yid >= js && yid < je && zid >= ks && zid < ke) {
// load values of density and pressure
d = dev_conserved[ id];
E = dev_conserved[4*n_cells + id];
// don't apply cooling if this thread crashed
if (E < 0.0 || E != E) return;
//#ifndef DE
vx = dev_conserved[1*n_cells + id] / d;
vy = dev_conserved[2*n_cells + id] / d;
vz = dev_conserved[3*n_cells + id] / d;
p = (E - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
p = fmax(p, (Real) TINY_NUMBER);
//#endif
#ifdef DE
ge = dev_conserved[(n_fields-1)*n_cells + id] / d;
ge = fmax(ge, (Real) TINY_NUMBER);
#endif
// calculate the number density of the gas (in cgs)
n = d*DENSITY_UNIT / (mu * MP);
// calculate the temperature of the gas
//#ifndef DE
T_init = p*PRESSURE_UNIT/ (n*KB);
//#endif
#ifdef DE
//T_init = ge*(gamma-1.0)*SP_ENERGY_UNIT*mu*MP/KB;
T_init = d*ge*(gamma-1.0)*PRESSURE_UNIT/(n*KB);
#endif
// calculate cooling rate per volume
T = T_init;
//if (T > T_max) printf("%3d %3d %3d High T cell. n: %e T: %e\n", xid, yid, zid, n, T);
// call the cooling function
//cool = CIE_cool(n, T);
cool = Cloudy_cool(n, T);
// calculate change in temperature given dt
del_T = cool*dt*TIME_UNIT*(gamma-1.0)/(n*KB);
// limit change in temperature to 1%
while (del_T/T > 0.01) {
// what dt gives del_T = 0.01*T?
dt_sub = 0.01*T*n*KB/(cool*TIME_UNIT*(gamma-1.0));
// apply that dt
T -= cool*dt_sub*TIME_UNIT*(gamma-1.0)/(n*KB);
// how much time is left from the original timestep?
dt -= dt_sub;
// calculate cooling again
//cool = CIE_cool(n, T);
cool = Cloudy_cool(n, T);
// calculate new change in temperature
del_T = cool*dt*TIME_UNIT*(gamma-1.0)/(n*KB);
}
// calculate final temperature
T -= del_T;
// set a temperature floor
// (don't change this cell if the thread crashed)
//if (T > 0.0 && E > 0.0) T = fmax(T, T_min);
// set a temperature ceiling
//T = fmin(T, T_max);
// adjust value of energy based on total change in temperature
del_T = T_init - T; // total change in T
E -= n*KB*del_T / ((gamma-1.0)*ENERGY_UNIT);
#ifdef DE
ge -= KB*del_T / (mu*MP*(gamma-1.0)*SP_ENERGY_UNIT);
#endif
// calculate cooling rate for new T
//cool = CIE_cool(n, T);
cool = Cloudy_cool(n, T);
printf("%d %d %d %e %e %e\n", xid, yid, zid, n, T, cool);
// only use good cells in timestep calculation (in case some have crashed)
if (n > 0 && T > 0 && cool > 0.0) {
// limit the timestep such that delta_T is 10%
min_dt[tid] = 0.1*T*n*KB/(cool*TIME_UNIT*(gamma-1.0));
}
// and send back from kernel
dev_conserved[4*n_cells + id] = E;
#ifdef DE
dev_conserved[(n_fields-1)*n_cells + id] = d*ge;
#endif
}
__syncthreads();
// do the reduction in shared memory (find the min timestep in the block)
for (unsigned int s=1; s<blockDim.x; s*=2) {
if (tid % (2*s) == 0) {
min_dt[tid] = fmin(min_dt[tid], min_dt[tid + s]);
}
__syncthreads();
}
// write the result for this block to global memory
if (tid == 0) dt_array[blockIdx.x] = min_dt[0];
}
/* \fn __device__ Real test_cool(Real n, Real T)
* \brief Cooling function from Creasey 2011. */
__device__ Real test_cool(int tid, Real n, Real T)
{
Real T0, T1, lambda, cool;
T0 = 10000.0;
T1 = 20*T0;
cool = 0.0;
//lambda = 5.0e-24; //cooling coefficient, 5e-24 erg cm^3 s^-1
lambda = 5.0e-20; //cooling coefficient, 5e-24 erg cm^3 s^-1
// constant cooling rate
//cool = n*n*lambda;
// Creasey cooling function
if (T >= T0 && T <= 0.5*(T1+T0)) {
cool = n*n*lambda*(T - T0) / T0;
}
if (T >= 0.5*(T1+T0) && T <= T1) {
cool = n*n*lambda*(T1 - T) / T0;
}
//printf("%d %f %f\n", tid, T, cool);
return cool;
}
/* \fn __device__ Real primordial_cool(Real n, Real T)
* \brief Primordial hydrogen/helium cooling curve
derived according to Katz et al. 1996. */
__device__ Real primordial_cool(Real n, Real T)
{
Real n_h, Y, y, g_ff, cool;
Real n_h0, n_hp, n_he0, n_hep, n_hepp, n_e, n_e_old;
Real alpha_hp, alpha_hep, alpha_d, alpha_hepp, gamma_eh0, gamma_ehe0, gamma_ehep;
Real le_h0, le_hep, li_h0, li_he0, li_hep, lr_hp, lr_hep, lr_hepp, ld_hep, l_ff;
Real gamma_lh0, gamma_lhe0, gamma_lhep, e_h0, e_he0, e_hep, H;
int heat_flag, n_iter;
Real diff, tol;
// set flag to 1 for photoionization & heating
heat_flag = 0;
//Real X = 0.76; //hydrogen abundance by mass
Y = 0.24; //helium abundance by mass
y = Y/(4 - 4*Y);
// set the hydrogen number density
n_h = n;
// calculate the recombination and collisional ionization rates
// (Table 2 from Katz 1996)
alpha_hp = (8.4e-11) * (1.0/sqrt(T)) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7))));
alpha_hep = (1.5e-10) * (pow(T,(-0.6353)));
alpha_d = (1.9e-3) * (pow(T,(-1.5))) * exp(-470000.0/T) * (1.0 + 0.3*exp(-94000.0/T));
alpha_hepp = (3.36e-10)* (1.0/sqrt(T)) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7))));
gamma_eh0 = (5.85e-11)* sqrt(T) * exp(-157809.1/T) * (1.0 / (1.0 + sqrt(T/1e5)));
gamma_ehe0 = (2.38e-11)* sqrt(T) * exp(-285335.4/T) * (1.0 / (1.0 + sqrt(T/1e5)));
gamma_ehep = (5.68e-12)* sqrt(T) * exp(-631515.0/T) * (1.0 / (1.0 + sqrt(T/1e5)));
// externally evaluated integrals for photoionization rates
// assumed J(nu) = 10^-22 (nu_L/nu)
gamma_lh0 = 3.19851e-13;
gamma_lhe0 = 3.13029e-13;
gamma_lhep = 2.00541e-14;
// externally evaluated integrals for heating rates
e_h0 = 2.4796e-24;
e_he0 = 6.86167e-24;
e_hep = 6.21868e-25;
// assuming no photoionization, solve equations for number density of
// each species
n_e = n_h; //as a first guess, use the hydrogen number density
n_iter = 20;
diff = 1.0;
tol = 1.0e-6;
if (heat_flag) {
for (int i=0; i<n_iter; i++) {
n_e_old = n_e;
n_h0 = n_h*alpha_hp / (alpha_hp + gamma_eh0 + gamma_lh0/n_e);
n_hp = n_h - n_h0;
n_hep = y*n_h / (1.0 + (alpha_hep + alpha_d)/(gamma_ehe0 + gamma_lhe0/n_e) + (gamma_ehep + gamma_lhep/n_e)/alpha_hepp );
n_he0 = n_hep*(alpha_hep + alpha_d) / (gamma_ehe0 + gamma_lhe0/n_e);
n_hepp = n_hep*(gamma_ehep + gamma_lhep/n_e)/alpha_hepp;
n_e = n_hp + n_hep + 2*n_hepp;
diff = fabs(n_e_old - n_e);
if (diff < tol) break;
}
}
else {
n_h0 = n_h*alpha_hp / (alpha_hp + gamma_eh0);
n_hp = n_h - n_h0;
n_hep = y*n_h / (1.0 + (alpha_hep + alpha_d)/(gamma_ehe0) + (gamma_ehep)/alpha_hepp );
n_he0 = n_hep*(alpha_hep + alpha_d) / (gamma_ehe0);
n_hepp = n_hep*(gamma_ehep)/alpha_hepp;
n_e = n_hp + n_hep + 2*n_hepp;
}
// using number densities, calculate cooling rates for
// various processes (Table 1 from Katz 1996)
le_h0 = (7.50e-19) * exp(-118348.0/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_h0;
le_hep = (5.54e-17) * pow(T,(-0.397)) * exp(-473638.0/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_hep;
li_h0 = (1.27e-21) * sqrt(T) * exp(-157809.1/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_h0;
li_he0 = (9.38e-22) * sqrt(T) * exp(-285335.4/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_he0;
li_hep = (4.95e-22) * sqrt(T) * exp(-631515.0/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_hep;
lr_hp = (8.70e-27) * sqrt(T) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7)))) * n_e * n_hp;
lr_hep = (1.55e-26) * pow(T,(0.3647)) * n_e * n_hep;
lr_hepp = (3.48e-26) * sqrt(T) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7)))) * n_e * n_hepp;
ld_hep = (1.24e-13) * pow(T,(-1.5)) * exp(-470000.0/T) * (1.0 + 0.3*exp(-94000.0/T)) * n_e * n_hep;
g_ff = 1.1 + 0.34*exp(-(5.5-log(T))*(5.5-log(T))/3.0); // Gaunt factor
l_ff = (1.42e-27) * g_ff * sqrt(T) * (n_hp + n_hep + 4*n_hepp) * n_e;
// calculate total cooling rate (erg s^-1 cm^-3)
cool = le_h0 + le_hep + li_h0 + li_he0 + li_hep + lr_hp + lr_hep + lr_hepp + ld_hep + l_ff;
// calculate total photoionization heating rate
H = 0.0;
if (heat_flag) {
H = n_h0*e_h0 + n_he0*e_he0 + n_hep*e_hep;
}
cool -= H;
return cool;
}
/* \fn __device__ Real CIE_cool(Real n, Real T)
* \brief Analytic fit to a solar metallicity CIE cooling curve
calculated using Cloudy. */
__device__ Real CIE_cool(Real n, Real T)
{
Real lambda = 0.0; //cooling rate, erg s^-1 cm^3
Real cool = 0.0; //cooling per unit volume, erg /s / cm^3
// fit to CIE cooling function
if (log10(T) < 4.0) {
lambda = 0.0;
}
else if (log10(T) >= 4.0 && log10(T) < 5.9) {
lambda = pow(10.0, (-1.3 * (log10(T) - 5.25) * (log10(T) - 5.25) - 21.25));
}
else if (log10(T) >= 5.9 && log10(T) < 7.4) {
lambda = pow(10.0, (0.7 * (log10(T) - 7.1) * (log10(T) - 7.1) - 22.8));
}
else {
lambda = pow(10.0, (0.45*log10(T) - 26.065));
}
// cooling rate per unit volume
cool = n*n*lambda;
return cool;
}
/* \fn __device__ Real Cloudy_cool(Real n, Real T)
* \brief Uses texture mapping to interpolate Cloudy cooling/heating
tables at z = 0 with solar metallicity and an HM05 UV background. */
__device__ Real Cloudy_cool(Real n, Real T)
{
#ifdef CLOUDY_COOL
Real lambda = 0.0; //cooling rate, erg s^-1 cm^3
Real H = 0.0; //heating rate, erg s^-1 cm^3
Real cool = 0.0; //cooling per unit volume, erg /s / cm^3
float log_n, log_T;
log_n = log10(n);
log_T = log10(T);
// remap coordinates for texture
log_T = (log_T - 1.0)/8.1;
log_n = (log_n + 6.0)/12.1;
// don't cool below 10 K
if (log10(T) > 1.0) {
lambda = tex2D<float>(coolTexObj, log_T, log_n);
}
else lambda = 0.0;
H = tex2D<float>(heatTexObj, log_T, log_n);
// cooling rate per unit volume
cool = n*n*(powf(10, lambda) - powf(10, H));
return cool;
#endif
}
#endif //COOLING_GPU
#endif //CUDA
| dad91aea65e67d9bb2eef16b1e7e17a386c12668.cu | /*! \file cooling_cuda.cu
* \brief Functions to calculate cooling rate for a given rho, P, dt. */
#ifdef CUDA
#ifdef COOLING_GPU
#include<cuda.h>
#include<math.h>
#include"global.h"
#include"global_cuda.h"
#include"cooling_cuda.h"
extern texture<float, 2, cudaReadModeElementType> coolTexObj;
extern texture<float, 2, cudaReadModeElementType> heatTexObj;
/*! \fn void cooling_kernel(Real *dev_conserved, int nx, int ny, int nz, int n_ghost, int n_fields, Real dt, Real gamma)
* \brief When passed an array of conserved variables and a timestep, adjust the value
of the total energy for each cell according to the specified cooling function. */
__global__ void cooling_kernel(Real *dev_conserved, int nx, int ny, int nz, int n_ghost, int n_fields, Real dt, Real gamma, Real *dt_array)
{
__shared__ Real min_dt[TPB];
int n_cells = nx*ny*nz;
int is, ie, js, je, ks, ke;
is = n_ghost;
ie = nx-n_ghost;
if (ny == 1) {
js = 0;
je = 1;
}
else {
js = n_ghost;
je = ny-n_ghost;
}
if (nz == 1) {
ks = 0;
ke = 1;
}
else {
ks = n_ghost;
ke = nz-n_ghost;
}
Real d, E;
Real n, T, T_init;
Real del_T, dt_sub;
Real mu; // mean molecular weight
Real cool; //cooling rate per volume, erg/s/cm^3
//#ifndef DE
Real vx, vy, vz, p;
//#endif
#ifdef DE
Real ge;
#endif
Real T_min = 1.0e4; // minimum temperature allowed
mu = 0.6;
//mu = 1.27;
// get a global thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
int id = threadIdx.x + blockId * blockDim.x;
int zid = id / (nx*ny);
int yid = (id - zid*nx*ny) / nx;
int xid = id - zid*nx*ny - yid*nx;
// and a thread id within the block
int tid = threadIdx.x;
// set min dt to a high number
min_dt[tid] = 1e10;
__syncthreads();
// only threads corresponding to real cells do the calculation
if (xid >= is && xid < ie && yid >= js && yid < je && zid >= ks && zid < ke) {
// load values of density and pressure
d = dev_conserved[ id];
E = dev_conserved[4*n_cells + id];
// don't apply cooling if this thread crashed
if (E < 0.0 || E != E) return;
//#ifndef DE
vx = dev_conserved[1*n_cells + id] / d;
vy = dev_conserved[2*n_cells + id] / d;
vz = dev_conserved[3*n_cells + id] / d;
p = (E - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
p = fmax(p, (Real) TINY_NUMBER);
//#endif
#ifdef DE
ge = dev_conserved[(n_fields-1)*n_cells + id] / d;
ge = fmax(ge, (Real) TINY_NUMBER);
#endif
// calculate the number density of the gas (in cgs)
n = d*DENSITY_UNIT / (mu * MP);
// calculate the temperature of the gas
//#ifndef DE
T_init = p*PRESSURE_UNIT/ (n*KB);
//#endif
#ifdef DE
//T_init = ge*(gamma-1.0)*SP_ENERGY_UNIT*mu*MP/KB;
T_init = d*ge*(gamma-1.0)*PRESSURE_UNIT/(n*KB);
#endif
// calculate cooling rate per volume
T = T_init;
//if (T > T_max) printf("%3d %3d %3d High T cell. n: %e T: %e\n", xid, yid, zid, n, T);
// call the cooling function
//cool = CIE_cool(n, T);
cool = Cloudy_cool(n, T);
// calculate change in temperature given dt
del_T = cool*dt*TIME_UNIT*(gamma-1.0)/(n*KB);
// limit change in temperature to 1%
while (del_T/T > 0.01) {
// what dt gives del_T = 0.01*T?
dt_sub = 0.01*T*n*KB/(cool*TIME_UNIT*(gamma-1.0));
// apply that dt
T -= cool*dt_sub*TIME_UNIT*(gamma-1.0)/(n*KB);
// how much time is left from the original timestep?
dt -= dt_sub;
// calculate cooling again
//cool = CIE_cool(n, T);
cool = Cloudy_cool(n, T);
// calculate new change in temperature
del_T = cool*dt*TIME_UNIT*(gamma-1.0)/(n*KB);
}
// calculate final temperature
T -= del_T;
// set a temperature floor
// (don't change this cell if the thread crashed)
//if (T > 0.0 && E > 0.0) T = fmax(T, T_min);
// set a temperature ceiling
//T = fmin(T, T_max);
// adjust value of energy based on total change in temperature
del_T = T_init - T; // total change in T
E -= n*KB*del_T / ((gamma-1.0)*ENERGY_UNIT);
#ifdef DE
ge -= KB*del_T / (mu*MP*(gamma-1.0)*SP_ENERGY_UNIT);
#endif
// calculate cooling rate for new T
//cool = CIE_cool(n, T);
cool = Cloudy_cool(n, T);
printf("%d %d %d %e %e %e\n", xid, yid, zid, n, T, cool);
// only use good cells in timestep calculation (in case some have crashed)
if (n > 0 && T > 0 && cool > 0.0) {
// limit the timestep such that delta_T is 10%
min_dt[tid] = 0.1*T*n*KB/(cool*TIME_UNIT*(gamma-1.0));
}
// and send back from kernel
dev_conserved[4*n_cells + id] = E;
#ifdef DE
dev_conserved[(n_fields-1)*n_cells + id] = d*ge;
#endif
}
__syncthreads();
// do the reduction in shared memory (find the min timestep in the block)
for (unsigned int s=1; s<blockDim.x; s*=2) {
if (tid % (2*s) == 0) {
min_dt[tid] = fmin(min_dt[tid], min_dt[tid + s]);
}
__syncthreads();
}
// write the result for this block to global memory
if (tid == 0) dt_array[blockIdx.x] = min_dt[0];
}
/* \fn __device__ Real test_cool(Real n, Real T)
* \brief Cooling function from Creasey 2011. */
__device__ Real test_cool(int tid, Real n, Real T)
{
Real T0, T1, lambda, cool;
T0 = 10000.0;
T1 = 20*T0;
cool = 0.0;
//lambda = 5.0e-24; //cooling coefficient, 5e-24 erg cm^3 s^-1
lambda = 5.0e-20; //cooling coefficient, 5e-24 erg cm^3 s^-1
// constant cooling rate
//cool = n*n*lambda;
// Creasey cooling function
if (T >= T0 && T <= 0.5*(T1+T0)) {
cool = n*n*lambda*(T - T0) / T0;
}
if (T >= 0.5*(T1+T0) && T <= T1) {
cool = n*n*lambda*(T1 - T) / T0;
}
//printf("%d %f %f\n", tid, T, cool);
return cool;
}
/* \fn __device__ Real primordial_cool(Real n, Real T)
* \brief Primordial hydrogen/helium cooling curve
derived according to Katz et al. 1996. */
__device__ Real primordial_cool(Real n, Real T)
{
Real n_h, Y, y, g_ff, cool;
Real n_h0, n_hp, n_he0, n_hep, n_hepp, n_e, n_e_old;
Real alpha_hp, alpha_hep, alpha_d, alpha_hepp, gamma_eh0, gamma_ehe0, gamma_ehep;
Real le_h0, le_hep, li_h0, li_he0, li_hep, lr_hp, lr_hep, lr_hepp, ld_hep, l_ff;
Real gamma_lh0, gamma_lhe0, gamma_lhep, e_h0, e_he0, e_hep, H;
int heat_flag, n_iter;
Real diff, tol;
// set flag to 1 for photoionization & heating
heat_flag = 0;
//Real X = 0.76; //hydrogen abundance by mass
Y = 0.24; //helium abundance by mass
y = Y/(4 - 4*Y);
// set the hydrogen number density
n_h = n;
// calculate the recombination and collisional ionization rates
// (Table 2 from Katz 1996)
alpha_hp = (8.4e-11) * (1.0/sqrt(T)) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7))));
alpha_hep = (1.5e-10) * (pow(T,(-0.6353)));
alpha_d = (1.9e-3) * (pow(T,(-1.5))) * exp(-470000.0/T) * (1.0 + 0.3*exp(-94000.0/T));
alpha_hepp = (3.36e-10)* (1.0/sqrt(T)) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7))));
gamma_eh0 = (5.85e-11)* sqrt(T) * exp(-157809.1/T) * (1.0 / (1.0 + sqrt(T/1e5)));
gamma_ehe0 = (2.38e-11)* sqrt(T) * exp(-285335.4/T) * (1.0 / (1.0 + sqrt(T/1e5)));
gamma_ehep = (5.68e-12)* sqrt(T) * exp(-631515.0/T) * (1.0 / (1.0 + sqrt(T/1e5)));
// externally evaluated integrals for photoionization rates
// assumed J(nu) = 10^-22 (nu_L/nu)
gamma_lh0 = 3.19851e-13;
gamma_lhe0 = 3.13029e-13;
gamma_lhep = 2.00541e-14;
// externally evaluated integrals for heating rates
e_h0 = 2.4796e-24;
e_he0 = 6.86167e-24;
e_hep = 6.21868e-25;
// assuming no photoionization, solve equations for number density of
// each species
n_e = n_h; //as a first guess, use the hydrogen number density
n_iter = 20;
diff = 1.0;
tol = 1.0e-6;
if (heat_flag) {
for (int i=0; i<n_iter; i++) {
n_e_old = n_e;
n_h0 = n_h*alpha_hp / (alpha_hp + gamma_eh0 + gamma_lh0/n_e);
n_hp = n_h - n_h0;
n_hep = y*n_h / (1.0 + (alpha_hep + alpha_d)/(gamma_ehe0 + gamma_lhe0/n_e) + (gamma_ehep + gamma_lhep/n_e)/alpha_hepp );
n_he0 = n_hep*(alpha_hep + alpha_d) / (gamma_ehe0 + gamma_lhe0/n_e);
n_hepp = n_hep*(gamma_ehep + gamma_lhep/n_e)/alpha_hepp;
n_e = n_hp + n_hep + 2*n_hepp;
diff = fabs(n_e_old - n_e);
if (diff < tol) break;
}
}
else {
n_h0 = n_h*alpha_hp / (alpha_hp + gamma_eh0);
n_hp = n_h - n_h0;
n_hep = y*n_h / (1.0 + (alpha_hep + alpha_d)/(gamma_ehe0) + (gamma_ehep)/alpha_hepp );
n_he0 = n_hep*(alpha_hep + alpha_d) / (gamma_ehe0);
n_hepp = n_hep*(gamma_ehep)/alpha_hepp;
n_e = n_hp + n_hep + 2*n_hepp;
}
// using number densities, calculate cooling rates for
// various processes (Table 1 from Katz 1996)
le_h0 = (7.50e-19) * exp(-118348.0/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_h0;
le_hep = (5.54e-17) * pow(T,(-0.397)) * exp(-473638.0/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_hep;
li_h0 = (1.27e-21) * sqrt(T) * exp(-157809.1/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_h0;
li_he0 = (9.38e-22) * sqrt(T) * exp(-285335.4/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_he0;
li_hep = (4.95e-22) * sqrt(T) * exp(-631515.0/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_hep;
lr_hp = (8.70e-27) * sqrt(T) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7)))) * n_e * n_hp;
lr_hep = (1.55e-26) * pow(T,(0.3647)) * n_e * n_hep;
lr_hepp = (3.48e-26) * sqrt(T) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7)))) * n_e * n_hepp;
ld_hep = (1.24e-13) * pow(T,(-1.5)) * exp(-470000.0/T) * (1.0 + 0.3*exp(-94000.0/T)) * n_e * n_hep;
g_ff = 1.1 + 0.34*exp(-(5.5-log(T))*(5.5-log(T))/3.0); // Gaunt factor
l_ff = (1.42e-27) * g_ff * sqrt(T) * (n_hp + n_hep + 4*n_hepp) * n_e;
// calculate total cooling rate (erg s^-1 cm^-3)
cool = le_h0 + le_hep + li_h0 + li_he0 + li_hep + lr_hp + lr_hep + lr_hepp + ld_hep + l_ff;
// calculate total photoionization heating rate
H = 0.0;
if (heat_flag) {
H = n_h0*e_h0 + n_he0*e_he0 + n_hep*e_hep;
}
cool -= H;
return cool;
}
/* \fn __device__ Real CIE_cool(Real n, Real T)
* \brief Analytic fit to a solar metallicity CIE cooling curve
calculated using Cloudy. */
__device__ Real CIE_cool(Real n, Real T)
{
Real lambda = 0.0; //cooling rate, erg s^-1 cm^3
Real cool = 0.0; //cooling per unit volume, erg /s / cm^3
// fit to CIE cooling function
if (log10(T) < 4.0) {
lambda = 0.0;
}
else if (log10(T) >= 4.0 && log10(T) < 5.9) {
lambda = pow(10.0, (-1.3 * (log10(T) - 5.25) * (log10(T) - 5.25) - 21.25));
}
else if (log10(T) >= 5.9 && log10(T) < 7.4) {
lambda = pow(10.0, (0.7 * (log10(T) - 7.1) * (log10(T) - 7.1) - 22.8));
}
else {
lambda = pow(10.0, (0.45*log10(T) - 26.065));
}
// cooling rate per unit volume
cool = n*n*lambda;
return cool;
}
/* \fn __device__ Real Cloudy_cool(Real n, Real T)
* \brief Uses texture mapping to interpolate Cloudy cooling/heating
tables at z = 0 with solar metallicity and an HM05 UV background. */
__device__ Real Cloudy_cool(Real n, Real T)
{
#ifdef CLOUDY_COOL
Real lambda = 0.0; //cooling rate, erg s^-1 cm^3
Real H = 0.0; //heating rate, erg s^-1 cm^3
Real cool = 0.0; //cooling per unit volume, erg /s / cm^3
float log_n, log_T;
log_n = log10(n);
log_T = log10(T);
// remap coordinates for texture
log_T = (log_T - 1.0)/8.1;
log_n = (log_n + 6.0)/12.1;
// don't cool below 10 K
if (log10(T) > 1.0) {
lambda = tex2D<float>(coolTexObj, log_T, log_n);
}
else lambda = 0.0;
H = tex2D<float>(heatTexObj, log_T, log_n);
// cooling rate per unit volume
cool = n*n*(powf(10, lambda) - powf(10, H));
return cool;
#endif
}
#endif //COOLING_GPU
#endif //CUDA
|
aaab786371e909aa152c0071098d246fd7637360.hip | // !!! This is a file automatically generated by hipify!!!
#include <wb.h>
#define wbCheck(stmt) do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
#define Mask_width 5
#define Mask_radius Mask_width/2
//@@ INSERT CODE HERE
int main(int argc, char* argv[]) {
wbArg_t arg;
int maskRows;
int maskColumns;
int imageChannels;
int imageWidth;
int imageHeight;
char * inputImageFile;
char * inputMaskFile;
wbImage_t inputImage;
wbImage_t outputImage;
float * hostInputImageData;
float * hostOutputImageData;
float * hostMaskData;
float * deviceInputImageData;
float * deviceOutputImageData;
float * deviceMaskData;
arg = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(arg, 0);
inputMaskFile = wbArg_getInputFile(arg, 1);
inputImage = wbImport(inputImageFile);
hostMaskData = (float *) wbImport(inputMaskFile, &maskRows, &maskColumns);
assert(maskRows == 5); /* mask height is fixed to 5 in this mp */
assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
hipMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
hipMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
hipMalloc((void **) &deviceMaskData, maskRows * maskColumns * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
hipMemcpy(deviceInputImageData,
hostInputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(deviceMaskData,
hostMaskData,
maskRows * maskColumns * sizeof(float),
hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ INSERT CODE HERE
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
hipMemcpy(hostOutputImageData,
deviceOutputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float),
hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(arg, outputImage);
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
hipFree(deviceMaskData);
free(hostMaskData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
| aaab786371e909aa152c0071098d246fd7637360.cu | #include <wb.h>
#define wbCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
#define Mask_width 5
#define Mask_radius Mask_width/2
//@@ INSERT CODE HERE
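// A minimal, untiled sketch of the 2D convolution this MP asks for; the kernel
// name is hypothetical and the intended solution is expected to use
// tiling/shared memory. Assumes row-major, channel-interleaved image data and
// zero ("ghost") boundary pixels; any output clamping the MP may require is omitted.
__global__ void naiveConvolution2D(const float *in, const float *mask,
                                   float *out, int channels, int width,
                                   int height) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= width || row >= height) return;
    const int r = Mask_radius; // 5/2 == 2
    for (int c = 0; c < channels; ++c) {
        float acc = 0.0f;
        for (int my = -r; my <= r; ++my) {
            for (int mx = -r; mx <= r; ++mx) {
                int y = row + my;
                int x = col + mx;
                // skip out-of-range neighbors (treated as zero)
                if (x >= 0 && x < width && y >= 0 && y < height) {
                    acc += in[(y * width + x) * channels + c] *
                           mask[(my + r) * Mask_width + (mx + r)];
                }
            }
        }
        out[(row * width + col) * channels + c] = acc;
    }
}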
int main(int argc, char* argv[]) {
wbArg_t arg;
int maskRows;
int maskColumns;
int imageChannels;
int imageWidth;
int imageHeight;
char * inputImageFile;
char * inputMaskFile;
wbImage_t inputImage;
wbImage_t outputImage;
float * hostInputImageData;
float * hostOutputImageData;
float * hostMaskData;
float * deviceInputImageData;
float * deviceOutputImageData;
float * deviceMaskData;
arg = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(arg, 0);
inputMaskFile = wbArg_getInputFile(arg, 1);
inputImage = wbImport(inputImageFile);
hostMaskData = (float *) wbImport(inputMaskFile, &maskRows, &maskColumns);
assert(maskRows == 5); /* mask height is fixed to 5 in this mp */
assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
cudaMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **) &deviceMaskData, maskRows * maskColumns * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
cudaMemcpy(deviceInputImageData,
hostInputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(deviceMaskData,
hostMaskData,
maskRows * maskColumns * sizeof(float),
cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ INSERT CODE HERE
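    // One way the naiveConvolution2D sketch above could be launched; the
    // 16x16 block shape is an assumption, not a requirement.
    {
        dim3 convBlock(16, 16);
        dim3 convGrid((imageWidth + convBlock.x - 1) / convBlock.x,
                      (imageHeight + convBlock.y - 1) / convBlock.y);
        naiveConvolution2D<<<convGrid, convBlock>>>(deviceInputImageData,
                                                    deviceMaskData,
                                                    deviceOutputImageData,
                                                    imageChannels,
                                                    imageWidth, imageHeight);
    }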
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
cudaMemcpy(hostOutputImageData,
deviceOutputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float),
cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(arg, outputImage);
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
cudaFree(deviceMaskData);
free(hostMaskData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
|
3cb3337f9885671a188914007b624735a411b8c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "stencil/cuda_runtime.hpp"
#include "stencil/logging.hpp"
#include "stencil/machine.hpp"
#include "stencil/mpi.hpp"
#include "stencil/nvml.hpp"
#include <vector>
/* the NVML UUID looks like an array of chars like ascii "GPU-d1810711-f3ef-4529-8662-52609f808deb"
the CUDA device prop UUID is an array of char with bytes that look like d1810711f3ef4529866252609f808deb when printed as
hex. This function returns a vector of unsigned char parsed from the nvml ascii string
*/
template <unsigned N> std::vector<unsigned char> parse_nvml_uuid(char uuid[N]) {
std::vector<unsigned char> ret;
LOG_DEBUG("parsing NVML uuid " << uuid);
// scan through characters and start after any non-hex ones
unsigned start = 0;
for (unsigned i = 0; i < N && 0 != uuid[i]; ++i) {
char c = uuid[i];
if ((c == '-') || // this is part of the UUID string
(c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') || (c >= '0' && c <= '9')) { // c is a UUID char
} else {
// c is not a UUID char, start parsing afterwards
start = i + 1;
}
}
for (unsigned i = start; i < N && 0 != uuid[i];) {
if ('-' == uuid[i]) {
++i;
continue;
}
unsigned char val;
int filled = sscanf(&uuid[i], "%2hhx", &val);
if (1 != filled) {
LOG_FATAL("NVML UUID parse error");
}
ret.push_back(val);
i += 2;
}
return ret;
}
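/* Minimal illustrative helper (name is hypothetical): compares the bytes
   recovered by parse_nvml_uuid against a 16-byte UUID such as the one carried
   in cudaDeviceProp/hipDeviceProp_t. */
static bool uuid_bytes_match(const std::vector<unsigned char> &parsed,
                             const char *raw, unsigned rawLen) {
  if (parsed.size() != rawLen) {
    return false;
  }
  for (unsigned i = 0; i < rawLen; ++i) {
    if (parsed[i] != static_cast<unsigned char>(raw[i])) {
      return false;
    }
  }
  return true;
}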
int main(int argc, char **argv) {
MPI_Init(&argc, &argv);
Machine machine = Machine::build(MPI_COMM_WORLD);
if (0 == mpi::world_rank()) {
LOG_INFO("nodes: " << machine.num_nodes());
for (int rank = 0; rank < machine.num_ranks(); ++rank) {
LOG_INFO("rank " << rank << ": node " << machine.node_of_rank(rank));
}
for (int gpu = 0; gpu < machine.num_gpus(); ++gpu) {
std::string s;
s += "gpu ";
s += std::to_string(gpu);
s += "/";
s += std::string(machine.gpu(gpu).uuid());
s += ": ranks [";
for (auto r : machine.gpu(gpu).ranks()) {
s += std::to_string(r) + " ";
}
s += ']';
LOG_INFO(s);
}
}
#if 0
#endif
#if 0
const int rank = mpi::comm_rank(MPI_COMM_WORLD);
const int size = mpi::comm_size(MPI_COMM_WORLD);
rsmi_status_t ret;
nvml::lazy_init();
unsigned int deviceCount;
NVML(nvmlDeviceGetCount_v2(&deviceCount));
LOG_INFO(deviceCount << " NVML devices");
for (unsigned int index = 0; index < deviceCount; ++index) {
uint32_t device;
NVML(nvmlDeviceGetHandleByIndex_v2(index, &device))
#if NVML_API_VERSION >= 11
char uuid[NVML_DEVICE_UUID_V2_BUFFER_SIZE];
#else
char uuid[NVML_DEVICE_UUID_BUFFER_SIZE];
#endif
NVML(nvmlDeviceGetUUID(device, uuid, sizeof(uuid)));
LOG_INFO(" NVML " << index << ": " << uuid);
std::vector<unsigned char> rawUuid = parse_nvml_uuid<sizeof(uuid)>(uuid);
LOG_DEBUG(" NVML " << index << ": " << rawUuid.size());
}
int cudaDevCount;
CUDA_RUNTIME(hipGetDeviceCount(&cudaDevCount));
LOG_INFO(cudaDevCount << " CUDA devices");
for (int index = 0; index < cudaDevCount; ++index) {
hipDeviceProp_t prop;
CUDA_RUNTIME(hipGetDeviceProperties(&prop, index));
// hex str of uuid. 2 hex chars per byte
char uuidStr[sizeof(prop.uuid.bytes) * 2 + 1] = {};
for (unsigned i = 0; i < sizeof(prop.uuid.bytes); ++i) {
snprintf(&uuidStr[2*i], 3/*max 2 bytes,+1 NULL*/, "%02x", prop.uuid.bytes[i]);
}
LOG_INFO(" CUDA " << index << ": " << uuidStr);
}
#endif
MPI_Finalize();
return 0;
}
| 3cb3337f9885671a188914007b624735a411b8c0.cu | #include "stencil/cuda_runtime.hpp"
#include "stencil/logging.hpp"
#include "stencil/machine.hpp"
#include "stencil/mpi.hpp"
#include "stencil/nvml.hpp"
#include <vector>
/* the NVML UUID looks like an array of chars like ascii "GPU-d1810711-f3ef-4529-8662-52609f808deb"
the CUDA device prop UUID is an array of char with bytes that look like d1810711f3ef4529866252609f808deb when printed as
hex. This function returns a vector of unsigned char parsed from the nvml ascii string
*/
template <unsigned N> std::vector<unsigned char> parse_nvml_uuid(char uuid[N]) {
std::vector<unsigned char> ret;
LOG_DEBUG("parsing NVML uuid " << uuid);
// scan through characters and start after any non-hex ones
unsigned start = 0;
for (unsigned i = 0; i < N && 0 != uuid[i]; ++i) {
char c = uuid[i];
if ((c == '-') || // this is part of the UUID string
(c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') || (c >= '0' && c <= '9')) { // c is a UUID char
} else {
// c is not a UUID char, start parsing afterwards
start = i + 1;
}
}
for (unsigned i = start; i < N && 0 != uuid[i];) {
if ('-' == uuid[i]) {
++i;
continue;
}
unsigned char val;
int filled = sscanf(&uuid[i], "%2hhx", &val);
if (1 != filled) {
LOG_FATAL("NVML UUID parse error");
}
ret.push_back(val);
i += 2;
}
return ret;
}
int main(int argc, char **argv) {
MPI_Init(&argc, &argv);
Machine machine = Machine::build(MPI_COMM_WORLD);
if (0 == mpi::world_rank()) {
LOG_INFO("nodes: " << machine.num_nodes());
for (int rank = 0; rank < machine.num_ranks(); ++rank) {
LOG_INFO("rank " << rank << ": node " << machine.node_of_rank(rank));
}
for (int gpu = 0; gpu < machine.num_gpus(); ++gpu) {
std::string s;
s += "gpu ";
s += std::to_string(gpu);
s += "/";
s += std::string(machine.gpu(gpu).uuid());
s += ": ranks [";
for (auto r : machine.gpu(gpu).ranks()) {
s += std::to_string(r) + " ";
}
s += ']';
LOG_INFO(s);
}
}
#if 0
#endif
#if 0
const int rank = mpi::comm_rank(MPI_COMM_WORLD);
const int size = mpi::comm_size(MPI_COMM_WORLD);
nvmlReturn_t ret;
nvml::lazy_init();
unsigned int deviceCount;
NVML(nvmlDeviceGetCount_v2(&deviceCount));
LOG_INFO(deviceCount << " NVML devices");
for (unsigned int index = 0; index < deviceCount; ++index) {
nvmlDevice_t device;
NVML(nvmlDeviceGetHandleByIndex_v2(index, &device))
#if NVML_API_VERSION >= 11
char uuid[NVML_DEVICE_UUID_V2_BUFFER_SIZE];
#else
char uuid[NVML_DEVICE_UUID_BUFFER_SIZE];
#endif
NVML(nvmlDeviceGetUUID(device, uuid, sizeof(uuid)));
LOG_INFO(" NVML " << index << ": " << uuid);
std::vector<unsigned char> rawUuid = parse_nvml_uuid<sizeof(uuid)>(uuid);
LOG_DEBUG(" NVML " << index << ": " << rawUuid.size());
}
int cudaDevCount;
CUDA_RUNTIME(cudaGetDeviceCount(&cudaDevCount));
LOG_INFO(cudaDevCount << " CUDA devices");
for (int index = 0; index < cudaDevCount; ++index) {
cudaDeviceProp prop;
CUDA_RUNTIME(cudaGetDeviceProperties(&prop, index));
// hex str of uuid. 2 hex chars per byte
char uuidStr[sizeof(prop.uuid.bytes) * 2 + 1] = {};
for (unsigned i = 0; i < sizeof(prop.uuid.bytes); ++i) {
snprintf(&uuidStr[2*i], 3/*max 2 bytes,+1 NULL*/, "%02x", prop.uuid.bytes[i]);
}
LOG_INFO(" CUDA " << index << ": " << uuidStr);
}
#endif
MPI_Finalize();
return 0;
}
|
91914a4a489813913eec352ef92b4694c49fd643.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/roll_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
template <typename T>
__global__ void roll_cuda_kernel(const T* input, T* output, int64_t N,
int64_t* shifts, int64_t* strides,
int64_t* sizes, int64_t nums) {
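  // Index mapping example: for a 2x3 tensor rolled by 1 along axis 1
  // (stride 1, size 3), flat index 4 (row 1, col 1) gives dim_idx = 1 and
  // dim_idx_shift = 2, so that element is written to flat index 5 (row 1, col 2).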
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
int64_t output_idx = idx;
int64_t dim_idx, dim_idx_shift;
for (int64_t i = 0; i < nums; i++) {
dim_idx = idx % (strides[i] * sizes[i]) / strides[i];
dim_idx_shift = (dim_idx + shifts[i]) % sizes[i];
output_idx = output_idx + (dim_idx_shift - dim_idx) * strides[i];
}
output[output_idx] = input[idx];
}
template <typename DeviceContext, typename T>
class RollCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* in = context.Input<LoDTensor>("X");
auto* out = context.Output<LoDTensor>("Out");
std::vector<int64_t> shifts = context.Attr<std::vector<int64_t>>("shifts");
std::vector<int64_t> dims = context.Attr<std::vector<int64_t>>("axis");
auto* in_data = in->data<T>();
auto* out_data = out->mutable_data<T>(context.GetPlace());
int64_t numel = in->numel();
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
size_t nums = shifts.size();
auto input_dim = in->dims();
auto stride_dim = framework::stride(input_dim);
int64_t dim, size;
size_t gpu_memory_size_ = sizeof(int64_t) * nums;
std::vector<int64_t> strides, sizes;
strides.resize(nums);
sizes.resize(nums);
paddle::memory::AllocationPtr shifts_gpu =
memory::Alloc(context.GetPlace(), gpu_memory_size_);
paddle::memory::AllocationPtr strides_gpu =
memory::Alloc(context.GetPlace(), gpu_memory_size_);
paddle::memory::AllocationPtr sizes_gpu =
memory::Alloc(context.GetPlace(), gpu_memory_size_);
for (size_t i = 0; i < nums; i++) {
dim = dims[i] >= 0 ? dims[i] : dims[i] + input_dim.size();
size = input_dim[dim];
shifts[i] = (shifts[i] % size + size) % size;
strides[i] = stride_dim[dim];
sizes[i] = size;
}
paddle::memory::Copy(
BOOST_GET_CONST(platform::CUDAPlace, shifts_gpu->place()),
shifts_gpu->ptr(), platform::CPUPlace(), shifts.data(),
gpu_memory_size_, stream);
paddle::memory::Copy(
BOOST_GET_CONST(platform::CUDAPlace, strides_gpu->place()),
strides_gpu->ptr(), platform::CPUPlace(), strides.data(),
gpu_memory_size_, stream);
paddle::memory::Copy(
BOOST_GET_CONST(platform::CUDAPlace, sizes_gpu->place()),
sizes_gpu->ptr(), platform::CPUPlace(), sizes.data(), gpu_memory_size_,
stream);
int64_t* shifts_ptr = reinterpret_cast<int64_t*>(shifts_gpu->ptr());
int64_t* strides_ptr = reinterpret_cast<int64_t*>(strides_gpu->ptr());
int64_t* sizes_ptr = reinterpret_cast<int64_t*>(sizes_gpu->ptr());
hipLaunchKernelGGL(( roll_cuda_kernel), (numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
in_data, out_data, numel, shifts_ptr, strides_ptr, sizes_ptr, nums);
}
};
template <typename DeviceContext, typename T>
class RollGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* in = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto* out = context.Output<LoDTensor>(framework::GradVarName("X"));
std::vector<int64_t> shifts = context.Attr<std::vector<int64_t>>("shifts");
std::vector<int64_t> dims = context.Attr<std::vector<int64_t>>("axis");
auto* in_data = in->data<T>();
auto* out_data = out->mutable_data<T>(context.GetPlace());
int64_t numel = in->numel();
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
size_t nums = shifts.size();
auto input_dim = in->dims();
auto stride_dim = framework::stride(input_dim);
int64_t dim, size;
size_t gpu_memory_size_ = sizeof(int64_t) * nums;
std::vector<int64_t> strides, sizes;
strides.resize(nums);
sizes.resize(nums);
paddle::memory::AllocationPtr shifts_gpu =
memory::Alloc(context.GetPlace(), gpu_memory_size_);
paddle::memory::AllocationPtr strides_gpu =
memory::Alloc(context.GetPlace(), gpu_memory_size_);
paddle::memory::AllocationPtr sizes_gpu =
memory::Alloc(context.GetPlace(), gpu_memory_size_);
for (size_t i = 0; i < nums; i++) {
dim = dims[i] >= 0 ? dims[i] : dims[i] + input_dim.size();
size = input_dim[dim];
shifts[i] = ((0 - shifts[i]) % size + size) % size;
strides[i] = stride_dim[dim];
sizes[i] = size;
}
paddle::memory::Copy(
BOOST_GET_CONST(platform::CUDAPlace, shifts_gpu->place()),
shifts_gpu->ptr(), platform::CPUPlace(), shifts.data(),
gpu_memory_size_, stream);
paddle::memory::Copy(
BOOST_GET_CONST(platform::CUDAPlace, strides_gpu->place()),
strides_gpu->ptr(), platform::CPUPlace(), strides.data(),
gpu_memory_size_, stream);
paddle::memory::Copy(
BOOST_GET_CONST(platform::CUDAPlace, sizes_gpu->place()),
sizes_gpu->ptr(), platform::CPUPlace(), sizes.data(), gpu_memory_size_,
stream);
int64_t* shifts_ptr = reinterpret_cast<int64_t*>(shifts_gpu->ptr());
int64_t* strides_ptr = reinterpret_cast<int64_t*>(strides_gpu->ptr());
int64_t* sizes_ptr = reinterpret_cast<int64_t*>(sizes_gpu->ptr());
hipLaunchKernelGGL(( roll_cuda_kernel), (numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
in_data, out_data, numel, shifts_ptr, strides_ptr, sizes_ptr, nums);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
roll, ops::RollCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::RollCUDAKernel<paddle::platform::CUDADeviceContext, double>,
ops::RollCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::RollCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
roll_grad,
ops::RollGradCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::RollGradCUDAKernel<paddle::platform::CUDADeviceContext, double>,
ops::RollGradCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::RollGradCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
| 91914a4a489813913eec352ef92b4694c49fd643.cu | // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/roll_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
template <typename T>
__global__ void roll_cuda_kernel(const T* input, T* output, int64_t N,
int64_t* shifts, int64_t* strides,
int64_t* sizes, int64_t nums) {
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
int64_t output_idx = idx;
int64_t dim_idx, dim_idx_shift;
for (int64_t i = 0; i < nums; i++) {
dim_idx = idx % (strides[i] * sizes[i]) / strides[i];
dim_idx_shift = (dim_idx + shifts[i]) % sizes[i];
output_idx = output_idx + (dim_idx_shift - dim_idx) * strides[i];
}
output[output_idx] = input[idx];
}
template <typename DeviceContext, typename T>
class RollCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* in = context.Input<LoDTensor>("X");
auto* out = context.Output<LoDTensor>("Out");
std::vector<int64_t> shifts = context.Attr<std::vector<int64_t>>("shifts");
std::vector<int64_t> dims = context.Attr<std::vector<int64_t>>("axis");
auto* in_data = in->data<T>();
auto* out_data = out->mutable_data<T>(context.GetPlace());
int64_t numel = in->numel();
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
size_t nums = shifts.size();
auto input_dim = in->dims();
auto stride_dim = framework::stride(input_dim);
int64_t dim, size;
size_t gpu_memory_size_ = sizeof(int64_t) * nums;
std::vector<int64_t> strides, sizes;
strides.resize(nums);
sizes.resize(nums);
paddle::memory::AllocationPtr shifts_gpu =
memory::Alloc(context.GetPlace(), gpu_memory_size_);
paddle::memory::AllocationPtr strides_gpu =
memory::Alloc(context.GetPlace(), gpu_memory_size_);
paddle::memory::AllocationPtr sizes_gpu =
memory::Alloc(context.GetPlace(), gpu_memory_size_);
for (size_t i = 0; i < nums; i++) {
dim = dims[i] >= 0 ? dims[i] : dims[i] + input_dim.size();
size = input_dim[dim];
shifts[i] = (shifts[i] % size + size) % size;
strides[i] = stride_dim[dim];
sizes[i] = size;
}
paddle::memory::Copy(
BOOST_GET_CONST(platform::CUDAPlace, shifts_gpu->place()),
shifts_gpu->ptr(), platform::CPUPlace(), shifts.data(),
gpu_memory_size_, stream);
paddle::memory::Copy(
BOOST_GET_CONST(platform::CUDAPlace, strides_gpu->place()),
strides_gpu->ptr(), platform::CPUPlace(), strides.data(),
gpu_memory_size_, stream);
paddle::memory::Copy(
BOOST_GET_CONST(platform::CUDAPlace, sizes_gpu->place()),
sizes_gpu->ptr(), platform::CPUPlace(), sizes.data(), gpu_memory_size_,
stream);
int64_t* shifts_ptr = reinterpret_cast<int64_t*>(shifts_gpu->ptr());
int64_t* strides_ptr = reinterpret_cast<int64_t*>(strides_gpu->ptr());
int64_t* sizes_ptr = reinterpret_cast<int64_t*>(sizes_gpu->ptr());
roll_cuda_kernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
in_data, out_data, numel, shifts_ptr, strides_ptr, sizes_ptr, nums);
}
};
template <typename DeviceContext, typename T>
class RollGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* in = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto* out = context.Output<LoDTensor>(framework::GradVarName("X"));
std::vector<int64_t> shifts = context.Attr<std::vector<int64_t>>("shifts");
std::vector<int64_t> dims = context.Attr<std::vector<int64_t>>("axis");
auto* in_data = in->data<T>();
auto* out_data = out->mutable_data<T>(context.GetPlace());
int64_t numel = in->numel();
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
size_t nums = shifts.size();
auto input_dim = in->dims();
auto stride_dim = framework::stride(input_dim);
int64_t dim, size;
size_t gpu_memory_size_ = sizeof(int64_t) * nums;
std::vector<int64_t> strides, sizes;
strides.resize(nums);
sizes.resize(nums);
paddle::memory::AllocationPtr shifts_gpu =
memory::Alloc(context.GetPlace(), gpu_memory_size_);
paddle::memory::AllocationPtr strides_gpu =
memory::Alloc(context.GetPlace(), gpu_memory_size_);
paddle::memory::AllocationPtr sizes_gpu =
memory::Alloc(context.GetPlace(), gpu_memory_size_);
for (size_t i = 0; i < nums; i++) {
dim = dims[i] >= 0 ? dims[i] : dims[i] + input_dim.size();
size = input_dim[dim];
shifts[i] = ((0 - shifts[i]) % size + size) % size;
strides[i] = stride_dim[dim];
sizes[i] = size;
}
paddle::memory::Copy(
BOOST_GET_CONST(platform::CUDAPlace, shifts_gpu->place()),
shifts_gpu->ptr(), platform::CPUPlace(), shifts.data(),
gpu_memory_size_, stream);
paddle::memory::Copy(
BOOST_GET_CONST(platform::CUDAPlace, strides_gpu->place()),
strides_gpu->ptr(), platform::CPUPlace(), strides.data(),
gpu_memory_size_, stream);
paddle::memory::Copy(
BOOST_GET_CONST(platform::CUDAPlace, sizes_gpu->place()),
sizes_gpu->ptr(), platform::CPUPlace(), sizes.data(), gpu_memory_size_,
stream);
int64_t* shifts_ptr = reinterpret_cast<int64_t*>(shifts_gpu->ptr());
int64_t* strides_ptr = reinterpret_cast<int64_t*>(strides_gpu->ptr());
int64_t* sizes_ptr = reinterpret_cast<int64_t*>(sizes_gpu->ptr());
roll_cuda_kernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
in_data, out_data, numel, shifts_ptr, strides_ptr, sizes_ptr, nums);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
roll, ops::RollCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::RollCUDAKernel<paddle::platform::CUDADeviceContext, double>,
ops::RollCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::RollCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
roll_grad,
ops::RollGradCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::RollGradCUDAKernel<paddle::platform::CUDADeviceContext, double>,
ops::RollGradCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::RollGradCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
|
fc03418945db7fa6dee60a32b5c0f8d9e0cbe364.hip | // !!! This is a file automatically generated by hipify!!!
/* CUDA blur
* Kevin Yuh, 2014 */
#include <cstdio>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include "fft_convolve.cuh"
/*
Atomic-max function. You may find it useful for normalization.
We haven't really talked about this yet, but __device__ functions not
only are run on the GPU, but are called from within a kernel.
Source:
http://stackoverflow.com/questions/17399119/
cant-we-use-atomic-operations-for-floating-point-variables-in-cuda
*/
__device__ static float atomicMax(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__global__
void
cudaProdScaleKernel(const hipfftComplex *raw_data, const hipfftComplex *impulse_v,
hipfftComplex *out_data,
int padded_length) {
/* TODO: Implement the point-wise multiplication and scaling for the
FFT'd input and impulse response.
Recall that these are complex numbers, so you'll need to use the
appropriate rule for multiplying them.
Also remember to scale by the padded length of the signal
(see the notes for Question 1).
As in Assignment 1 and Week 1, remember to make your implementation
resilient to varying numbers of threads.
*/
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < padded_length) {
out_data[idx].x = (raw_data[idx].x * impulse_v[idx].x - raw_data[idx].y * impulse_v[idx].y) / padded_length;
out_data[idx].y = (raw_data[idx].x * impulse_v[idx].y + raw_data[idx].y * impulse_v[idx].x) / padded_length;
idx += blockDim.x * gridDim.x;
}
}
__global__
void
cudaMaximumKernel(hipfftComplex *out_data, float *max_abs_val,
int padded_length) {
/* TODO 2: Implement the maximum-finding.
There are many ways to do this reduction, and some methods
have much better performance than others.
For this section: Please explain your approach to the reduction,
including why you chose the optimizations you did
(especially as they relate to GPU hardware).
You'll likely find the above atomicMax function helpful.
(CUDA's atomicMax function doesn't work for floating-point values.)
It's based on two principles:
1) From Week 2, any atomic function can be implemented using
atomic compare-and-swap.
2) One can "represent" floating-point values as integers in
a way that preserves comparison, if the sign of the two
values is the same. (see http://stackoverflow.com/questions/
29596797/can-the-return-value-of-float-as-int-be-used-to-
compare-float-in-cuda)
*/
extern __shared__ float shared_memory_data[];
int global_idx = threadIdx.x + blockIdx.x * blockDim.x;
int shared_idx = threadIdx.x;
int start_idx = blockIdx.x * blockDim.x;
while (start_idx < padded_length) {
// Initialize the shared memory
shared_memory_data[shared_idx] = 0.0;
__syncthreads();
// Copy data from global memory to the shared memory
if (global_idx < padded_length) {
shared_memory_data[shared_idx] = out_data[global_idx].x > 0 ? out_data[global_idx].x : -out_data[global_idx].x;
}
__syncthreads();
// Begin reduction
int shared_idx_threshold = blockDim.x;
for (int i = 0; i < (int)log2((double)blockDim.x); i++) {
shared_idx_threshold /= 2;
if (shared_idx < shared_idx_threshold) {
shared_memory_data[shared_idx] =
shared_memory_data[shared_idx] > shared_memory_data[shared_idx + shared_idx_threshold] ? shared_memory_data[shared_idx] : shared_memory_data[shared_idx + shared_idx_threshold];
}
__syncthreads();
}
if (shared_idx == 0) {
// Use the maximum value inside this block to update the global maximum abs val
atomicMax(max_abs_val, shared_memory_data[0]);
}
// Update the start_idx
start_idx += blockDim.x * gridDim.x;
global_idx += blockDim.x * gridDim.x;
}
}
__global__
void
cudaDivideKernel(hipfftComplex *out_data, float *max_abs_val,
int padded_length) {
/* TODO 2: Implement the division kernel. Divide all
data by the value pointed to by max_abs_val.
This kernel should be quite short.
*/
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < padded_length) {
out_data[idx].x = out_data[idx].x / (*max_abs_val);
idx += blockDim.x * gridDim.x;
}
}
void cudaCallProdScaleKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
const hipfftComplex *raw_data,
const hipfftComplex *impulse_v,
hipfftComplex *out_data,
const unsigned int padded_length) {
/* TODO: Call the element-wise product and scaling kernel. */
hipLaunchKernelGGL(( cudaProdScaleKernel), dim3(dim3(blocks)), dim3(dim3(threadsPerBlock)), 0, 0, raw_data, impulse_v, out_data, padded_length);
}
void cudaCallMaximumKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
hipfftComplex *out_data,
float *max_abs_val,
const unsigned int padded_length) {
/* TODO 2: Call the max-finding kernel. */
hipLaunchKernelGGL(( cudaMaximumKernel), dim3(dim3(blocks)), dim3(dim3(threadsPerBlock)), threadsPerBlock * sizeof(float), 0, out_data, max_abs_val, padded_length);
}
void cudaCallDivideKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
hipfftComplex *out_data,
float *max_abs_val,
const unsigned int padded_length) {
/* TODO 2: Call the division kernel. */
hipLaunchKernelGGL(( cudaDivideKernel), dim3(dim3(blocks)), dim3(dim3(threadsPerBlock)), 0, 0, out_data, max_abs_val, padded_length);
}
| fc03418945db7fa6dee60a32b5c0f8d9e0cbe364.cu | /* CUDA blur
* Kevin Yuh, 2014 */
#include <cstdio>
#include <math.h>
#include <cuda_runtime.h>
#include <cufft.h>
#include "fft_convolve.cuh"
/*
Atomic-max function. You may find it useful for normalization.
We haven't really talked about this yet, but __device__ functions not
only are run on the GPU, but are called from within a kernel.
Source:
http://stackoverflow.com/questions/17399119/
cant-we-use-atomic-operations-for-floating-point-variables-in-cuda
*/
__device__ static float atomicMax(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
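/* Added illustration (not part of the original assignment file): the same
   compare-and-swap loop generalizes to other floating-point atomics. A minimal
   sketch of an atomic minimum, using the hypothetical name atomicMinFloat: */
__device__ static float atomicMinFloat(float* address, float val)
{
    int* address_as_i = (int*) address;
    int old = *address_as_i, assumed;
    do {
        assumed = old;
        // Retry until no other thread modified *address between our read and our CAS.
        old = ::atomicCAS(address_as_i, assumed,
            __float_as_int(::fminf(val, __int_as_float(assumed))));
    } while (assumed != old);
    return __int_as_float(old);
}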
__global__
void
cudaProdScaleKernel(const cufftComplex *raw_data, const cufftComplex *impulse_v,
cufftComplex *out_data,
int padded_length) {
/* TODO: Implement the point-wise multiplication and scaling for the
FFT'd input and impulse response.
Recall that these are complex numbers, so you'll need to use the
appropriate rule for multiplying them.
Also remember to scale by the padded length of the signal
(see the notes for Question 1).
As in Assignment 1 and Week 1, remember to make your implementation
resilient to varying numbers of threads.
*/
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < padded_length) {
out_data[idx].x = (raw_data[idx].x * impulse_v[idx].x - raw_data[idx].y * impulse_v[idx].y) / padded_length;
out_data[idx].y = (raw_data[idx].x * impulse_v[idx].y + raw_data[idx].y * impulse_v[idx].x) / padded_length;
idx += blockDim.x * gridDim.x;
}
}
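// Added note: cudaProdScaleKernel above applies the usual complex product
// (a + bi)(c + di) = (ac - bd) + (ad + bc)i, and the division by padded_length
// compensates for cuFFT transforms being unnormalized (a forward followed by an
// inverse transform scales the signal by its length).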
__global__
void
cudaMaximumKernel(cufftComplex *out_data, float *max_abs_val,
int padded_length) {
/* TODO 2: Implement the maximum-finding.
There are many ways to do this reduction, and some methods
have much better performance than others.
For this section: Please explain your approach to the reduction,
including why you chose the optimizations you did
(especially as they relate to GPU hardware).
You'll likely find the above atomicMax function helpful.
(CUDA's atomicMax function doesn't work for floating-point values.)
It's based on two principles:
1) From Week 2, any atomic function can be implemented using
atomic compare-and-swap.
2) One can "represent" floating-point values as integers in
a way that preserves comparison, if the sign of the two
values is the same. (see http://stackoverflow.com/questions/
29596797/can-the-return-value-of-float-as-int-be-used-to-
compare-float-in-cuda)
*/
extern __shared__ float shared_memory_data[];
int global_idx = threadIdx.x + blockIdx.x * blockDim.x;
int shared_idx = threadIdx.x;
int start_idx = blockIdx.x * blockDim.x;
while (start_idx < padded_length) {
// Initialize the shared memory
shared_memory_data[shared_idx] = 0.0;
__syncthreads();
// Copy data from global memory to the shared memory
if (global_idx < padded_length) {
shared_memory_data[shared_idx] = out_data[global_idx].x > 0 ? out_data[global_idx].x : -out_data[global_idx].x;
}
__syncthreads();
// Begin reduction
int shared_idx_threshold = blockDim.x;
for (int i = 0; i < (int)log2((double)blockDim.x); i++) {
shared_idx_threshold /= 2;
if (shared_idx < shared_idx_threshold) {
shared_memory_data[shared_idx] =
shared_memory_data[shared_idx] > shared_memory_data[shared_idx + shared_idx_threshold] ? shared_memory_data[shared_idx] : shared_memory_data[shared_idx + shared_idx_threshold];
}
__syncthreads();
}
if (shared_idx == 0) {
// Use the maximum value inside this block to update the global maximum abs val
atomicMax(max_abs_val, shared_memory_data[0]);
}
// Update the start_idx
start_idx += blockDim.x * gridDim.x;
global_idx += blockDim.x * gridDim.x;
}
}
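/* Added illustration (not required by the assignment): a lower-latency variant
   of the in-block maximum can use warp shuffle intrinsics so the 32 lanes of a
   warp exchange values through registers instead of shared memory. A minimal
   sketch, using the hypothetical name warpReduceMax: */
__device__ static float warpReduceMax(float val)
{
    // After the 5 halving steps (16, 8, 4, 2, 1), lane 0 holds the warp-wide maximum.
    for (int offset = 16; offset > 0; offset >>= 1)
        val = fmaxf(val, __shfl_down_sync(0xffffffff, val, offset));
    return val;
}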
__global__
void
cudaDivideKernel(cufftComplex *out_data, float *max_abs_val,
int padded_length) {
/* TODO 2: Implement the division kernel. Divide all
data by the value pointed to by max_abs_val.
This kernel should be quite short.
*/
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < padded_length) {
out_data[idx].x = out_data[idx].x / (*max_abs_val);
idx += blockDim.x * gridDim.x;
}
}
void cudaCallProdScaleKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
const cufftComplex *raw_data,
const cufftComplex *impulse_v,
cufftComplex *out_data,
const unsigned int padded_length) {
/* TODO: Call the element-wise product and scaling kernel. */
cudaProdScaleKernel<<<dim3(blocks), dim3(threadsPerBlock)>>>(raw_data, impulse_v, out_data, padded_length);
}
void cudaCallMaximumKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
cufftComplex *out_data,
float *max_abs_val,
const unsigned int padded_length) {
/* TODO 2: Call the max-finding kernel. */
cudaMaximumKernel<<<dim3(blocks), dim3(threadsPerBlock), threadsPerBlock * sizeof(float)>>>(out_data, max_abs_val, padded_length);
}
void cudaCallDivideKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
cufftComplex *out_data,
float *max_abs_val,
const unsigned int padded_length) {
/* TODO 2: Call the division kernel. */
cudaDivideKernel<<<dim3(blocks), dim3(threadsPerBlock)>>>(out_data, max_abs_val, padded_length);
}
|
89066b513e30e3d8c9c9365b8731160502d424a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bitset.cuh"
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
template <size_t array_size>
__global__ void kern (Bitset<array_size> &a) {
a.add(1);
a.add(-1);
a.add(-2);
a.add(0);
a.add(-160);
a.add(160);
a.add(-394);
a.remove(-1);
a.remove(-394);
}
template <size_t array_size>
__global__ void diff_test (Bitset<array_size> &a, Bitset<array_size> &b) {
}
int main () {
Bitset<20> *a = new Bitset<20>;
Bitset<20> *b = new Bitset<20>;
for(int i=31;i<400;++i){a->add(i); }
for(int i=-100;i<-32;++i){b->add(i); }
/*
a.add(1);
a.add(-1);
a.add(-2);
a.add(0);
a.add(-160);
a.print();
a.add(160);
a.add(-394);
a.remove(-1);
a.remove(-394);
*/
hipDeviceSynchronize();
hipLaunchKernelGGL(( kern), dim3(1),dim3(20), 0, 0, *a);
hipDeviceSynchronize();
b->print();
hipDeviceSynchronize();
hipLaunchKernelGGL(( diff_test), dim3(1),dim3(42), 0, 0, *b, *a);
hipDeviceSynchronize();
printf("=========== B ============\n");
b->print();
printf("=========== A ============\n");
a->print();
printf("=========== B / A============\n");
(b->diff(*a)).print();
Bitset<20> c;
c = b->diff(*a);
printf("=========== C ============\n");
c.add(-32);
c.print();
c.max();
printf("min: %i\n", a->min());
return 0;
};
| 89066b513e30e3d8c9c9365b8731160502d424a1.cu | #include "bitset.cuh"
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
template <size_t array_size>
__global__ void kern (Bitset<array_size> &a) {
a.add(1);
a.add(-1);
a.add(-2);
a.add(0);
a.add(-160);
a.add(160);
a.add(-394);
a.remove(-1);
a.remove(-394);
}
template <size_t array_size>
__global__ void diff_test (Bitset<array_size> &a, Bitset<array_size> &b) {
}
int main () {
Bitset<20> *a = new Bitset<20>;
Bitset<20> *b = new Bitset<20>;
for(int i=31;i<400;++i){a->add(i); }
for(int i=-100;i<-32;++i){b->add(i); }
/*
a.add(1);
a.add(-1);
a.add(-2);
a.add(0);
a.add(-160);
a.print();
a.add(160);
a.add(-394);
a.remove(-1);
a.remove(-394);
*/
cudaDeviceSynchronize();
kern<<<1,20>>>(*a);
cudaDeviceSynchronize();
b->print();
cudaDeviceSynchronize();
diff_test<<<1,42>>>(*b, *a);
cudaDeviceSynchronize();
printf("=========== B ============\n");
b->print();
printf("=========== A ============\n");
a->print();
printf("=========== B / A============\n");
(b->diff(*a)).print();
Bitset<20> c;
c = b->diff(*a);
printf("=========== C ============\n");
c.add(-32);
c.print();
c.max();
printf("min: %i\n", a->min());
return 0;
};
|
d2d2efde3ed2de136cbdbe86108b3841511c450c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// Kernel function to generate random numbers
__global__
void genran(int *rnd,double m)
{
double n,a=1103515245, c=12345;
n=blockIdx.x*blockDim.x+threadIdx.x;
//n=threadIdx.x;
for(int i=0;i<threadIdx.x;i++)
n=fmod(((n*a)+c),m);
__syncthreads();
atomicAdd(&rnd[(unsigned long int)n],1);
}
int main(void)
{
int t=29;
long int m = pow(2,t);
int *rnd;
double val;
// Allocate Unified Memory – accessible from CPU or GPU
hipMallocManaged(&rnd, m*sizeof(int));
// initialize
val = m;
for (int i = 0; i < m; i++) {
rnd[i] = 0;
}
//generate random numbers
int blockSize = 128;
int numblocks = (m+blockSize-1)/blockSize;
// Run kernel
hipLaunchKernelGGL(( genran), dim3(numblocks), dim3(blockSize), 0, 0, rnd,val);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
//Generate Histogram
long double count =0,j=0;
for(long int i=0;i<m;++i)
{
count+=rnd[i];
j++;
if(j==pow(2,t-5))
{
j=0;
printf("|");
count/=pow(2,t-10);
for(int k=0;k<count;++k)
printf("*");
printf("\n");
count=0;
}
}
// Free memory
hipFree(rnd);
return 0;
}
| d2d2efde3ed2de136cbdbe86108b3841511c450c.cu | #include <iostream>
#include <math.h>
// Kernel function to generate random numbers
__global__
void genran(int *rnd,double m)
{
double n,a=1103515245, c=12345;
n=blockIdx.x*blockDim.x+threadIdx.x;
//n=threadIdx.x;
for(int i=0;i<threadIdx.x;i++)
n=fmod(((n*a)+c),m);
__syncthreads();
atomicAdd(&rnd[(unsigned long int)n],1);
}
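// Added note: the recurrence above, n = (a*n + c) mod m with a = 1103515245 and
// c = 12345, is a linear congruential generator (the constants from the C
// standard's sample rand() implementation); each thread iterates it threadIdx.x
// times starting from its global index, and the atomicAdd bins the final value
// into the histogram array.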
int main(void)
{
int t=29;
long int m = pow(2,t);
int *rnd;
double val;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&rnd, m*sizeof(int));
// initialize
val = m;
for (int i = 0; i < m; i++) {
rnd[i] = 0;
}
//generate random numbers
int blockSize = 128;
int numblocks = (m+blockSize-1)/blockSize;
// Run kernel
genran<<<numblocks, blockSize>>>(rnd,val);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
//Generate Histogram
long double count =0,j=0;
for(long int i=0;i<m;++i)
{
count+=rnd[i];
j++;
if(j==pow(2,t-5))
{
j=0;
printf("|");
count/=pow(2,t-10);
for(int k=0;k<count;++k)
printf("*");
printf("\n");
count=0;
}
}
// Free memory
cudaFree(rnd);
return 0;
}
|
b124761142c3c487a63fa9cec7b5ee0f5f08c3f2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "local_sols.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *Le = NULL;
hipMalloc(&Le, XSIZE*YSIZE);
float *be = NULL;
hipMalloc(&be, XSIZE*YSIZE);
float *ue = NULL;
hipMalloc(&ue, XSIZE*YSIZE);
float *up_glob = NULL;
hipMalloc(&up_glob, XSIZE*YSIZE);
int *cells = NULL;
hipMalloc(&cells, XSIZE*YSIZE);
int num_cells = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
local_sols), dim3(gridBlock),dim3(threadBlock), 0, 0, Le,be,ue,up_glob,cells,num_cells);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
local_sols), dim3(gridBlock),dim3(threadBlock), 0, 0, Le,be,ue,up_glob,cells,num_cells);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
local_sols), dim3(gridBlock),dim3(threadBlock), 0, 0, Le,be,ue,up_glob,cells,num_cells);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b124761142c3c487a63fa9cec7b5ee0f5f08c3f2.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "local_sols.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *Le = NULL;
cudaMalloc(&Le, XSIZE*YSIZE);
float *be = NULL;
cudaMalloc(&be, XSIZE*YSIZE);
float *ue = NULL;
cudaMalloc(&ue, XSIZE*YSIZE);
float *up_glob = NULL;
cudaMalloc(&up_glob, XSIZE*YSIZE);
int *cells = NULL;
cudaMalloc(&cells, XSIZE*YSIZE);
int num_cells = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
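// Added note: the two loops above just round the problem size up to the next
// multiple of the block size; an equivalent closed form would be
// iXSIZE = ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX; (and likewise for Y).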
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
local_sols<<<gridBlock,threadBlock>>>(Le,be,ue,up_glob,cells,num_cells);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
local_sols<<<gridBlock,threadBlock>>>(Le,be,ue,up_glob,cells,num_cells);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
local_sols<<<gridBlock,threadBlock>>>(Le,be,ue,up_glob,cells,num_cells);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
115ea56a5c82299da86392cc7091a30e35a43df2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Attention : Extension .cu
#include <iostream>
#include "cudaTools.h"
#include "Device.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__host__ bool helloCuda(void); //__host__ optional
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__global__ static void kernelHello(void);
__device__ static void doSomethingHello(void);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* summary of CUDA commands:
* http://developer.download.nvidia.com/compute/cuda/4_2/rel/toolkit/docs/online/index.html
*/
__host__ bool helloCuda(void) //__host__ optional
{
cout << endl << "[Hello Cuda 1]" << endl;
// Specify the number of threads: here 1 thread in total!
dim3 dg = dim3(1, 1, 1);
dim3 db = dim3(1, 1, 1);
Device::gridHeuristic(dg, db);
Device::lastCudaError("kernelHello (before)"); // temp debug
hipLaunchKernelGGL(( kernelHello), dim3(dg),dim3(db), 0, 0, ); // asynchronous !!
Device::lastCudaError("kernelHello (after)"); // temp debug
return true;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/**
* output : void
*/
__global__ void kernelHello(void)
{
doSomethingHello();
}
/**
* Can be called only by device code
* inlined by nvcc (nvidia compiler)
*/
__device__ void doSomethingHello(void)
{
// nothing
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 115ea56a5c82299da86392cc7091a30e35a43df2.cu | // Attention : Extension .cu
#include <iostream>
#include "cudaTools.h"
#include "Device.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__host__ bool helloCuda(void); //__host__ optional
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__global__ static void kernelHello(void);
__device__ static void doSomethingHello(void);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* summary of CUDA commands:
* http://developer.download.nvidia.com/compute/cuda/4_2/rel/toolkit/docs/online/index.html
*/
__host__ bool helloCuda(void) //__host__ optional
{
cout << endl << "[Hello Cuda 1]" << endl;
// Specify the number of threads: here 1 thread in total!
dim3 dg = dim3(1, 1, 1);
dim3 db = dim3(1, 1, 1);
Device::gridHeuristic(dg, db);
Device::lastCudaError("kernelHello (before)"); // temp debug
kernelHello<<<dg,db>>>(); // asynchronous !!
Device::lastCudaError("kernelHello (after)"); // temp debug
return true;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/**
* output : void
*/
__global__ void kernelHello(void)
{
doSomethingHello();
}
/**
* Can be called only by device code
* inlined by nvcc (nvidia compiler)
*/
__device__ void doSomethingHello(void)
{
// nothing
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
cfc3eb2396385d0df48aceb57d6163d3ef54d29b.hip | // !!! This is a file automatically generated by hipify!!!
//
// Diogo Andrade 89265
// Francisco Silveira 84802
//
// BASE CODE:
// Tomás Oliveira e Silva, November 2017
//
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "common.h"
#include <hip/hip_runtime.h>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// program configuration
//
#ifndef N_SAMPLES
#define N_SAMPLES (1 << 16)
#endif
#ifndef DISTANCE
#define DISTANCE 3
#endif
static void ccc_samples_cpu_kernel(double *results_data, double *samples_data_x, double *samples_data_y, unsigned int n_samples, unsigned int point);
__global__ static void ccc_samples_cuda_kernel(double *__restrict__ results_data, double *__restrict__ samples_data_x, double *__restrict__ samples_data_y,
unsigned int n_samples, unsigned int distance);
static double get_delta_time(void);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Main program
//
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
if (sizeof(unsigned int) != (size_t)4)
return 1; // fail with prejudice if an integer does not have 4 bytes
// set up device
int dev = 0;
int i;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// create memory areas in host and device memory where the disk samples data will be stored
size_t samples_data_size;
double *host_results_data, *host_samples_data_x, *host_samples_data_y;
double *device_results_data, *device_samples_data_x, *device_samples_data_y;
samples_data_size = (size_t)N_SAMPLES * sizeof(double);
if ((samples_data_size * 3) > (size_t)1.3e9)
{
fprintf(stderr, "The GTX 480 cannot handle more than 1.5GiB of memory!\n");
exit(1);
}
printf("Total samples data size: %lu\n", samples_data_size);
host_results_data = (double *)malloc(samples_data_size);
host_samples_data_x = (double *)malloc(samples_data_size);
host_samples_data_y = (double *)malloc(samples_data_size);
CHECK(hipMalloc((void **)&device_results_data, samples_data_size));
CHECK(hipMalloc((void **)&device_samples_data_x, samples_data_size));
CHECK(hipMalloc((void **)&device_samples_data_y, samples_data_size));
// initialize the host data
(void)get_delta_time();
srand(0xCCE2021);
for (i = 0; i < N_SAMPLES; i++)
{
host_results_data[i] = 0;
host_samples_data_x[i] = ((double)rand() / RAND_MAX) * (0.5 - (-0.5)) + (-0.5);
host_samples_data_y[i] = ((double)rand() / RAND_MAX) * (0.5 - (-0.5)) + (-0.5);
}
printf("The initialization of host data took %.3e seconds\n", get_delta_time());
// copy the host data to the device memory
(void)get_delta_time();
CHECK(hipMemcpy(device_results_data, host_results_data, samples_data_size, hipMemcpyHostToDevice));
CHECK(hipMemcpy(device_samples_data_x, host_samples_data_x, samples_data_size, hipMemcpyHostToDevice));
CHECK(hipMemcpy(device_samples_data_y, host_samples_data_y, samples_data_size, hipMemcpyHostToDevice));
printf("The transfer of %ld bytes from the host to the device took %.3e seconds\n",
(long)(samples_data_size * 3), get_delta_time());
// run the computational kernel
unsigned int gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ;
int n_samples;
n_samples = N_SAMPLES;
blockDimX = 1 << 8; // optimize!
blockDimY = 1 << 0; // optimize!
blockDimZ = 1 << 0; // do not change!
gridDimX = 1 << 8; // optimize!
gridDimY = 1 << 0; // optimize!
gridDimZ = 1 << 0; // do not change!
dim3 grid(gridDimX, gridDimY, gridDimZ);
dim3 block(blockDimX, blockDimY, blockDimZ);
if ((gridDimX * gridDimY * gridDimZ * blockDimX * blockDimY * blockDimZ) != n_samples)
{
printf("Wrong configuration!\n");
return 1;
}
(void)get_delta_time();
hipLaunchKernelGGL(( ccc_samples_cuda_kernel), dim3(grid), dim3(block), 0, 0, device_results_data, device_samples_data_x, device_samples_data_y, n_samples, DISTANCE);
CHECK(hipDeviceSynchronize()); // wait for kernel to finish
CHECK(hipGetLastError()); // check for kernel errors
printf("The CUDA kernel <<<(%d,%d,%d), (%d,%d,%d)>>> took %.3e seconds to run\n",
gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, get_delta_time());
// copy kernel result back to host side
double *modified_device_results_data;
modified_device_results_data = (double *)malloc(samples_data_size);
CHECK(hipMemcpy(modified_device_results_data, device_results_data, samples_data_size, hipMemcpyDeviceToHost));
printf("The transfer of %ld bytes from the device to the host took %.3e seconds\n",
(long)samples_data_size, get_delta_time());
// compute the modified samples data on the CPU
(void)get_delta_time();
for (i = 0; i < N_SAMPLES; i++)
ccc_samples_cpu_kernel(host_results_data, host_samples_data_x, host_samples_data_y, n_samples, (i + (i % 32) * (DISTANCE - 1)) % n_samples);
printf("The cpu kernel took %.3e seconds to run (single core)\n", get_delta_time());
// compare
for (i = 0; i < N_SAMPLES; i++)
if (!((abs(modified_device_results_data[i] - host_results_data[i]) < 1e-6) ||
((abs(modified_device_results_data[i]) >= 1e-6) &&
(abs((modified_device_results_data[i] - host_results_data[i]) / modified_device_results_data[i]) < 1e-6))))
{
printf("Expected result not found!\n");
exit(1);
}
printf("All is well!\n");
// free device global memory
CHECK(hipFree(device_results_data));
CHECK(hipFree(device_samples_data_x));
CHECK(hipFree(device_samples_data_y));
// free host memory
free(host_results_data);
free(host_samples_data_x);
free(host_samples_data_y);
free(modified_device_results_data);
// reset device
CHECK(hipDeviceReset());
return 0;
}
static void ccc_samples_cpu_kernel(double *results_data, double *samples_data_x, double *samples_data_y, unsigned int n_samples, unsigned int point)
{
// compute the circular cross-correlation
for (int k = 0; k < n_samples; k++)
results_data[point] += (samples_data_x[k] * samples_data_y[(point + k) % n_samples]);
}
__global__ static void ccc_samples_cuda_kernel(double *__restrict__ results_data, double *__restrict__ samples_data_x, double *__restrict__ samples_data_y,
unsigned int n_samples, unsigned int distance)
{
unsigned int x, y, idx, point;
// compute the thread number
x = (unsigned int)threadIdx.x + (unsigned int)blockDim.x * (unsigned int)blockIdx.x;
y = (unsigned int)threadIdx.y + (unsigned int)blockDim.y * (unsigned int)blockIdx.y;
idx = (unsigned int)blockDim.x * (unsigned int)gridDim.x * y + x;
if (idx >= n_samples)
return; // safety precaution
// adjust point
point = (idx + (idx % 32) * (distance - 1)) % n_samples;
// compute the circular cross-correlation
for (int k = 0; k < n_samples; k++)
results_data[point] += (samples_data_x[k] * samples_data_y[(point + k) % n_samples]);
}
static double get_delta_time(void)
{
static struct timespec t0, t1;
t0 = t1;
if (clock_gettime(CLOCK_MONOTONIC, &t1) != 0)
{
perror("clock_gettime");
exit(1);
}
return (double)(t1.tv_sec - t0.tv_sec) + 1.0e-9 * (double)(t1.tv_nsec - t0.tv_nsec);
}
| cfc3eb2396385d0df48aceb57d6163d3ef54d29b.cu | //
// Diogo Andrade 89265
// Francisco Silveira 84802
//
// BASE CODE:
// Tomás Oliveira e Silva, November 2017
//
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "common.h"
#include <cuda_runtime.h>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// program configuration
//
#ifndef N_SAMPLES
#define N_SAMPLES (1 << 16)
#endif
#ifndef DISTANCE
#define DISTANCE 3
#endif
static void ccc_samples_cpu_kernel(double *results_data, double *samples_data_x, double *samples_data_y, unsigned int n_samples, unsigned int point);
__global__ static void ccc_samples_cuda_kernel(double *__restrict__ results_data, double *__restrict__ samples_data_x, double *__restrict__ samples_data_y,
unsigned int n_samples, unsigned int distance);
static double get_delta_time(void);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Main program
//
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
if (sizeof(unsigned int) != (size_t)4)
return 1; // fail with prejudice if an integer does not have 4 bytes
// set up device
int dev = 0;
int i;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// create memory areas in host and device memory where the disk samples data will be stored
size_t samples_data_size;
double *host_results_data, *host_samples_data_x, *host_samples_data_y;
double *device_results_data, *device_samples_data_x, *device_samples_data_y;
samples_data_size = (size_t)N_SAMPLES * sizeof(double);
if ((samples_data_size * 3) > (size_t)1.3e9)
{
fprintf(stderr, "The GTX 480 cannot handle more than 1.5GiB of memory!\n");
exit(1);
}
printf("Total samples data size: %lu\n", samples_data_size);
host_results_data = (double *)malloc(samples_data_size);
host_samples_data_x = (double *)malloc(samples_data_size);
host_samples_data_y = (double *)malloc(samples_data_size);
CHECK(cudaMalloc((void **)&device_results_data, samples_data_size));
CHECK(cudaMalloc((void **)&device_samples_data_x, samples_data_size));
CHECK(cudaMalloc((void **)&device_samples_data_y, samples_data_size));
// initialize the host data
(void)get_delta_time();
srand(0xCCE2021);
for (i = 0; i < N_SAMPLES; i++)
{
host_results_data[i] = 0;
host_samples_data_x[i] = ((double)rand() / RAND_MAX) * (0.5 - (-0.5)) + (-0.5);
host_samples_data_y[i] = ((double)rand() / RAND_MAX) * (0.5 - (-0.5)) + (-0.5);
}
printf("The initialization of host data took %.3e seconds\n", get_delta_time());
// copy the host data to the device memory
(void)get_delta_time();
CHECK(cudaMemcpy(device_results_data, host_results_data, samples_data_size, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(device_samples_data_x, host_samples_data_x, samples_data_size, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(device_samples_data_y, host_samples_data_y, samples_data_size, cudaMemcpyHostToDevice));
printf("The transfer of %ld bytes from the host to the device took %.3e seconds\n",
(long)(samples_data_size * 3), get_delta_time());
// run the computational kernel
unsigned int gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ;
int n_samples;
n_samples = N_SAMPLES;
blockDimX = 1 << 8; // optimize!
blockDimY = 1 << 0; // optimize!
blockDimZ = 1 << 0; // do not change!
gridDimX = 1 << 8; // optimize!
gridDimY = 1 << 0; // optimize!
gridDimZ = 1 << 0; // do not change!
dim3 grid(gridDimX, gridDimY, gridDimZ);
dim3 block(blockDimX, blockDimY, blockDimZ);
if ((gridDimX * gridDimY * gridDimZ * blockDimX * blockDimY * blockDimZ) != n_samples)
{
printf("Wrong configuration!\n");
return 1;
}
(void)get_delta_time();
ccc_samples_cuda_kernel<<<grid, block>>>(device_results_data, device_samples_data_x, device_samples_data_y, n_samples, DISTANCE);
CHECK(cudaDeviceSynchronize()); // wait for kernel to finish
CHECK(cudaGetLastError()); // check for kernel errors
printf("The CUDA kernel <<<(%d,%d,%d), (%d,%d,%d)>>> took %.3e seconds to run\n",
gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, get_delta_time());
// copy kernel result back to host side
double *modified_device_results_data;
modified_device_results_data = (double *)malloc(samples_data_size);
CHECK(cudaMemcpy(modified_device_results_data, device_results_data, samples_data_size, cudaMemcpyDeviceToHost));
printf("The transfer of %ld bytes from the device to the host took %.3e seconds\n",
(long)samples_data_size, get_delta_time());
// compute the modified samples data on the CPU
(void)get_delta_time();
for (i = 0; i < N_SAMPLES; i++)
ccc_samples_cpu_kernel(host_results_data, host_samples_data_x, host_samples_data_y, n_samples, (i + (i % 32) * (DISTANCE - 1)) % n_samples);
printf("The cpu kernel took %.3e seconds to run (single core)\n", get_delta_time());
// compare
for (i = 0; i < N_SAMPLES; i++)
if (!((abs(modified_device_results_data[i] - host_results_data[i]) < 1e-6) ||
((abs(modified_device_results_data[i]) >= 1e-6) &&
(abs((modified_device_results_data[i] - host_results_data[i]) / modified_device_results_data[i]) < 1e-6))))
{
printf("Expected result not found!\n");
exit(1);
}
printf("All is well!\n");
// free device global memory
CHECK(cudaFree(device_results_data));
CHECK(cudaFree(device_samples_data_x));
CHECK(cudaFree(device_samples_data_y));
// free host memory
free(host_results_data);
free(host_samples_data_x);
free(host_samples_data_y);
free(modified_device_results_data);
// reset device
CHECK(cudaDeviceReset());
return 0;
}
static void ccc_samples_cpu_kernel(double *results_data, double *samples_data_x, double *samples_data_y, unsigned int n_samples, unsigned int point)
{
// compute the circular cross-correlation
for (int k = 0; k < n_samples; k++)
results_data[point] += (samples_data_x[k] * samples_data_y[(point + k) % n_samples]);
}
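// Added note: the loop above evaluates one output sample of the circular
// cross-correlation, r[p] = sum_{k=0}^{N-1} x[k] * y[(p + k) mod N]; the CUDA
// kernel below computes the same sum with one thread per (remapped) output
// index p.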
__global__ static void ccc_samples_cuda_kernel(double *__restrict__ results_data, double *__restrict__ samples_data_x, double *__restrict__ samples_data_y,
unsigned int n_samples, unsigned int distance)
{
unsigned int x, y, idx, point;
// compute the thread number
x = (unsigned int)threadIdx.x + (unsigned int)blockDim.x * (unsigned int)blockIdx.x;
y = (unsigned int)threadIdx.y + (unsigned int)blockDim.y * (unsigned int)blockIdx.y;
idx = (unsigned int)blockDim.x * (unsigned int)gridDim.x * y + x;
if (idx >= n_samples)
return; // safety precaution
// adjust point
point = (idx + (idx % 32) * (distance - 1)) % n_samples;
// compute the circular cross-correlation
for (int k = 0; k < n_samples; k++)
results_data[point] += (samples_data_x[k] * samples_data_y[(point + k) % n_samples]);
}
static double get_delta_time(void)
{
static struct timespec t0, t1;
t0 = t1;
if (clock_gettime(CLOCK_MONOTONIC, &t1) != 0)
{
perror("clock_gettime");
exit(1);
}
return (double)(t1.tv_sec - t0.tv_sec) + 1.0e-9 * (double)(t1.tv_nsec - t0.tv_nsec);
}
|
f0bf031ddf5c615ce0199795594a500fcf29ed56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void vector_atan (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < n) {
y[offset_y + gid * stride_y] = CAST(atan)(x[offset_x + gid * stride_x]);
}
} | f0bf031ddf5c615ce0199795594a500fcf29ed56.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void vector_atan (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < n) {
y[offset_y + gid * stride_y] = CAST(atan)(x[offset_x + gid * stride_x]);
}
} |
9515539614fd5cf205f78ebce3c2ac8c57934b0c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define BLOCKS 4
#define THREADS 4
__global__ void add(int *x, int *y, int *result) {
*result = *x + *y;
};
int main(int argc, char *argv[]) {
if(argc < 3)
{
printf("need two parameters retard\n");
return 0;
}
int x = atoi(argv[1]);
int y = atoi(argv[2]);
int *x_d, *y_d, *sum_d;
hipMalloc((void**) &x_d, sizeof(int));
hipMemcpy(x_d, &x, sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**) &y_d, sizeof(int));
hipMemcpy(y_d, &y, sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**) &sum_d, sizeof(int));
hipLaunchKernelGGL(( add), dim3(BLOCKS),dim3(THREADS), 0, 0, x_d, y_d, sum_d);
int sum;
hipMemcpy(&sum, sum_d, sizeof(int), hipMemcpyDeviceToHost);
printf("%d\n", sum);
hipFree(x_d);
hipFree(y_d);
hipFree(sum_d);
};
| 9515539614fd5cf205f78ebce3c2ac8c57934b0c.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define BLOCKS 4
#define THREADS 4
__global__ void add(int *x, int *y, int *result) {
*result = *x + *y;
};
int main(int argc, char *argv[]) {
if(argc < 3)
{
printf("need two parameters retard\n");
return 0;
}
int x = atoi(argv[1]);
int y = atoi(argv[2]);
int *x_d, *y_d, *sum_d;
cudaMalloc((void**) &x_d, sizeof(int));
cudaMemcpy(x_d, &x, sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**) &y_d, sizeof(int));
cudaMemcpy(y_d, &y, sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**) &sum_d, sizeof(int));
add<<<BLOCKS,THREADS>>>(x_d, y_d, sum_d);
int sum;
cudaMemcpy(&sum, sum_d, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d\n", sum);
cudaFree(x_d);
cudaFree(y_d);
cudaFree(sum_d);
};
|
0df20e2aad6c7a3aef50edc67362934bd25e71f8.hip | // !!! This is a file automatically generated by hipify!!!
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/impl/KmBurstAve.cuh>
#include <faiss/gpu/impl/TestKmBurstAve.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/PtxUtils.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <algorithm>
#include <thrust/fill.h>
namespace faiss {
namespace gpu {
namespace test_kmbave{
//
// Test cases
//
template<typename T>
void test_case_0(Tensor<T, 5, true, int>& dists,
Tensor<T, 4, true, int>& burst,
Tensor<int, 5, true, int>& blocks,
Tensor<T, 5, true, int>& centroids,
Tensor<uint8_t, 4, true, int>& clusters,
Tensor<T, 4, true, int>& ave,
Tensor<T, 4, true, int>& modes,
int patchsize, float offset,
hipStream_t stream){
thrust::fill(thrust::hip::par.on(stream),
ave.data(), ave.end(),1);
}
template<typename T>
void test_case_1(Tensor<T, 5, true, int>& dists,
Tensor<T, 4, true, int>& burst,
Tensor<int, 5, true, int>& blocks,
Tensor<T, 5, true, int>& centroids,
Tensor<uint8_t, 4, true, int>& clusters,
Tensor<T, 4, true, int>& ave,
Tensor<T, 4, true, int>& modes,
int patchsize, float offset,
hipStream_t stream){
kmb_ave(centroids,ave,stream);
}
} // namespace test_kmbave
//
// Main Test Function
//
template<typename T>
void test_kmburst_ave(int test_case,
Tensor<T, 5, true, int>& dists,
Tensor<T, 4, true, int>& burst,
Tensor<int, 5, true, int>& blocks,
Tensor<T, 5, true, int>& centroids,
Tensor<uint8_t, 4, true, int>& clusters,
Tensor<T, 4, true, int>& ave,
Tensor<T, 4, true, int>& modes,
int patchsize, float offset,
hipStream_t stream){
if (test_case == 0){
test_kmbave::test_case_0(dists,burst,blocks,centroids,clusters,
ave,modes,patchsize,offset,stream);
}else if (test_case == 1){
test_kmbave::test_case_1(dists,burst,blocks,centroids,clusters,
ave,modes,patchsize,offset,stream);
}else{
FAISS_THROW_FMT("[TestKmBurstAve.cu]: unimplemented test case %d",test_case);
}
}
//
// Template Init
//
void test_kmburst_ave(int test_case,
Tensor<float, 5, true, int>& dists,
Tensor<float, 4, true, int>& burst,
Tensor<int, 5, true, int>& blocks,
Tensor<float, 5, true, int>& centroids,
Tensor<uint8_t, 4, true, int>& clusters,
Tensor<float, 4, true, int>& ave,
Tensor<float, 4, true, int>& modes,
int patchsize, float offset,
hipStream_t stream){
test_kmburst_ave<float>(test_case,dists,burst,blocks,centroids, clusters,
ave, modes, patchsize, offset, stream);
}
void test_kmburst_ave(int test_case,
Tensor<half, 5, true, int>& dists,
Tensor<half, 4, true, int>& burst,
Tensor<int, 5, true, int>& blocks,
Tensor<half, 5, true, int>& centroids,
Tensor<uint8_t, 4, true, int>& clusters,
Tensor<half, 4, true, int>& ave,
Tensor<half, 4, true, int>& modes,
int patchsize, float offset,
hipStream_t stream){
test_kmburst_ave<half>(test_case,dists,burst,blocks,centroids, clusters,
ave, modes, patchsize, offset, stream);
}
}
} | 0df20e2aad6c7a3aef50edc67362934bd25e71f8.cu |
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/impl/KmBurstAve.cuh>
#include <faiss/gpu/impl/TestKmBurstAve.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/PtxUtils.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <algorithm>
#include <thrust/fill.h>
namespace faiss {
namespace gpu {
namespace test_kmbave{
//
// Test cases
//
template<typename T>
void test_case_0(Tensor<T, 5, true, int>& dists,
Tensor<T, 4, true, int>& burst,
Tensor<int, 5, true, int>& blocks,
Tensor<T, 5, true, int>& centroids,
Tensor<uint8_t, 4, true, int>& clusters,
Tensor<T, 4, true, int>& ave,
Tensor<T, 4, true, int>& modes,
int patchsize, float offset,
cudaStream_t stream){
thrust::fill(thrust::cuda::par.on(stream),
ave.data(), ave.end(),1);
}
template<typename T>
void test_case_1(Tensor<T, 5, true, int>& dists,
Tensor<T, 4, true, int>& burst,
Tensor<int, 5, true, int>& blocks,
Tensor<T, 5, true, int>& centroids,
Tensor<uint8_t, 4, true, int>& clusters,
Tensor<T, 4, true, int>& ave,
Tensor<T, 4, true, int>& modes,
int patchsize, float offset,
cudaStream_t stream){
kmb_ave(centroids,ave,stream);
}
} // namespace test_kmbave
//
// Main Test Function
//
template<typename T>
void test_kmburst_ave(int test_case,
Tensor<T, 5, true, int>& dists,
Tensor<T, 4, true, int>& burst,
Tensor<int, 5, true, int>& blocks,
Tensor<T, 5, true, int>& centroids,
Tensor<uint8_t, 4, true, int>& clusters,
Tensor<T, 4, true, int>& ave,
Tensor<T, 4, true, int>& modes,
int patchsize, float offset,
cudaStream_t stream){
if (test_case == 0){
test_kmbave::test_case_0(dists,burst,blocks,centroids,clusters,
ave,modes,patchsize,offset,stream);
}else if (test_case == 1){
test_kmbave::test_case_1(dists,burst,blocks,centroids,clusters,
ave,modes,patchsize,offset,stream);
}else{
FAISS_THROW_FMT("[TestKmBurstAve.cu]: unimplemented test case %d",test_case);
}
}
//
// Template Init
//
void test_kmburst_ave(int test_case,
Tensor<float, 5, true, int>& dists,
Tensor<float, 4, true, int>& burst,
Tensor<int, 5, true, int>& blocks,
Tensor<float, 5, true, int>& centroids,
Tensor<uint8_t, 4, true, int>& clusters,
Tensor<float, 4, true, int>& ave,
Tensor<float, 4, true, int>& modes,
int patchsize, float offset,
cudaStream_t stream){
test_kmburst_ave<float>(test_case,dists,burst,blocks,centroids, clusters,
ave, modes, patchsize, offset, stream);
}
void test_kmburst_ave(int test_case,
Tensor<half, 5, true, int>& dists,
Tensor<half, 4, true, int>& burst,
Tensor<int, 5, true, int>& blocks,
Tensor<half, 5, true, int>& centroids,
Tensor<uint8_t, 4, true, int>& clusters,
Tensor<half, 4, true, int>& ave,
Tensor<half, 4, true, int>& modes,
int patchsize, float offset,
cudaStream_t stream){
test_kmburst_ave<half>(test_case,dists,burst,blocks,centroids, clusters,
ave, modes, patchsize, offset, stream);
}
}
} |
88ad706777ac5024afaa26e125c5a240c69ba0a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void euclidean_kernel_same(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p)
{
size_t
x = blockIdx.x, y = blockIdx.y;
if((x == y) && (x < n_a) && (threadIdx.x == 0))
d[y * pitch_d + x] = 0.0;
// If all element is to be computed
if(y < n_a && x < y) {
__shared__ float temp[NUM_THREADS];
temp[threadIdx.x] = 0.0;
for(size_t offset = threadIdx.x; offset < k; offset += NUM_THREADS) {
float t = vg_a[x * pitch_a + offset] - vg_a[y * pitch_a + offset];
temp[threadIdx.x] += (t * t);
}
// Sync with other threads
__syncthreads();
// Reduce
for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
if(threadIdx.x < stride)
temp[threadIdx.x] += temp[threadIdx.x + stride];
__syncthreads();
}
// Write to global memory
if(threadIdx.x == 0) {
float s = sqrt(temp[0]);
d[y * pitch_d + x] = s;
d[x * pitch_d + y] = s;
}
}
} | 88ad706777ac5024afaa26e125c5a240c69ba0a5.cu | #include "includes.h"
__global__ void euclidean_kernel_same(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p)
{
size_t
x = blockIdx.x, y = blockIdx.y;
if((x == y) && (x < n_a) && (threadIdx.x == 0))
d[y * pitch_d + x] = 0.0;
// If all element is to be computed
if(y < n_a && x < y) {
__shared__ float temp[NUM_THREADS];
temp[threadIdx.x] = 0.0;
for(size_t offset = threadIdx.x; offset < k; offset += NUM_THREADS) {
float t = vg_a[x * pitch_a + offset] - vg_a[y * pitch_a + offset];
temp[threadIdx.x] += (t * t);
}
// Sync with other threads
__syncthreads();
// Reduce
for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
if(threadIdx.x < stride)
temp[threadIdx.x] += temp[threadIdx.x + stride];
__syncthreads();
}
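// Added note: the halving loop above assumes blockDim.x (here NUM_THREADS) is a
// power of two; with an odd intermediate size the last element would be left out
// of the partial sums.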
// Write to global memory
if(threadIdx.x == 0) {
float s = sqrt(temp[0]);
d[y * pitch_d + x] = s;
d[x * pitch_d + y] = s;
}
}
} |
127b43d46bfd640e967560e64a37a1dd9cdab75a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 Nervana Systems Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern "C"
__global__ void __launch_bounds__(128) sgemm_tn_vec_128x32
(
unsigned* param_Rand,
const float* param_A,
const float* param_B,
float* param_C,
int param_lda8,
int param_ldb8,
int param_ldc,
int param_m,
int param_n,
int param_k,
float param_alpha,
float param_beta,
int param_flags,
int param_ldaz,
int param_ldbz,
int param_ldcz,
int param_batch_loops
)
{
__shared__ float share[128*16*2 + 32*16*2 + 4];
int tid = threadIdx.x;
share[tid] = 1;
param_C[tid] = share[127-tid];
}
| 127b43d46bfd640e967560e64a37a1dd9cdab75a.cu | /*
* Copyright 2014 Nervana Systems Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern "C"
__global__ void __launch_bounds__(128) sgemm_tn_vec_128x32
(
unsigned* param_Rand,
const float* param_A,
const float* param_B,
float* param_C,
int param_lda8,
int param_ldb8,
int param_ldc,
int param_m,
int param_n,
int param_k,
float param_alpha,
float param_beta,
int param_flags,
int param_ldaz,
int param_ldbz,
int param_ldcz,
int param_batch_loops
)
{
__shared__ float share[128*16*2 + 32*16*2 + 4];
int tid = threadIdx.x;
share[tid] = 1;
param_C[tid] = share[127-tid];
}
|
0c6b5576bc9b3b3559fd2e51ca26afa32f56d57b.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "column_filter.h"
namespace filter
{
template void linearColumn<float3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| 0c6b5576bc9b3b3559fd2e51ca26afa32f56d57b.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "column_filter.h"
namespace filter
{
template void linearColumn<float3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|