| hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
|---|---|---|---|
1435e26cb1821abf2e08a184f9177e8c7cc9563e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <hip/hip_fp16.h>
//#include <hip/hip_cooperative_groups.h>
#include <math.h>
#include <string.h>
#include <sstream>
#include <fstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
//using namespace cooperative_groups;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 128
#define C 96
#define H 31
#define W 31
#define R 5
#define S 5
#define M 256
#define E 27
#define F 27
#define U 1
__global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt)
{
//printf("gpu2 started\n");
float red_sum = 0;
int row = threadIdx.y; int col = threadIdx.x;
for(int i=0; i<num_ch; i++)
{
red_sum += d_o[i*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] ;
}
d_r[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = red_sum;
}
__global__
void ew_gpu_mmul(float* d_o, __half* d_i, __half* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{//printf("gpu started\n");
__shared__ __half s_w[R*S];
__shared__ __half s_i[H*W];
int row = threadIdx.y; int col = threadIdx.x;
if(row*width+col<R*S)
{
s_w[row*width+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*width+col)];
}
//__syncthreads();//printf("wt done\n");
//if(row*width+col<(H*W+1)/2)
{
int s_i_idx = row*blockDim.x+col;
s_i[s_i_idx] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx];
if(s_i_idx+729 < H*W)
s_i[s_i_idx+729]= d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+729];
}
__syncthreads();
//printf("ip_done\n");
float prod = 0;
if((row<height) && (col<width))//earlier it was num_wt*height & num_img*width
{
for (int i=0; i<wt_width; i++){
for (int j=0; j<wt_width; j++){
float ip = __half2float(s_i[(stride*row+i)*ip_height+(stride*col+j)]);
prod += ip*__half2float(s_w[i*wt_width+j]);
__syncthreads();
}
}
if(prod>=0)
d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = prod;
if(row*width+col<R*S){
s_w[(row*width+col)] = __float2half(0);
__syncthreads();
}
}
}
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
float wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = ip*wt;
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
/*************INITIALIZING MATRICES*********************************/
__half *IP = (__half*) malloc(batch_size*C*H*W*sizeof(__half));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
__half *WT = (__half*) malloc(M*C*R*S*sizeof(__half));
//float WT[R][S];
float* d_o;
__half* d_i;
__half* d_w;
float* d_r;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/*INITIALIZING WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
//WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1;
WT[m*C*R*S+k*R*S+c*S+d] = __float2half((float)rand()/(float)(RAND_MAX+1.0));
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
// IP[c][d] = (a+b+c+d);
if ((c<=1) || (d<=1) || (c>=29) || (d>=29))
IP[n*C*H*W+k*H*W+c*W+d] = __float2half(0);
else
IP[n*C*H*W+k*H*W+c*W+d] = __float2half((float)rand()/(RAND_MAX+1.0));
}
}
}
}
if(hipSuccess != hipMalloc((void**) &d_i,batch_size*C*H*W*sizeof(__half)))
{
printf("error in d_i malloc\n");
}
hipMemcpy(d_i, IP, batch_size*C*H*W*sizeof(__half), hipMemcpyHostToDevice);
if(hipSuccess != hipMalloc((void**) &d_w, M*C*R*S*sizeof(__half)))
{
printf("error in d_w malloc\n");
}
hipMemcpy(d_w, WT, M*C*R*S*sizeof(__half), hipMemcpyHostToDevice);
if(hipSuccess != hipMalloc((void**) &d_o,(long int)C*batch_size*M*E*F*sizeof(float)))
{
printf("error in d_o malloc\n");
}
if(hipSuccess != hipMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float)))
{
printf("error in d_r malloc\n");
}
//cpu_start = clock();
//element_wise_mmul(OP, IP, WT, batch_size);
printf("cpu done\n");
//cpu_end = clock();
dim3 dimGrid(batch_size,256,96);
dim3 dimBlock(27,27,1);
dim3 dimGridRed(batch_size,256,1);
dim3 dimBlockRed(27,27,1);
//int op_height = 3; int op_width = 3; int stride = 1; int ip_height = 4;int wt_height = 2; int num_wt = 96; int num_img = 1; int num_ch = 384;
//gpu_start = clock();
//hipFuncSetSharedMemConfig(ew_gpu_mmul, hipSharedMemBankSizeEightByte);
hipLaunchKernelGGL((ew_gpu_mmul), dim3(dimGrid), dim3(dimBlock), 0, 0, d_o,d_i,d_w,27,27,1,31,5,256,batch_size,96);
hipDeviceSynchronize();
hipLaunchKernelGGL((red_ch), dim3(dimGridRed), dim3(dimBlockRed), 0, 0, d_r,d_o,96,batch_size,256);
//gpu_end = clock();
//void *kernelArgs[] = {(void *)&d_o, (void *)&d_i, (void *)&d_w,(void *)&op_height, (void *)&op_width, (void *)&stride, (void *)&ip_height,(void *)&wt_height, (void *)&num_wt, (void *)&num_img, (void *)&num_ch };
//hipLaunchCooperativeKernel((void*)ew_gpu_mmul,dimGrid,dimBlock,kernelArgs,0,NULL);
//hipDeviceSynchronize();
hipMemcpy(OPG,d_r,batch_size*M*E*F*sizeof(float), hipMemcpyDeviceToHost);
/**print outputs**/
//int e,f,g,h;
int g,h,s,u;
float max_error = 0;
string filename = "layer_2_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
//for (t=0;t<C;t++){
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
//float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if(error > max_error)
max_error = error;
// printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error is %f\n", max_error);
//}
//cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
hipFree(d_o);
hipFree(d_i);
hipFree(d_w);
hipFree(d_r);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
|
1435e26cb1821abf2e08a184f9177e8c7cc9563e.cu
|
#include <stdio.h>
#include <iostream>
#include <cuda_fp16.h>
//#include <cooperative_groups.h>
#include <math.h>
#include <string.h>
#include <sstream>
#include <fstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
//using namespace cooperative_groups;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 128
#define C 96
#define H 31
#define W 31
#define R 5
#define S 5
#define M 256
#define E 27
#define F 27
#define U 1
__global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt)
{
//printf("gpu2 started\n");
float red_sum = 0;
int row = threadIdx.y; int col = threadIdx.x;
for(int i=0; i<num_ch; i++)
{
red_sum += d_o[i*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] ;
}
d_r[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = red_sum;
}
__global__
void ew_gpu_mmul(float* d_o, __half* d_i, __half* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{//printf("gpu started\n");
__shared__ __half s_w[R*S];
__shared__ __half s_i[H*W];
int row = threadIdx.y; int col = threadIdx.x;
if(row*width+col<R*S)
{
s_w[row*width+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*width+col)];
}
//__syncthreads();//printf("wt done\n");
//if(row*width+col<(H*W+1)/2)
{
int s_i_idx = row*blockDim.x+col;
s_i[s_i_idx] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx];
if(s_i_idx+729 < H*W)
s_i[s_i_idx+729]= d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+729];
}
__syncthreads();
//printf("ip_done\n");
float prod = 0;
if((row<height) && (col<width))//earlier it was num_wt*height & num_img*width
{
for (int i=0; i<wt_width; i++){
for (int j=0; j<wt_width; j++){
float ip = __half2float(s_i[(stride*row+i)*ip_height+(stride*col+j)]);
prod += ip*__half2float(s_w[i*wt_width+j]);
__syncthreads();
}
}
if(prod>=0)
d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = prod;
if(row*width+col<R*S){
s_w[(row*width+col)] = __float2half(0);
__syncthreads();
}
}
}
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
float wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = ip*wt;
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
/*************INITIALIZING MATRICES*********************************/
__half *IP = (__half*) malloc(batch_size*C*H*W*sizeof(__half));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
__half *WT = (__half*) malloc(M*C*R*S*sizeof(__half));
//float WT[R][S];
float* d_o;
__half* d_i;
__half* d_w;
float* d_r;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/*INITIALIZING WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
//WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1;
WT[m*C*R*S+k*R*S+c*S+d] = __float2half((float)rand()/(float)(RAND_MAX+1.0));
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
// IP[c][d] = (a+b+c+d);
if ((c<=1) || (d<=1) || (c>=29) || (d>=29))
IP[n*C*H*W+k*H*W+c*W+d] = __float2half(0);
else
IP[n*C*H*W+k*H*W+c*W+d] = __float2half((float)rand()/(RAND_MAX+1.0));
}
}
}
}
if(cudaSuccess != cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(__half)))
{
printf("error in d_i malloc\n");
}
cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(__half), cudaMemcpyHostToDevice);
if(cudaSuccess != cudaMalloc((void**) &d_w, M*C*R*S*sizeof(__half)))
{
printf("error in d_w malloc\n");
}
cudaMemcpy(d_w, WT, M*C*R*S*sizeof(__half), cudaMemcpyHostToDevice);
if(cudaSuccess != cudaMalloc((void**) &d_o,(long int)C*batch_size*M*E*F*sizeof(float)))
{
printf("error in d_o malloc\n");
}
if(cudaSuccess != cudaMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float)))
{
printf("error in d_r malloc\n");
}
//cpu_start = clock();
//element_wise_mmul(OP, IP, WT, batch_size);
printf("cpu done\n");
//cpu_end = clock();
dim3 dimGrid(batch_size,256,96);
dim3 dimBlock(27,27,1);
dim3 dimGridRed(batch_size,256,1);
dim3 dimBlockRed(27,27,1);
//int op_height = 3; int op_width = 3; int stride = 1; int ip_height = 4;int wt_height = 2; int num_wt = 96; int num_img = 1; int num_ch = 384;
//gpu_start = clock();
//cudaFuncSetSharedMemConfig(ew_gpu_mmul, cudaSharedMemBankSizeEightByte);
ew_gpu_mmul<<<dimGrid, dimBlock>>>(d_o,d_i,d_w,27,27,1,31,5,256,batch_size,96);
cudaDeviceSynchronize();
red_ch<<<dimGridRed, dimBlockRed>>>(d_r,d_o,96,batch_size,256);
//gpu_end = clock();
//void *kernelArgs[] = {(void *)&d_o, (void *)&d_i, (void *)&d_w,(void *)&op_height, (void *)&op_width, (void *)&stride, (void *)&ip_height,(void *)&wt_height, (void *)&num_wt, (void *)&num_img, (void *)&num_ch };
//cudaLaunchCooperativeKernel((void*)ew_gpu_mmul,dimGrid,dimBlock,kernelArgs,0,NULL);
//cudaDeviceSynchronize();
cudaMemcpy(OPG,d_r,batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost);
/**print outputs**/
//int e,f,g,h;
int g,h,s,u;
float max_error = 0;
string filename = "layer_2_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
//for (t=0;t<C;t++){
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
//float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if(error > max_error)
max_error = error;
// printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error is %f\n", max_error);
//}
//cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
cudaFree(d_o);
cudaFree(d_i);
cudaFree(d_w);
cudaFree(d_r);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
|
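The pair above shows the core rewrites hipify applies: it prepends #include "hip/hip_runtime.h", renames cuda* runtime calls to their hip* counterparts, and rewrites the triple-chevron kernel launch as a hipLaunchKernelGGL call whose leading arguments carry the launch configuration. Below is a minimal sketch of that launch mapping; the kernel my_kernel and the helper launch_my_kernel are hypothetical, and the sketch assumes the HIP branch is built with hipcc and the CUDA branch with nvcc.

```cpp
// Minimal sketch of the CUDA-to-HIP launch mapping illustrated by the pair above.
#ifdef __HIP_PLATFORM_AMD__
#include <hip/hip_runtime.h>   // provides hipLaunchKernelGGL when built with hipcc
#endif

__global__ void my_kernel(float* out, int n)   // hypothetical example kernel
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = 2.0f * i;              // trivial body, just to keep the sketch complete
}

void launch_my_kernel(float* d_out, int n)     // hypothetical host-side helper
{
    dim3 grid((n + 255) / 256), block(256);
#ifdef __HIP_PLATFORM_AMD__
    // Form emitted by hipify: the launch configuration becomes explicit macro arguments.
    hipLaunchKernelGGL(my_kernel, grid, block, 0, 0, d_out, n);
#else
    // Original CUDA form: the launch configuration sits in the triple chevrons.
    my_kernel<<<grid, block, 0, 0>>>(d_out, n);
#endif
}
```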
56ef8182bd17987ceabfdf90dc08ff8f2d5f4ae9.hip
|
// !!! This is a file automatically generated by hipify!!!
void printDeviceSpec()
{
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++)
{
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" SharedMemPerBlock: %d\n", prop.sharedMemPerBlock);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
printf(" Total global memory (bits): %d\n", prop.totalGlobalMem);
printf("\n");
}
}
|
56ef8182bd17987ceabfdf90dc08ff8f2d5f4ae9.cu
|
void printDeviceSpec()
{
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++)
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" SharedMemPerBlock: %d\n", prop.sharedMemPerBlock);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
printf(" Total global memory (bits): %d\n", prop.totalGlobalMem);
printf("\n");
}
}
|
28e0b465fe822405431ff1fc57a131ee44dc0e2d.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
### PROGRAM DESCRIPTION ###
## Assignment No. 1
#Description:
- create two matrices A and B of 100,000 X 100,000
- and store their sum in matrix C
#Issue:
- allocating a 100,000 X 100,000 matrix of int
requires about 1.25 GB of memory in RAM [detail is in attachment]
- So I am using a matrix of width 10,000 X 10,000;
it requires about 350 MB of memory, which is feasible
### DEVELOPER DETAILS ###
#Name:
- M. Aamir Javid
#Email:
#Date:
Sept 16, 2014
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <ctime>
using namespace std;
/*
#CUDA PROGRAM STRUCTURE
1. Memory Allocation on CPU and GPU
2. Initialization of Memory in CPU
3. Memcpy to GPU
4. Kernel Invocation
5. Memcpy to CPU
*/
//----------------[START] CUDA KERNEL CODE ---------------------------
__global__ void addKernel(int *a, int *b, int *c,int WIDTH)
{
int idx = ( blockDim.x * blockIdx.x) + threadIdx.x;
if( idx < WIDTH*WIDTH)
c[idx] = a[idx] + b[idx];
}
//---------------- [END] CUDA KERNEL CODE ----------------------------
//will show the memory used by one array i.e a/b/c
// viewMemoryUse( WIDTH, HEIGHT) : specific to this program
void viewMemUse(int, int);
// IniArray( ARRAY, WIDTH, HEIGHT, RandomValueSeed)
void initializeArray(int*, int, int, int);
// DisplayArray( arrayNAme i.e H_A, array, width, height)
void displayArray(char*, int *,int,int);
//
void addKernelCPU( int*, int*, int*, int);
//
void compareResult( int *arrayA, int *arrayB, int width);
int main()
{
const int WIDTH = 10000;
int *h_a, *h_b, *h_c;
int *d_a, *d_b, *d_c;
//## 1. Memory Allocation on HOST & DEVICE
//1.a Memory allocation on HOST
int SIZE_IN_BYTES = WIDTH * WIDTH * sizeof(int);
h_a = (int *) malloc( SIZE_IN_BYTES); // since square matrix so A = [ WIDTH * WIDTH]
h_b = (int *) malloc( SIZE_IN_BYTES);
h_c = (int *) malloc( SIZE_IN_BYTES);
//1.b Memory Allocation on DEVICE
hipMalloc( (void **) &d_a, SIZE_IN_BYTES);
hipMalloc( (void **) &d_b, SIZE_IN_BYTES);
hipMalloc( (void **) &d_c, SIZE_IN_BYTES);
//## 2. Memory Initialization HOST
//Initializing Host Arrays
initializeArray( h_a, WIDTH, WIDTH, 50);
initializeArray( h_b, WIDTH, WIDTH, 50);
//## 3. Memcpy HOST to DEVICE
hipMemcpy( d_a, h_a, SIZE_IN_BYTES, hipMemcpyHostToDevice);
hipMemcpy( d_b, h_b, SIZE_IN_BYTES, hipMemcpyHostToDevice);
//## 4. Kernel Invocation
int mat_size= WIDTH * WIDTH;
int threadsPerBlock = 1024;
int blockPerGrid = ceil(mat_size / threadsPerBlock)+1;//97657
hipLaunchKernelGGL(( addKernel), dim3(blockPerGrid), dim3(threadsPerBlock), 0, 0, d_a, d_b, d_c, WIDTH);
////## 5. Memcpy DEVICE to HOST
hipMemcpy( h_c, d_c, SIZE_IN_BYTES, hipMemcpyDeviceToHost);
// for comparing results
int *cpu_results; // to store CPU results
cpu_results = (int *) malloc( SIZE_IN_BYTES);
addKernelCPU( h_a, h_b, cpu_results, WIDTH);
// Displaying Result
cout<<"Comparing and Displaying Result"<<endl;
compareResult( h_c, cpu_results, WIDTH);
displayArray( "a",h_a,2,2);
displayArray("b", h_b, 2,2);
displayArray("c",h_c,2,2);
displayArray("cpu_result",cpu_results,2,2);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
system("pause");
return 0;
}
void addKernelCPU( int *arrayA, int *arrayB, int *arrayC, int width){
int arraySize = width * width;
for(int i=0; i<arraySize; i++){
arrayC[i] = arrayA[i] + arrayB[i];
}
}
void compareResult( int *arrayA, int *arrayB, int width){
int arraySize = width * width;
bool mismatchFound = false;
for(int i=0; i<arraySize; i++){
if( arrayA[i] != arrayB[i]){
cout<<"arrayA["<<i<<"] != arrayB["<<i<<"]"<<endl;
mismatchFound = true;
break;
}
}
if(!mismatchFound)
cout<<"Result on CPU and GPU is the same"<<endl;
}
void initializeArray(int *array, int width, int height, int randomValueSEED){
int MAT_SIZE = width * height;
// Initializing Array with random values
srand ( time(NULL) );
for( int i=0; i<MAT_SIZE; i++){
int value = rand() % randomValueSEED + 1;
array[i] = value;
}
}
void displayArray(char* arrayName,int *array,int width, int height){
cout<<"Displaying Values of Array: "<<arrayName<<endl;
for(int i=0; i<width*height; i++){
cout<<"Array["<<i<<"] : "<<array[i]<<endl;
}
}
void viewMemUse(int pWidth, int pHeight){
int size = pWidth * pHeight * sizeof(int);
cout<<"Size: of WIDTH * HEIGHT * sizeof(int)"<<endl;
cout<<"Size = "<<pWidth<<" * "<<pHeight<<" * sizeof(int)"<<endl;
cout<<"Size: BYTES "<<size<<endl;
cout<<"Size: KBYTES "<<size/1024<<endl;
cout<<"Size: MBYTES "<<(size/1024)/1024<<endl;
float gSize = ((size/1024.0)/1024.0)/1024.0;
cout<<"Size: GBYTES "<<gSize<<endl;
}
|
28e0b465fe822405431ff1fc57a131ee44dc0e2d.cu
|
/*
### PROGRAM DESCRIPTION ###
## Assignment No. 1
#Description:
- create two matrices A and B of 100,000 X 100,000
- and store their sum in matrix C
#Issue:
- allocating a 100,000 X 100,000 matrix of int
requires about 1.25 GB of memory in RAM [detail is in attachment]
- So I am using a matrix of width 10,000 X 10,000;
it requires about 350 MB of memory, which is feasible
### DEVELOPER DETAILS ###
#Name:
- M. Aamir Javid
#Email:
#Date:
Sept 16, 2014
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <ctime>
using namespace std;
/*
#CUDA PROGRAM STRUCTURE
1. Memory Allocation on CPU and GPU
2. Initialization of Memory in CPU
3. Memcpy to GPU
4. Kernel Invocation
5. Memcpy to CPU
*/
//----------------[START] CUDA KERNEL CODE ---------------------------
__global__ void addKernel(int *a, int *b, int *c,int WIDTH)
{
int idx = ( blockDim.x * blockIdx.x) + threadIdx.x;
if( idx < WIDTH*WIDTH)
c[idx] = a[idx] + b[idx];
}
//---------------- [END] CUDA KERNEL CODE ----------------------------
//will show the memory used by one array i.e a/b/c
// viewMemoryUse( WIDTH, HEIGHT) : specific to this program
void viewMemUse(int, int);
// IniArray( ARRAY, WIDTH, HEIGHT, RandomValueSeed)
void initializeArray(int*, int, int, int);
// DisplayArray( arrayNAme i.e H_A, array, width, height)
void displayArray(char*, int *,int,int);
//
void addKernelCPU( int*, int*, int*, int);
//
void compareResult( int *arrayA, int *arrayB, int width);
int main()
{
const int WIDTH = 10000;
int *h_a, *h_b, *h_c;
int *d_a, *d_b, *d_c;
//## 1. Memory Allocation on HOST & DEVICE
//1.a Memory allocation on HOST
int SIZE_IN_BYTES = WIDTH * WIDTH * sizeof(int);
h_a = (int *) malloc( SIZE_IN_BYTES); // since square matrix so A = [ WIDTH * WIDTH]
h_b = (int *) malloc( SIZE_IN_BYTES);
h_c = (int *) malloc( SIZE_IN_BYTES);
//1.b Memory Allocation on DEVICE
cudaMalloc( (void **) &d_a, SIZE_IN_BYTES);
cudaMalloc( (void **) &d_b, SIZE_IN_BYTES);
cudaMalloc( (void **) &d_c, SIZE_IN_BYTES);
//## 2. Memory Initialization HOST
//Initializing Host Arrays
initializeArray( h_a, WIDTH, WIDTH, 50);
initializeArray( h_b, WIDTH, WIDTH, 50);
//## 3. Memcpy HOST to DEVICE
cudaMemcpy( d_a, h_a, SIZE_IN_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, SIZE_IN_BYTES, cudaMemcpyHostToDevice);
//## 4. Kernel Invocation
int mat_size= WIDTH * WIDTH;
int threadsPerBlock = 1024;
int blockPerGrid = ceil(mat_size / threadsPerBlock)+1;//97657
addKernel<<<blockPerGrid, threadsPerBlock>>>(d_a, d_b, d_c, WIDTH);
////## 5. Memcpy DEVICE to HOST
cudaMemcpy( h_c, d_c, SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
// for comparing results
int *cpu_results; // to store CPU results
cpu_results = (int *) malloc( SIZE_IN_BYTES);
addKernelCPU( h_a, h_b, cpu_results, WIDTH);
// Displaying Result
cout<<"Comparing and Displaying Result"<<endl;
compareResult( h_c, cpu_results, WIDTH);
displayArray( "a",h_a,2,2);
displayArray("b", h_b, 2,2);
displayArray("c",h_c,2,2);
displayArray("cpu_result",cpu_results,2,2);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
system("pause");
return 0;
}
void addKernelCPU( int *arrayA, int *arrayB, int *arrayC, int width){
int arraySize = width * width;
for(int i=0; i<arraySize; i++){
arrayC[i] = arrayA[i] + arrayB[i];
}
}
void compareResult( int *arrayA, int *arrayB, int width){
int arraySize = width * width;
bool mismatchFound = false;
for(int i=0; i<arraySize; i++){
if( arrayA[i] != arrayB[i]){
cout<<"arrayA["<<i<<"] != arrayB["<<i<<"]"<<endl;
mismatchFound = true;
break;
}
}
if(!mismatchFound)
cout<<"Result on CPU and GPU is the same"<<endl;
}
void initializeArray(int *array, int width, int height, int randomValueSEED){
int MAT_SIZE = width * height;
// Initializing Array with random values
srand ( time(NULL) );
for( int i=0; i<MAT_SIZE; i++){
int value = rand() % randomValueSEED + 1;
array[i] = value;
}
}
void displayArray(char* arrayName,int *array,int width, int height){
cout<<"Displaying Values of Array: "<<arrayName<<endl;
for(int i=0; i<width*height; i++){
cout<<"Array["<<i<<"] : "<<array[i]<<endl;
}
}
void viewMemUse(int pWidth, int pHeight){
int size = pWidth * pHeight * sizeof(int);
cout<<"Size: of WIDTH * HEIGHT * sizeof(int)"<<endl;
cout<<"Size = "<<pWidth<<" * "<<pHeight<<" * sizeof(int)"<<endl;
cout<<"Size: BYTES "<<size<<endl;
cout<<"Size: KBYTES "<<size/1024<<endl;
cout<<"Size: MBYTES "<<(size/1024)/1024<<endl;
float gSize = ((size/1024.0)/1024.0)/1024.0;
cout<<"Size: GBYTES "<<gSize<<endl;
}
|
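In the pair above, the grid size is computed as ceil(mat_size / threadsPerBlock)+1; because mat_size / threadsPerBlock is integer division, the ceil is a no-op and the +1 restores the truncated remainder (over-allocating one block when the division is exact, which the idx < WIDTH*WIDTH guard in the kernel absorbs). A hedged sketch of the more common integer ceiling-division form, reusing the variable names from the program above:

```cpp
// Sketch: integer ceiling division as an alternative to ceil(mat_size / threadsPerBlock)+1.
// Assumes mat_size and threadsPerBlock are positive ints, as in the program above.
int blocksPerGrid = (mat_size + threadsPerBlock - 1) / threadsPerBlock;
// For mat_size = 10000*10000 and threadsPerBlock = 1024 this yields 97657 blocks,
// matching the value noted in the original comment, and it never allocates an
// extra block when mat_size happens to be an exact multiple of threadsPerBlock.
```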
010d847977dbe5ca5032cdfe268cf714fcaef614.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define NUM_BLOCKS (1)
#define BLOCK_WIDTH (4)
__global__
void hello()
{
int idx = threadIdx.x;
// all the threads in the block can access the block-shared memory
__shared__ int array[BLOCK_WIDTH];
// initialize the memory
array[idx] = threadIdx.x;
// stop all the threads from reading before the array is fully initialized
printf("Hello world! I'm a thread in block %d\n", idx);
__syncthreads();
printf("Hello world! I'm a thread in block %d\n", idx);
if (idx < BLOCK_WIDTH-1)
{
// pre-read the memory
int temp = array[idx+1];
__syncthreads();
// once the read is done, write it
array[idx] = temp;
__syncthreads(); // use sync to ensure all shared-memory writes have completed
}
}
int main()
{
hipLaunchKernelGGL(( hello), dim3(NUM_BLOCKS), dim3(BLOCK_WIDTH), 0, 0, );
// force the printf()s to flush
hipDeviceSynchronize();
printf("That's all!\n");
return 0;
}
|
010d847977dbe5ca5032cdfe268cf714fcaef614.cu
|
#include <stdio.h>
#define NUM_BLOCKS (1)
#define BLOCK_WIDTH (4)
__global__
void hello()
{
int idx = threadIdx.x;
// all the threads in the block can access the block-shared memory
__shared__ int array[BLOCK_WIDTH];
// initialize the memory
array[idx] = threadIdx.x;
// stop all the threads from reading before the array is fully initialized
printf("Hello world! I'm a thread in block %d\n", idx);
__syncthreads();
printf("Hello world! I'm a thread in block %d\n", idx);
if (idx < BLOCK_WIDTH-1)
{
// pre-read the memory
int temp = array[idx+1];
__syncthreads();
// once the read is done, write it
array[idx] = temp;
__syncthreads(); // use sync to ensure all shared-memory writes have completed
}
}
int main()
{
hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();
// force the printf()s to flush
cudaDeviceSynchronize();
printf("That's all!\n");
return 0;
}
|
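Neither file above checks whether the hello launch actually succeeded; the cudaDeviceSynchronize() / hipDeviceSynchronize() call flushes the device-side printf output, but its return value is discarded. A minimal sketch of launch error checking that could follow the launch in the CUDA file (the HIP file would use hipGetLastError, hipDeviceSynchronize, and hipGetErrorString the same way):

```cpp
// Sketch: check both the launch itself and the kernel's execution,
// e.g. right after hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>(); in the file above.
cudaError_t launchErr = cudaGetLastError();        // errors detected at launch time
if (launchErr != cudaSuccess)
    printf("launch failed: %s\n", cudaGetErrorString(launchErr));
cudaError_t syncErr = cudaDeviceSynchronize();     // errors raised while the kernel ran
if (syncErr != cudaSuccess)
    printf("kernel failed: %s\n", cudaGetErrorString(syncErr));
```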
fe6a9df3705d5d0c4316025e13221b50af157855.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "decoder_masked_multihead_attention_template.hpp"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention.h"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h"
#include "src/fastertransformer/utils/cuda_bf16_wrapper.h"
#include <assert.h>
#include <float.h>
#include <type_traits>
////////////////////////////////////////////////////////////////////////////////////////////////////
#define MMHA_LAUNCH_KERNEL( \
T, Dh, Dh_MAX, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK, DO_CROSS_ATTENTION, HAS_BEAMS, stream) \
size_t smem_sz = mmha::smem_size_in_bytes<T, DO_CROSS_ATTENTION>(params, THDS_PER_VALUE, THDS_PER_BLOCK); \
dim3 grid(params.num_heads, params.batch_size); \
hipLaunchKernelGGL(( mmha::masked_multihead_attention_kernel<T, \
Dh, \
Dh_MAX, \
THDS_PER_KEY, \
THDS_PER_VALUE, \
THDS_PER_BLOCK, \
DO_CROSS_ATTENTION, \
HAS_BEAMS>), dim3(grid), dim3(THDS_PER_BLOCK), smem_sz, stream, params)
////////////////////////////////////////////////////////////////////////////////////////////////////
// !!! Specialize the launcher for Cross attention
template<typename T, int Dh, int Dh_MAX, typename KERNEL_PARAMS_TYPE>
void mmha_launch_kernel(const KERNEL_PARAMS_TYPE& params, const hipStream_t& stream)
{
constexpr int THREADS_PER_VALUE = threads_per_value_t<T, Dh_MAX>::value;
constexpr bool DO_CROSS_ATTENTION = std::is_same<KERNEL_PARAMS_TYPE, Cross_multihead_attention_params<T>>::value;
int tlength = (DO_CROSS_ATTENTION) ? params.memory_max_len : params.timestep;
if (params.cache_indir == nullptr) {
if (tlength < 32) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, false, stream);
}
else if (tlength < 2048) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, false, stream);
}
else {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, false, stream);
}
}
else {
if (tlength < 32) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, true, stream);
}
else if (tlength < 2048) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, true, stream);
}
else {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, true, stream);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template void mmha_launch_kernel<float, 112, 128, Masked_multihead_attention_params<float>>(
const Masked_multihead_attention_params<float>& params, const hipStream_t& stream);
template void mmha_launch_kernel<uint16_t, 112, 128, Masked_multihead_attention_params<uint16_t>>(
const Masked_multihead_attention_params<uint16_t>& params, const hipStream_t& stream);
#ifdef ENABLE_BF16
template void mmha_launch_kernel<__nv_bfloat16, 112, 128, Masked_multihead_attention_params<__nv_bfloat16>>(
const Masked_multihead_attention_params<__nv_bfloat16>& params, const hipStream_t& stream);
#endif
#ifdef ENABLE_FP8
template void mmha_launch_kernel<__nv_fp8_e4m3, 112, 128, Masked_multihead_attention_params<__nv_fp8_e4m3>>(
const Masked_multihead_attention_params<__nv_fp8_e4m3>& params, const hipStream_t& stream);
#endif
template void mmha_launch_kernel<float, 112, 128, Cross_multihead_attention_params<float>>(
const Cross_multihead_attention_params<float>& params, const hipStream_t& stream);
template void mmha_launch_kernel<uint16_t, 112, 128, Cross_multihead_attention_params<uint16_t>>(
const Cross_multihead_attention_params<uint16_t>& params, const hipStream_t& stream);
#ifdef ENABLE_BF16
template void mmha_launch_kernel<__nv_bfloat16, 112, 128, Cross_multihead_attention_params<__nv_bfloat16>>(
const Cross_multihead_attention_params<__nv_bfloat16>& params, const hipStream_t& stream);
#endif
#ifdef ENABLE_FP8
template void mmha_launch_kernel<__nv_fp8_e4m3, 112, 128, Cross_multihead_attention_params<__nv_fp8_e4m3>>(
const Cross_multihead_attention_params<__nv_fp8_e4m3>& params, const hipStream_t& stream);
#endif
#undef MMHA_LAUNCH_KERNEL
|
fe6a9df3705d5d0c4316025e13221b50af157855.cu
|
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "decoder_masked_multihead_attention_template.hpp"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention.h"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h"
#include "src/fastertransformer/utils/cuda_bf16_wrapper.h"
#include <assert.h>
#include <float.h>
#include <type_traits>
////////////////////////////////////////////////////////////////////////////////////////////////////
#define MMHA_LAUNCH_KERNEL( \
T, Dh, Dh_MAX, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK, DO_CROSS_ATTENTION, HAS_BEAMS, stream) \
size_t smem_sz = mmha::smem_size_in_bytes<T, DO_CROSS_ATTENTION>(params, THDS_PER_VALUE, THDS_PER_BLOCK); \
dim3 grid(params.num_heads, params.batch_size); \
mmha::masked_multihead_attention_kernel<T, \
Dh, \
Dh_MAX, \
THDS_PER_KEY, \
THDS_PER_VALUE, \
THDS_PER_BLOCK, \
DO_CROSS_ATTENTION, \
HAS_BEAMS><<<grid, THDS_PER_BLOCK, smem_sz, stream>>>(params)
////////////////////////////////////////////////////////////////////////////////////////////////////
// !!! Specialize the launcher for Cross attention
template<typename T, int Dh, int Dh_MAX, typename KERNEL_PARAMS_TYPE>
void mmha_launch_kernel(const KERNEL_PARAMS_TYPE& params, const cudaStream_t& stream)
{
constexpr int THREADS_PER_VALUE = threads_per_value_t<T, Dh_MAX>::value;
constexpr bool DO_CROSS_ATTENTION = std::is_same<KERNEL_PARAMS_TYPE, Cross_multihead_attention_params<T>>::value;
int tlength = (DO_CROSS_ATTENTION) ? params.memory_max_len : params.timestep;
if (params.cache_indir == nullptr) {
if (tlength < 32) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, false, stream);
}
else if (tlength < 2048) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, false, stream);
}
else {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, false, stream);
}
}
else {
if (tlength < 32) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, true, stream);
}
else if (tlength < 2048) {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, true, stream);
}
else {
MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, true, stream);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template void mmha_launch_kernel<float, 112, 128, Masked_multihead_attention_params<float>>(
const Masked_multihead_attention_params<float>& params, const cudaStream_t& stream);
template void mmha_launch_kernel<uint16_t, 112, 128, Masked_multihead_attention_params<uint16_t>>(
const Masked_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream);
#ifdef ENABLE_BF16
template void mmha_launch_kernel<__nv_bfloat16, 112, 128, Masked_multihead_attention_params<__nv_bfloat16>>(
const Masked_multihead_attention_params<__nv_bfloat16>& params, const cudaStream_t& stream);
#endif
#ifdef ENABLE_FP8
template void mmha_launch_kernel<__nv_fp8_e4m3, 112, 128, Masked_multihead_attention_params<__nv_fp8_e4m3>>(
const Masked_multihead_attention_params<__nv_fp8_e4m3>& params, const cudaStream_t& stream);
#endif
template void mmha_launch_kernel<float, 112, 128, Cross_multihead_attention_params<float>>(
const Cross_multihead_attention_params<float>& params, const cudaStream_t& stream);
template void mmha_launch_kernel<uint16_t, 112, 128, Cross_multihead_attention_params<uint16_t>>(
const Cross_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream);
#ifdef ENABLE_BF16
template void mmha_launch_kernel<__nv_bfloat16, 112, 128, Cross_multihead_attention_params<__nv_bfloat16>>(
const Cross_multihead_attention_params<__nv_bfloat16>& params, const cudaStream_t& stream);
#endif
#ifdef ENABLE_FP8
template void mmha_launch_kernel<__nv_fp8_e4m3, 112, 128, Cross_multihead_attention_params<__nv_fp8_e4m3>>(
const Cross_multihead_attention_params<__nv_fp8_e4m3>& params, const cudaStream_t& stream);
#endif
#undef MMHA_LAUNCH_KERNEL
|
28e557947cd0ba7a1b6bda79a4952396c13ede2f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
//#include <hip/hip_cooperative_groups.h>
#include <math.h>
#include <string.h>
#include <sstream>
#include <fstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
//using namespace cooperative_groups;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 1
#define C 96
#define H 31
#define W 31
#define R 5
#define S 5
#define M 256
#define E 27
#define F 27
#define U 1
__global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt)
{
float red_sum = 0;
int row = threadIdx.y; int col = threadIdx.x;
for(int i=0; i<num_ch; i++)
{
red_sum += d_o[i*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] ;
}
d_r[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = red_sum;
}
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{
__shared__ float s_w[R*S];
int row = threadIdx.y; int col = threadIdx.x;
if(row*width+col<R*S)
{
s_w[row*width+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*width+col)];
}
__syncthreads();
float prod = 0;
//int row = threadIdx.y; int col = threadIdx.x;
if((row<height) && (col<width))//earlier it was num_wt*height & num_img*width
{
for (int i=0; i<wt_width; i++){
for (int j=0; j<wt_width; j++){
float ip = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(row)+i)*ip_height+(stride*(col)+j)];
//float wt = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(i*wt_width+j)];
prod += ip*s_w[i*wt_width+j];
__syncthreads();
}
}
if(prod>=0)
d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = prod;
if(row*width+col<R*S){
s_w[(row*width+col)] = 0;
__syncthreads();
}
}
}
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
float wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = ip*wt;
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
/*************INITIALIZING MATRICES*********************************/
float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
float *WT = (float*) malloc(M*C*R*S*sizeof(float));
//float WT[R][S];
float* d_o;
float* d_i;
float* d_w;
float* d_r;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/*INITIALIZING WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
//WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1;
WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0);
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
// IP[c][d] = (a+b+c+d);
if ((c<=1) || (d<=1) || (c>=29) || (d>=29))
IP[n*C*H*W+k*H*W+c*W+d] = 0;
else
IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
}
}
}
}
if(hipSuccess != hipMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float)))
{
printf("error in d_i malloc\n");
}
hipMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), hipMemcpyHostToDevice);
if(hipSuccess != hipMalloc((void**) &d_w, M*C*R*S*sizeof(float)))
{
printf("error in d_w malloc\n");
}
hipMemcpy(d_w, WT, M*C*R*S*sizeof(float), hipMemcpyHostToDevice);
if(hipSuccess != hipMalloc((void**) &d_o,(long int)C*batch_size*M*E*F*sizeof(float)))
{
printf("error in d_o malloc\n");
}
if(hipSuccess != hipMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float)))
{
printf("error in d_r malloc\n");
}
//cpu_start = clock();
//element_wise_mmul(OP, IP, WT, batch_size);
//cpu_end = clock();
dim3 dimGrid(batch_size,256,96);
dim3 dimBlock(27,27,1);
dim3 dimGridRed(batch_size,256,1);
dim3 dimBlockRed(27,27,1);
//int op_height = 3; int op_width = 3; int stride = 1; int ip_height = 4;int wt_height = 2; int num_wt = 96; int num_img = 1; int num_ch = 384;
//gpu_start = clock();
hipLaunchKernelGGL((ew_gpu_mmul), dim3(dimGrid), dim3(dimBlock), 0, 0, d_o,d_i,d_w,27,27,1,31,5,256,batch_size,96);
hipDeviceSynchronize();
hipLaunchKernelGGL((red_ch), dim3(dimGridRed), dim3(dimBlockRed), 0, 0, d_r,d_o,96,batch_size,256);
//gpu_end = clock();
//void *kernelArgs[] = {(void *)&d_o, (void *)&d_i, (void *)&d_w,(void *)&op_height, (void *)&op_width, (void *)&stride, (void *)&ip_height,(void *)&wt_height, (void *)&num_wt, (void *)&num_img, (void *)&num_ch };
//hipLaunchCooperativeKernel((void*)ew_gpu_mmul,dimGrid,dimBlock,kernelArgs,0,NULL);
//hipDeviceSynchronize();
hipMemcpy(OPG,d_r,batch_size*M*E*F*sizeof(float), hipMemcpyDeviceToHost);
/**print outputs**/
//int e,f,g,h;
int g,h,s,u;
float max_error = 0;
string filename = "layer_2_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
//for (t=0;t<C;t++){
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
//float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if(error > max_error)
max_error = error;
// printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error is %f\n", max_error);
//}
//cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
hipFree(d_o);
hipFree(d_i);
hipFree(d_w);
hipFree(d_r);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
|
28e557947cd0ba7a1b6bda79a4952396c13ede2f.cu
|
#include <stdio.h>
#include <iostream>
//#include <cooperative_groups.h>
#include <math.h>
#include <string.h>
#include <sstream>
#include <fstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
//using namespace cooperative_groups;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 1
#define C 96
#define H 31
#define W 31
#define R 5
#define S 5
#define M 256
#define E 27
#define F 27
#define U 1
__global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt)
{
float red_sum = 0;
int row = threadIdx.y; int col = threadIdx.x;
for(int i=0; i<num_ch; i++)
{
red_sum += d_o[i*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] ;
}
d_r[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = red_sum;
}
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{
__shared__ float s_w[R*S];
int row = threadIdx.y; int col = threadIdx.x;
if(row*width+col<R*S)
{
s_w[row*width+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*width+col)];
}
__syncthreads();
float prod = 0;
//int row = threadIdx.y; int col = threadIdx.x;
if((row<height) && (col<width))//earlier it was num_wt*height & num_img*width
{
for (int i=0; i<wt_width; i++){
for (int j=0; j<wt_width; j++){
float ip = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+(stride*(row)+i)*ip_height+(stride*(col)+j)];
//float wt = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(i*wt_width+j)];
prod += ip*s_w[i*wt_width+j];
__syncthreads();
}
}
if(prod>=0)
d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = prod;
if(row*width+col<R*S){
s_w[(row*width+col)] = 0;
__syncthreads();
}
}
}
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
float wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = ip*wt;
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
/*************INITIALIZING MATRICES*********************************/
float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
float *WT = (float*) malloc(M*C*R*S*sizeof(float));
//float WT[R][S];
float* d_o;
float* d_i;
float* d_w;
float* d_r;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/*INITIALIZING WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
//WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1;
WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0);
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
// IP[c][d] = (a+b+c+d);
if ((c<=1) || (d<=1) || (c>=29) || (d>=29))
IP[n*C*H*W+k*H*W+c*W+d] = 0;
else
IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
}
}
}
}
if(cudaSuccess != cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float)))
{
printf("error in d_i malloc\n");
}
cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), cudaMemcpyHostToDevice);
if(cudaSuccess != cudaMalloc((void**) &d_w, M*C*R*S*sizeof(float)))
{
printf("error in d_w malloc\n");
}
cudaMemcpy(d_w, WT, M*C*R*S*sizeof(float), cudaMemcpyHostToDevice);
if(cudaSuccess != cudaMalloc((void**) &d_o,(long int)C*batch_size*M*E*F*sizeof(float)))
{
printf("error in d_o malloc\n");
}
if(cudaSuccess != cudaMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float)))
{
printf("error in d_r malloc\n");
}
//cpu_start = clock();
//element_wise_mmul(OP, IP, WT, batch_size);
//cpu_end = clock();
dim3 dimGrid(batch_size,256,96);
dim3 dimBlock(27,27,1);
dim3 dimGridRed(batch_size,256,1);
dim3 dimBlockRed(27,27,1);
//int op_height = 3; int op_width = 3; int stride = 1; int ip_height = 4;int wt_height = 2; int num_wt = 96; int num_img = 1; int num_ch = 384;
//gpu_start = clock();
ew_gpu_mmul<<<dimGrid, dimBlock>>>(d_o,d_i,d_w,27,27,1,31,5,256,batch_size,96);
cudaDeviceSynchronize();
red_ch<<<dimGridRed, dimBlockRed>>>(d_r,d_o,96,batch_size,256);
//gpu_end = clock();
//void *kernelArgs[] = {(void *)&d_o, (void *)&d_i, (void *)&d_w,(void *)&op_height, (void *)&op_width, (void *)&stride, (void *)&ip_height,(void *)&wt_height, (void *)&num_wt, (void *)&num_img, (void *)&num_ch };
//cudaLaunchCooperativeKernel((void*)ew_gpu_mmul,dimGrid,dimBlock,kernelArgs,0,NULL);
//cudaDeviceSynchronize();
cudaMemcpy(OPG,d_r,batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost);
/**print outputs**/
//int e,f,g,h;
int g,h,s,u;
float max_error = 0;
string filename = "layer_2_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
//for (t=0;t<C;t++){
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
//float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if(error > max_error)
max_error = error;
// printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error is %f\n", max_error);
//}
//cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
cudaFree(d_o);
cudaFree(d_i);
cudaFree(d_w);
cudaFree(d_r);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
|
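This pair closely mirrors the first pair in the table: there the inputs and weights are stored as __half and converted at the point of use with __float2half / __half2float while the accumulation stays in float, whereas here everything is single precision. A minimal sketch of that fp16-storage / fp32-accumulation pattern; the kernel dot_fp16 is a hypothetical example, assuming cuda_fp16.h on CUDA (hip/hip_fp16.h on HIP):

```cpp
#include <cuda_fp16.h>   // __half, __half2float (use hip/hip_fp16.h under HIP)

// Hypothetical kernel: operands stored in half precision, sum accumulated in float,
// mirroring how the first pair in the table handles its inputs and weights.
// Intended to be launched with a single thread, purely for illustration.
__global__ void dot_fp16(const __half* a, const __half* b, float* out, int n)
{
    float acc = 0.0f;                                    // fp32 accumulator
    for (int i = 0; i < n; ++i)
        acc += __half2float(a[i]) * __half2float(b[i]);  // convert only at the point of use
    if (threadIdx.x == 0 && blockIdx.x == 0)
        *out = acc;
}
```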
895fb23a7f6f52c9e657fa849a5d177306a28214.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/MultiLabelMarginCriterion.hip"
#else
static inline void THNN_(MultiLabelMarginCriterion_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *target) {
if (input->dim() <= 1) {
int dim = input->dim() == 0 ? 1 : input->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
TORCH_CHECK(!target->is_empty() && (target->dim() <= 1) && (target_size == dim),
"inconsistent target size: ", target->sizes(), " for input of size: ", input->sizes());
} else if (input->dim() == 2) {
int nframe = input->size(0);
int dim = input->size(1);
TORCH_CHECK(!target->is_empty() && (target->dim() == 2)
&& (target->size(0) == nframe) && (target->size(1) == dim),
"inconsistent target size: ", target->sizes(), " for input of size: ", input->sizes());
} else {
TORCH_CHECK(false, "non-empty vector or matrix expected, got size: ", input->sizes());
}
}
// TODO: improve error messages
void THNN_(MultiLabelMarginCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
THCTensor *istarget,
int64_t reduction)
{
THNN_(MultiLabelMarginCriterion_shapeCheck)(state, input, target);
input = THCTensor_(newContiguous)(state, input);
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
THCTensor_(resizeAs)(state, istarget, target);
if(input->dim() <= 1)
{
int dim = input->dim() == 0 ? 1 : input->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
THCTensor_(resize0d)(state, output);
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, dim,
reduction == at::Reduction::Mean
);
THCudaCheck(hipGetLastError());
}
else if(input->dim() == 2)
{
int nframe = input->size(0);
int dim = input->size(1);
dim3 blocks(input->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
if (reduction != at::Reduction::None)
{
THCTensor *output_tmp = THCTensor_(newWithSize1d)(state, input->size(0));
THCTensor_(resize0d)(state, output);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, output_tmp),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
reduction == at::Reduction::Mean
);
THCudaCheck(hipGetLastError());
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(THCTensor_(sumall)(state, output_tmp)));
THCTensor_(free)(state, output_tmp);
}
else
{
THCTensor_(resize1d)(state, output, input->size(0));
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
false
);
THCudaCheck(hipGetLastError());
}
}
else {
TORCH_INTERNAL_ASSERT(false, "non-empty vector or matrix expected (shouldn't get here)");
}
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
}
void THNN_(MultiLabelMarginCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *istarget,
int64_t reduction)
{
input = THCTensor_(newContiguous)(state, input);
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
if(gradInput->dim() <= 1)
{
int dim = gradInput->dim() == 0 ? 1 : gradInput->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
THArgCheck(!target->is_empty() && (target->dim() <= 1) && (target_size == dim), 3,
"inconsistent target size");
TORCH_CHECK(target->sizes() == istarget->sizes(), "inconsistent isTarget size");
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, dim,
reduction == at::Reduction::Mean,
reduction != at::Reduction::None);
}
else if(gradInput->dim() == 2)
{
int nframe = gradInput->size(0);
int dim = gradInput->size(1);
THArgCheck(!target->is_empty() && (target->dim() == 2) && (target->size(0) == nframe)
&& (target->size(1) == dim), 3, "inconsistent target size");
THArgCheck(!istarget->is_empty() && (istarget->dim() == 2) && (istarget->size(0) == nframe)
&& (istarget->size(1) == dim), 3, "inconsistent isTarget size");
dim3 blocks(gradInput->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
gradInput->size(0), gradInput->size(1),
reduction == at::Reduction::Mean,
reduction != at::Reduction::None);
}
else {
AT_ERROR("non-empty vector or matrix expected, got size: ", gradInput->sizes());
}
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
THCTensor_(free)(state, gradOutput);
}
#endif
|
895fb23a7f6f52c9e657fa849a5d177306a28214.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/MultiLabelMarginCriterion.cu"
#else
static inline void THNN_(MultiLabelMarginCriterion_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *target) {
if (input->dim() <= 1) {
int dim = input->dim() == 0 ? 1 : input->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
TORCH_CHECK(!target->is_empty() && (target->dim() <= 1) && (target_size == dim),
"inconsistent target size: ", target->sizes(), " for input of size: ", input->sizes());
} else if (input->dim() == 2) {
int nframe = input->size(0);
int dim = input->size(1);
TORCH_CHECK(!target->is_empty() && (target->dim() == 2)
&& (target->size(0) == nframe) && (target->size(1) == dim),
"inconsistent target size: ", target->sizes(), " for input of size: ", input->sizes());
} else {
TORCH_CHECK(false, "non-empty vector or matrix expected, got size: ", input->sizes());
}
}
// TODO: improve error messages
void THNN_(MultiLabelMarginCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
THCTensor *istarget,
int64_t reduction)
{
THNN_(MultiLabelMarginCriterion_shapeCheck)(state, input, target);
input = THCTensor_(newContiguous)(state, input);
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
THCTensor_(resizeAs)(state, istarget, target);
if(input->dim() <= 1)
{
int dim = input->dim() == 0 ? 1 : input->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
THCTensor_(resize0d)(state, output);
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, dim,
reduction == at::Reduction::Mean
);
THCudaCheck(cudaGetLastError());
}
else if(input->dim() == 2)
{
int nframe = input->size(0);
int dim = input->size(1);
dim3 blocks(input->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
if (reduction != at::Reduction::None)
{
THCTensor *output_tmp = THCTensor_(newWithSize1d)(state, input->size(0));
THCTensor_(resize0d)(state, output);
cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
THCTensor_(data)(state, output_tmp),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
reduction == at::Reduction::Mean
);
THCudaCheck(cudaGetLastError());
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(THCTensor_(sumall)(state, output_tmp)));
THCTensor_(free)(state, output_tmp);
}
else
{
THCTensor_(resize1d)(state, output, input->size(0));
cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
false
);
THCudaCheck(cudaGetLastError());
}
}
else {
TORCH_INTERNAL_ASSERT(false, "non-empty vector or matrix expected (shouldn't get here)");
}
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
}
void THNN_(MultiLabelMarginCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *istarget,
int64_t reduction)
{
input = THCTensor_(newContiguous)(state, input);
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
if(gradInput->dim() <= 1)
{
int dim = gradInput->dim() == 0 ? 1 : gradInput->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
THArgCheck(!target->is_empty() && (target->dim() <= 1) && (target_size == dim), 3,
"inconsistent target size");
TORCH_CHECK(target->sizes() == istarget->sizes(), "inconsistent isTarget size");
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, dim,
reduction == at::Reduction::Mean,
reduction != at::Reduction::None);
}
else if(gradInput->dim() == 2)
{
int nframe = gradInput->size(0);
int dim = gradInput->size(1);
THArgCheck(!target->is_empty() && (target->dim() == 2) && (target->size(0) == nframe)
&& (target->size(1) == dim), 3, "inconsistent target size");
THArgCheck(!istarget->is_empty() && (istarget->dim() == 2) && (istarget->size(0) == nframe)
&& (istarget->size(1) == dim), 3, "inconsistent isTarget size");
dim3 blocks(gradInput->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
gradInput->size(0), gradInput->size(1),
reduction == at::Reduction::Mean,
reduction != at::Reduction::None);
}
else {
AT_ERROR("non-empty vector or matrix expected, got size: ", gradInput->sizes());
}
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
THCTensor_(free)(state, gradOutput);
}
#endif
|
3ddf1180a81b6d812a29841fcf8e6f8c13790f79.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include "RiemannFit.h"
#include "test_common.h"
#include "../../cuda/cudaCheck.h"
using namespace Eigen;
namespace Rfit {
constexpr uint32_t maxNumberOfTracks() { return 5*1024; }
constexpr uint32_t stride() { return maxNumberOfTracks();}
using Matrix3x4d = Eigen::Matrix<double,3,4>;
using Map3x4d = Eigen::Map<Matrix3x4d,0,Eigen::Stride<3*stride(),stride()> >;
using Matrix6x4f = Eigen::Matrix<float,6,4>;
using Map6x4f = Eigen::Map<Matrix6x4f,0,Eigen::Stride<6*stride(),stride()> >;
using Map4d = Eigen::Map<Vector4d,0,Eigen::InnerStride<stride()> >;
}
__global__
void kernelFastFit(double * __restrict__ phits, double * __restrict__ presults) {
auto i = blockIdx.x*blockDim.x + threadIdx.x;
Rfit::Map3x4d hits(phits+i,3,4);
Rfit::Map4d result(presults+i,4);
Rfit::Fast_fit(hits, result);
}
__global__
void kernelCircleFit(double * __restrict__ phits,
float * __restrict__ phits_ge,
double * __restrict__ pfast_fit_input,
double B,
Rfit::circle_fit * circle_fit_resultsGPU) {
auto i = blockIdx.x*blockDim.x + threadIdx.x;
Rfit::Map3x4d hits(phits+i,3,4);
Rfit::Map4d fast_fit_input(pfast_fit_input+i,4);
Rfit::Map6x4f hits_ge(phits_ge+i,6,4);
constexpr uint32_t N = Rfit::Map3x4d::ColsAtCompileTime;
constexpr auto n = N;
Rfit::VectorNd<N> rad = (hits.block(0, 0, 2, n).colwise().norm());
Rfit::Matrix2Nd<N> hits_cov = MatrixXd::Zero(2 * n, 2 * n);
Rfit::loadCovariance2D(hits_ge,hits_cov);
#if TEST_DEBUG
if (0==i) {
printf("hits %f, %f\n", hits.block(0,0,2,n)(0,0), hits.block(0,0,2,n)(0,1));
printf("hits %f, %f\n", hits.block(0,0,2,n)(1,0), hits.block(0,0,2,n)(1,1));
printf("fast_fit_input(0): %f\n", fast_fit_input(0));
printf("fast_fit_input(1): %f\n", fast_fit_input(1));
printf("fast_fit_input(2): %f\n", fast_fit_input(2));
printf("fast_fit_input(3): %f\n", fast_fit_input(3));
printf("rad(0,0): %f\n", rad(0,0));
printf("rad(1,1): %f\n", rad(1,1));
printf("rad(2,2): %f\n", rad(2,2));
printf("hits_cov(0,0): %f\n", (*hits_cov)(0,0));
printf("hits_cov(1,1): %f\n", (*hits_cov)(1,1));
printf("hits_cov(2,2): %f\n", (*hits_cov)(2,2));
printf("hits_cov(11,11): %f\n", (*hits_cov)(11,11));
printf("B: %f\n", B);
}
#endif
circle_fit_resultsGPU[i] =
Rfit::Circle_fit(hits.block(0,0,2,n), hits_cov,
fast_fit_input, rad, B, true);
#if TEST_DEBUG
if (0==i) {
printf("Circle param %f,%f,%f\n",circle_fit_resultsGPU[i].par(0),circle_fit_resultsGPU[i].par(1),circle_fit_resultsGPU[i].par(2));
}
#endif
}
__global__
void kernelLineFit(double * __restrict__ phits,
float * __restrict__ phits_ge,
Rfit::circle_fit * circle_fit,
double * __restrict__ pfast_fit,
Rfit::line_fit * line_fit)
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
Rfit::Map3x4d hits(phits+i,3,4);
Rfit::Map4d fast_fit(pfast_fit+i,4);
Rfit::Map6x4f hits_ge(phits_ge+i,6,4);
line_fit[i] = Rfit::Line_fit(hits, hits_ge, circle_fit[i], fast_fit, true);
}
template<typename M3x4, typename M6x4>
__device__ __host__
void fillHitsAndHitsCov(M3x4 & hits, M6x4 & hits_ge) {
hits << 1.98645, 4.72598, 7.65632, 11.3151,
2.18002, 4.88864, 7.75845, 11.3134,
2.46338, 6.99838, 11.808, 17.793;
hits_ge.col(0)[0] = 7.14652e-06;
hits_ge.col(1)[0] = 2.15789e-06;
hits_ge.col(2)[0] = 1.63328e-06;
hits_ge.col(3)[0] = 6.27919e-06;
hits_ge.col(0)[2] = 6.10348e-06;
hits_ge.col(1)[2] = 2.08211e-06;
hits_ge.col(2)[2] = 1.61672e-06;
hits_ge.col(3)[2] = 6.28081e-06;
hits_ge.col(0)[5] = 5.184e-05;
hits_ge.col(1)[5] = 1.444e-05;
hits_ge.col(2)[5] = 6.25e-06;
hits_ge.col(3)[5] = 3.136e-05;
hits_ge.col(0)[1] = -5.60077e-06;
hits_ge.col(1)[1] = -1.11936e-06;
hits_ge.col(2)[1] = -6.24945e-07;
hits_ge.col(3)[1] = -5.28e-06;
}
__global__
void kernelFillHitsAndHitsCov(double * __restrict__ phits,
float * phits_ge) {
auto i = blockIdx.x*blockDim.x + threadIdx.x;
Rfit::Map3x4d hits(phits+i,3,4);
Rfit::Map6x4f hits_ge(phits_ge+i,6,4);
hits_ge = MatrixXf::Zero(6,4);
fillHitsAndHitsCov(hits,hits_ge);
}
void testFit() {
constexpr double B = 0.0113921;
Rfit::Matrix3xNd<4> hits;
Rfit::Matrix6x4f hits_ge = MatrixXf::Zero(6,4);
  double * hitsGPU = nullptr;
float * hits_geGPU = nullptr;
double * fast_fit_resultsGPU = nullptr;
double * fast_fit_resultsGPUret = new double[Rfit::maxNumberOfTracks()*sizeof(Vector4d)];
Rfit::circle_fit * circle_fit_resultsGPU = nullptr;
Rfit::circle_fit * circle_fit_resultsGPUret = new Rfit::circle_fit();
Rfit::line_fit * line_fit_resultsGPU = nullptr;
fillHitsAndHitsCov(hits, hits_ge);
std::cout << "sizes " << sizeof(hits) << ' ' << sizeof(hits_ge)
<< ' ' << sizeof(Vector4d)<< std::endl;
std::cout << "Generated hits:\n" << hits << std::endl;
std::cout << "Generated cov:\n" << hits_ge << std::endl;
// FAST_FIT_CPU
Vector4d fast_fit_results; Rfit::Fast_fit(hits, fast_fit_results);
std::cout << "Fitted values (FastFit, [X0, Y0, R, tan(theta)]):\n" << fast_fit_results << std::endl;
// for timing purposes we fit 4096 tracks
constexpr uint32_t Ntracks = 4096;
cudaCheck(hipMalloc(&hitsGPU, Rfit::maxNumberOfTracks()*sizeof(Rfit::Matrix3xNd<4>)));
cudaCheck(hipMalloc(&hits_geGPU, Rfit::maxNumberOfTracks()*sizeof(Rfit::Matrix6x4f)));
cudaCheck(hipMalloc(&fast_fit_resultsGPU, Rfit::maxNumberOfTracks()*sizeof(Vector4d)));
cudaCheck(hipMalloc((void **)&line_fit_resultsGPU, Rfit::maxNumberOfTracks()*sizeof(Rfit::line_fit)));
cudaCheck(hipMalloc((void **)&circle_fit_resultsGPU, Rfit::maxNumberOfTracks()*sizeof(Rfit::circle_fit)));
hipLaunchKernelGGL(( kernelFillHitsAndHitsCov), dim3(Ntracks/64), dim3(64), 0, 0, hitsGPU,hits_geGPU);
// FAST_FIT GPU
hipLaunchKernelGGL(( kernelFastFit), dim3(Ntracks/64), dim3(64), 0, 0, hitsGPU, fast_fit_resultsGPU);
hipDeviceSynchronize();
hipMemcpy(fast_fit_resultsGPUret, fast_fit_resultsGPU, Rfit::maxNumberOfTracks()*sizeof(Vector4d), hipMemcpyDeviceToHost);
Rfit::Map4d fast_fit(fast_fit_resultsGPUret+10,4);
std::cout << "Fitted values (FastFit, [X0, Y0, R, tan(theta)]): GPU\n" << fast_fit << std::endl;
assert(isEqualFuzzy(fast_fit_results, fast_fit));
// CIRCLE_FIT CPU
constexpr uint32_t N = Rfit::Map3x4d::ColsAtCompileTime;
constexpr auto n = N;
Rfit::VectorNd<N> rad = (hits.block(0, 0, 2, n).colwise().norm());
Rfit::Matrix2Nd<N> hits_cov = MatrixXd::Zero(2 * n, 2 * n);
Rfit::loadCovariance2D(hits_ge,hits_cov);
Rfit::circle_fit circle_fit_results = Rfit::Circle_fit(hits.block(0, 0, 2, n),
hits_cov,
fast_fit_results, rad, B, true);
std::cout << "Fitted values (CircleFit):\n" << circle_fit_results.par << std::endl;
// CIRCLE_FIT GPU
hipLaunchKernelGGL(( kernelCircleFit), dim3(Ntracks/64), dim3(64), 0, 0, hitsGPU, hits_geGPU,
fast_fit_resultsGPU, B, circle_fit_resultsGPU);
hipDeviceSynchronize();
hipMemcpy(circle_fit_resultsGPUret, circle_fit_resultsGPU,
sizeof(Rfit::circle_fit), hipMemcpyDeviceToHost);
std::cout << "Fitted values (CircleFit) GPU:\n" << circle_fit_resultsGPUret->par << std::endl;
assert(isEqualFuzzy(circle_fit_results.par, circle_fit_resultsGPUret->par));
// LINE_FIT CPU
Rfit::line_fit line_fit_results = Rfit::Line_fit(hits, hits_ge, circle_fit_results, fast_fit_results, true);
std::cout << "Fitted values (LineFit):\n" << line_fit_results.par << std::endl;
// LINE_FIT GPU
Rfit::line_fit * line_fit_resultsGPUret = new Rfit::line_fit();
hipLaunchKernelGGL(( kernelLineFit), dim3(Ntracks/64), dim3(64), 0, 0, hitsGPU, hits_geGPU, circle_fit_resultsGPU, fast_fit_resultsGPU, line_fit_resultsGPU);
hipDeviceSynchronize();
hipMemcpy(line_fit_resultsGPUret, line_fit_resultsGPU, sizeof(Rfit::line_fit), hipMemcpyDeviceToHost);
std::cout << "Fitted values (LineFit) GPU:\n" << line_fit_resultsGPUret->par << std::endl;
assert(isEqualFuzzy(line_fit_results.par, line_fit_resultsGPUret->par));
std::cout << "Fitted cov (CircleFit) CPU:\n" << circle_fit_results.cov << std::endl;
std::cout << "Fitted cov (LineFit): CPU\n" << line_fit_results.cov << std::endl;
std::cout << "Fitted cov (CircleFit) GPU:\n" << circle_fit_resultsGPUret->cov << std::endl;
std::cout << "Fitted cov (LineFit): GPU\n" << line_fit_resultsGPUret->cov << std::endl;
}
int main (int argc, char * argv[]) {
testFit();
std::cout << "TEST FIT, NO ERRORS" << std::endl;
return 0;
}
|
3ddf1180a81b6d812a29841fcf8e6f8c13790f79.cu
|
#include <iostream>
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include "RiemannFit.h"
#include "test_common.h"
#include "../../cuda/cudaCheck.h"
using namespace Eigen;
namespace Rfit {
constexpr uint32_t maxNumberOfTracks() { return 5*1024; }
constexpr uint32_t stride() { return maxNumberOfTracks();}
using Matrix3x4d = Eigen::Matrix<double,3,4>;
using Map3x4d = Eigen::Map<Matrix3x4d,0,Eigen::Stride<3*stride(),stride()> >;
using Matrix6x4f = Eigen::Matrix<float,6,4>;
using Map6x4f = Eigen::Map<Matrix6x4f,0,Eigen::Stride<6*stride(),stride()> >;
using Map4d = Eigen::Map<Vector4d,0,Eigen::InnerStride<stride()> >;
}
__global__
void kernelFastFit(double * __restrict__ phits, double * __restrict__ presults) {
auto i = blockIdx.x*blockDim.x + threadIdx.x;
Rfit::Map3x4d hits(phits+i,3,4);
Rfit::Map4d result(presults+i,4);
Rfit::Fast_fit(hits, result);
}
__global__
void kernelCircleFit(double * __restrict__ phits,
float * __restrict__ phits_ge,
double * __restrict__ pfast_fit_input,
double B,
Rfit::circle_fit * circle_fit_resultsGPU) {
auto i = blockIdx.x*blockDim.x + threadIdx.x;
Rfit::Map3x4d hits(phits+i,3,4);
Rfit::Map4d fast_fit_input(pfast_fit_input+i,4);
Rfit::Map6x4f hits_ge(phits_ge+i,6,4);
constexpr uint32_t N = Rfit::Map3x4d::ColsAtCompileTime;
constexpr auto n = N;
Rfit::VectorNd<N> rad = (hits.block(0, 0, 2, n).colwise().norm());
Rfit::Matrix2Nd<N> hits_cov = MatrixXd::Zero(2 * n, 2 * n);
Rfit::loadCovariance2D(hits_ge,hits_cov);
#if TEST_DEBUG
if (0==i) {
printf("hits %f, %f\n", hits.block(0,0,2,n)(0,0), hits.block(0,0,2,n)(0,1));
printf("hits %f, %f\n", hits.block(0,0,2,n)(1,0), hits.block(0,0,2,n)(1,1));
printf("fast_fit_input(0): %f\n", fast_fit_input(0));
printf("fast_fit_input(1): %f\n", fast_fit_input(1));
printf("fast_fit_input(2): %f\n", fast_fit_input(2));
printf("fast_fit_input(3): %f\n", fast_fit_input(3));
printf("rad(0,0): %f\n", rad(0,0));
printf("rad(1,1): %f\n", rad(1,1));
printf("rad(2,2): %f\n", rad(2,2));
printf("hits_cov(0,0): %f\n", (*hits_cov)(0,0));
printf("hits_cov(1,1): %f\n", (*hits_cov)(1,1));
printf("hits_cov(2,2): %f\n", (*hits_cov)(2,2));
printf("hits_cov(11,11): %f\n", (*hits_cov)(11,11));
printf("B: %f\n", B);
}
#endif
circle_fit_resultsGPU[i] =
Rfit::Circle_fit(hits.block(0,0,2,n), hits_cov,
fast_fit_input, rad, B, true);
#if TEST_DEBUG
if (0==i) {
printf("Circle param %f,%f,%f\n",circle_fit_resultsGPU[i].par(0),circle_fit_resultsGPU[i].par(1),circle_fit_resultsGPU[i].par(2));
}
#endif
}
__global__
void kernelLineFit(double * __restrict__ phits,
float * __restrict__ phits_ge,
Rfit::circle_fit * circle_fit,
double * __restrict__ pfast_fit,
Rfit::line_fit * line_fit)
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
Rfit::Map3x4d hits(phits+i,3,4);
Rfit::Map4d fast_fit(pfast_fit+i,4);
Rfit::Map6x4f hits_ge(phits_ge+i,6,4);
line_fit[i] = Rfit::Line_fit(hits, hits_ge, circle_fit[i], fast_fit, true);
}
template<typename M3x4, typename M6x4>
__device__ __host__
void fillHitsAndHitsCov(M3x4 & hits, M6x4 & hits_ge) {
hits << 1.98645, 4.72598, 7.65632, 11.3151,
2.18002, 4.88864, 7.75845, 11.3134,
2.46338, 6.99838, 11.808, 17.793;
hits_ge.col(0)[0] = 7.14652e-06;
hits_ge.col(1)[0] = 2.15789e-06;
hits_ge.col(2)[0] = 1.63328e-06;
hits_ge.col(3)[0] = 6.27919e-06;
hits_ge.col(0)[2] = 6.10348e-06;
hits_ge.col(1)[2] = 2.08211e-06;
hits_ge.col(2)[2] = 1.61672e-06;
hits_ge.col(3)[2] = 6.28081e-06;
hits_ge.col(0)[5] = 5.184e-05;
hits_ge.col(1)[5] = 1.444e-05;
hits_ge.col(2)[5] = 6.25e-06;
hits_ge.col(3)[5] = 3.136e-05;
hits_ge.col(0)[1] = -5.60077e-06;
hits_ge.col(1)[1] = -1.11936e-06;
hits_ge.col(2)[1] = -6.24945e-07;
hits_ge.col(3)[1] = -5.28e-06;
}
__global__
void kernelFillHitsAndHitsCov(double * __restrict__ phits,
float * phits_ge) {
auto i = blockIdx.x*blockDim.x + threadIdx.x;
Rfit::Map3x4d hits(phits+i,3,4);
Rfit::Map6x4f hits_ge(phits_ge+i,6,4);
hits_ge = MatrixXf::Zero(6,4);
fillHitsAndHitsCov(hits,hits_ge);
}
void testFit() {
constexpr double B = 0.0113921;
Rfit::Matrix3xNd<4> hits;
Rfit::Matrix6x4f hits_ge = MatrixXf::Zero(6,4);
  double * hitsGPU = nullptr;
float * hits_geGPU = nullptr;
double * fast_fit_resultsGPU = nullptr;
double * fast_fit_resultsGPUret = new double[Rfit::maxNumberOfTracks()*sizeof(Vector4d)];
Rfit::circle_fit * circle_fit_resultsGPU = nullptr;
Rfit::circle_fit * circle_fit_resultsGPUret = new Rfit::circle_fit();
Rfit::line_fit * line_fit_resultsGPU = nullptr;
fillHitsAndHitsCov(hits, hits_ge);
std::cout << "sizes " << sizeof(hits) << ' ' << sizeof(hits_ge)
<< ' ' << sizeof(Vector4d)<< std::endl;
std::cout << "Generated hits:\n" << hits << std::endl;
std::cout << "Generated cov:\n" << hits_ge << std::endl;
// FAST_FIT_CPU
Vector4d fast_fit_results; Rfit::Fast_fit(hits, fast_fit_results);
std::cout << "Fitted values (FastFit, [X0, Y0, R, tan(theta)]):\n" << fast_fit_results << std::endl;
// for timing purposes we fit 4096 tracks
constexpr uint32_t Ntracks = 4096;
cudaCheck(cudaMalloc(&hitsGPU, Rfit::maxNumberOfTracks()*sizeof(Rfit::Matrix3xNd<4>)));
cudaCheck(cudaMalloc(&hits_geGPU, Rfit::maxNumberOfTracks()*sizeof(Rfit::Matrix6x4f)));
cudaCheck(cudaMalloc(&fast_fit_resultsGPU, Rfit::maxNumberOfTracks()*sizeof(Vector4d)));
cudaCheck(cudaMalloc((void **)&line_fit_resultsGPU, Rfit::maxNumberOfTracks()*sizeof(Rfit::line_fit)));
cudaCheck(cudaMalloc((void **)&circle_fit_resultsGPU, Rfit::maxNumberOfTracks()*sizeof(Rfit::circle_fit)));
kernelFillHitsAndHitsCov<<<Ntracks/64, 64>>>(hitsGPU,hits_geGPU);
// FAST_FIT GPU
kernelFastFit<<<Ntracks/64, 64>>>(hitsGPU, fast_fit_resultsGPU);
cudaDeviceSynchronize();
cudaMemcpy(fast_fit_resultsGPUret, fast_fit_resultsGPU, Rfit::maxNumberOfTracks()*sizeof(Vector4d), cudaMemcpyDeviceToHost);
Rfit::Map4d fast_fit(fast_fit_resultsGPUret+10,4);
std::cout << "Fitted values (FastFit, [X0, Y0, R, tan(theta)]): GPU\n" << fast_fit << std::endl;
assert(isEqualFuzzy(fast_fit_results, fast_fit));
// CIRCLE_FIT CPU
constexpr uint32_t N = Rfit::Map3x4d::ColsAtCompileTime;
constexpr auto n = N;
Rfit::VectorNd<N> rad = (hits.block(0, 0, 2, n).colwise().norm());
Rfit::Matrix2Nd<N> hits_cov = MatrixXd::Zero(2 * n, 2 * n);
Rfit::loadCovariance2D(hits_ge,hits_cov);
Rfit::circle_fit circle_fit_results = Rfit::Circle_fit(hits.block(0, 0, 2, n),
hits_cov,
fast_fit_results, rad, B, true);
std::cout << "Fitted values (CircleFit):\n" << circle_fit_results.par << std::endl;
// CIRCLE_FIT GPU
kernelCircleFit<<<Ntracks/64, 64>>>(hitsGPU, hits_geGPU,
fast_fit_resultsGPU, B, circle_fit_resultsGPU);
cudaDeviceSynchronize();
cudaMemcpy(circle_fit_resultsGPUret, circle_fit_resultsGPU,
sizeof(Rfit::circle_fit), cudaMemcpyDeviceToHost);
std::cout << "Fitted values (CircleFit) GPU:\n" << circle_fit_resultsGPUret->par << std::endl;
assert(isEqualFuzzy(circle_fit_results.par, circle_fit_resultsGPUret->par));
// LINE_FIT CPU
Rfit::line_fit line_fit_results = Rfit::Line_fit(hits, hits_ge, circle_fit_results, fast_fit_results, true);
std::cout << "Fitted values (LineFit):\n" << line_fit_results.par << std::endl;
// LINE_FIT GPU
Rfit::line_fit * line_fit_resultsGPUret = new Rfit::line_fit();
kernelLineFit<<<Ntracks/64, 64>>>(hitsGPU, hits_geGPU, circle_fit_resultsGPU, fast_fit_resultsGPU, line_fit_resultsGPU);
cudaDeviceSynchronize();
cudaMemcpy(line_fit_resultsGPUret, line_fit_resultsGPU, sizeof(Rfit::line_fit), cudaMemcpyDeviceToHost);
std::cout << "Fitted values (LineFit) GPU:\n" << line_fit_resultsGPUret->par << std::endl;
assert(isEqualFuzzy(line_fit_results.par, line_fit_resultsGPUret->par));
std::cout << "Fitted cov (CircleFit) CPU:\n" << circle_fit_results.cov << std::endl;
std::cout << "Fitted cov (LineFit): CPU\n" << line_fit_results.cov << std::endl;
std::cout << "Fitted cov (CircleFit) GPU:\n" << circle_fit_resultsGPUret->cov << std::endl;
std::cout << "Fitted cov (LineFit): GPU\n" << line_fit_resultsGPUret->cov << std::endl;
}
int main (int argc, char * argv[]) {
testFit();
std::cout << "TEST FIT, NO ERRORS" << std::endl;
return 0;
}
|
283f601e4ac536fc7ebf2e2d00268d21e35548b6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#ifdef __cplusplus
extern "C" {
#endif
__global__ void softplus64(double* A, int size)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int idx = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (idx >= size) {
return;
}
A[idx] = log1p(exp(A[idx]));
}
#ifdef __cplusplus
}
#endif
|
283f601e4ac536fc7ebf2e2d00268d21e35548b6.cu
|
#include <math.h>
#ifdef __cplusplus
extern "C" {
#endif
__global__ void softplus64(double* A, int size)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int idx = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (idx >= size) {
return;
}
A[idx] = log1p(exp(A[idx]));
}
#ifdef __cplusplus
}
#endif
|
9273280fec0f37623dd7319b55c06b8db3f281f1.hip
|
// !!! This is a file automatically generated by hipify!!!
//#pragma comment (lib, "cublas.lib")
//#include "stdio.h"
//#include <hip/hip_runtime.h>
//using namespace std;
//#include <ctime>
//#include "hip/hip_runtime.h"
//#include "hiprand/hiprand_kernel.h"
//#include "device_launch_parameters.h"
//#include <stdio.h>
//#include <stdlib.h>
//
//#include <string>
//#include <iomanip>
//#include <time.h>
//#include <iostream>
//#include <cmath>
//#include <math.h>
//
////#include "global_hip.cuh"
//#define TRAIN_NUM 60000
//#define TEST_NUM 10000
//#define ROW 28
//#define COL 28
//#define CONV_SIZE 24
//#define POOL_SIZE 12
//#define FC1_SIZE 45
//#define FC2_SIZE 10
//#define CONV_W_SIZE 5
//#define CONV_W_NUM 6
//
//__constant__ float _alpha;
//__constant__ int _minibatch;
//__constant__ int _epochs;
//
//__device__ int _correct_cnt;
//__device__ float _avg_error;
//
//__device__ float _train_image[TRAIN_NUM][ROW][COL];
//__device__ int _train_label[TRAIN_NUM];
//__device__ float _test_image[TEST_NUM][ROW][COL];
//__device__ int _test_label[TEST_NUM];
//
//__device__ float _conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE];
//__device__ float _conv_b[CONV_W_NUM];
//__device__ float _fc1_b[FC1_SIZE];
//__device__ float _fc1_w[FC1_SIZE][CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//__device__ float _fc2_b[FC2_SIZE];
//__device__ float _fc2_w[FC2_SIZE][FC1_SIZE];
//
//__device__ float _input[ROW][COL];
//__device__ float _conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE];
//__device__ float _conv_a[CONV_W_NUM][CONV_SIZE][CONV_SIZE];
//__device__ int _pool_pos[CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//__device__ float _pool[CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//__device__ float _fc1_z[FC1_SIZE];
//__device__ float _fc1_a[FC1_SIZE];
//__device__ float _fc2_z[FC2_SIZE];
//__device__ float _fc2_a[FC2_SIZE];
//__device__ float _output[FC2_SIZE];
//__device__ int _answer[FC2_SIZE];
//
//__device__ float _conv_dw[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE];
//__device__ float _conv_db[CONV_W_NUM];
//__device__ float _fc1_db[FC1_SIZE];
//__device__ float _fc1_dw[FC1_SIZE][CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//__device__ float _fc2_db[FC2_SIZE];
//__device__ float _fc2_dw[FC2_SIZE][FC1_SIZE];
//__device__ float _C[FC2_SIZE];
//__device__ float _fc2_delta[FC2_SIZE];
//__device__ float _fc1_delta[FC1_SIZE];
//__device__ float _conv_sigma_delta[CONV_W_NUM];
//__device__ float _conv_delta[CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//
//__device__ int tmp;
//
//
//float alpha = 0.2;
//int epochs = 5;
//int minibatch = 1;
//
//float train_image[TRAIN_NUM][ROW][COL];
//int train_label[TRAIN_NUM];
//float test_image[TEST_NUM][ROW][COL];
//int test_label[TEST_NUM];
//
//float conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE];
//float conv_b[CONV_W_NUM];
//float fc1_b[FC1_SIZE];
//float fc1_w[FC1_SIZE][CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//float fc2_b[FC2_SIZE];
//float fc2_w[FC2_SIZE][FC1_SIZE];
//
//float input[ROW][COL];
//float conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE];
//float conv_a[CONV_W_NUM][CONV_SIZE][CONV_SIZE];
//int pool_pos[CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//float pool[CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//float fc1_z[FC1_SIZE];
//float fc1_a[FC1_SIZE];
//float fc2_z[FC2_SIZE];
//float fc2_a[FC2_SIZE];
//float output[FC2_SIZE];
//int answer[FC2_SIZE];
//
//float conv_dw[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE];
//float conv_db[CONV_W_NUM];
//float fc1_db[FC1_SIZE];
//float fc1_dw[FC1_SIZE][CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//float fc2_db[FC2_SIZE];
//float fc2_dw[FC2_SIZE][FC1_SIZE];
//float C[FC2_SIZE];
//float fc2_delta[FC2_SIZE];
//float fc1_delta[FC1_SIZE];
//float conv_sigma_delta[CONV_W_NUM];
//float conv_delta[CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//
//#define CHECK(call)\
//{\
// const hipError_t error=call;\
// if(error!=hipSuccess)\
// {\
// printf("ERROR: %s:%d,",__FILE__,__LINE__);\
// printf("code:%d,reason:%s\n",error,hipGetErrorString(error));\
// exit(1);\
// }\
//}
//int swap_endian(int val)
//{
// unsigned char c1, c2, c3, c4;
// c1 = val & 255;
// c2 = (val >> 8) & 255;
// c3 = (val >> 16) & 255;
// c4 = (val >> 24) & 255;
// return ((int)c1 << 24) + ((int)c2 << 16) + ((int)c3 << 8) + c4;
//}
//
//float get_rand(float fan_in)
//{
// float sum = 0;
// for (int i = 0;i < 12;i++)
// sum += (float)rand() / RAND_MAX;
// sum -= 6;
// sum *= 1 / sqrt(fan_in);
// return sum;
//}
//void initDevice(int devNum)
//{
// int dev = devNum;
// hipDeviceProp_t deviceProp;
// CHECK(hipGetDeviceProperties(&deviceProp, dev));
// printf("Using device %d: %s\n", dev, deviceProp.name);
// CHECK(hipSetDevice(dev));
//}
//
//__device__ float _get_rand(int _rand, float fan_in)
//{
// float sum = 0;
// for (int i = 0;i < 12;i++)
// sum += (float)_rand / RAND_MAX;
// sum -= 6;
// sum *= 1 / sqrt(fan_in);
// return sum;
//}
//
//__device__ float _sigmoid(float x)
//{
// return (1 / (1 + exp(-1 * x)));
//}
//
////#include "io.cuh"
//void load_data()
//{
// FILE* f_images = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\train-images.idx3-ubyte", "rb");
// FILE* f_labels = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\train-labels.idx1-ubyte", "rb");
//
// int tmp;
//
// int magic_num;
// fread(&magic_num, sizeof(int), 1, f_images);
// fread(&magic_num, sizeof(int), 1, f_labels);
//
// // printf("debug:%d\n",swap_endian(magic_num));
//
// int train_size;
// fread(&train_size, sizeof(int), 1, f_images);
// fread(&train_size, sizeof(int), 1, f_labels);
// train_size = swap_endian(train_size);
//
// // printf("debug:%d\n",swap_endian(train_size));
//
// int rows, cols;
// fread(&rows, sizeof(int), 1, f_images);
// fread(&cols, sizeof(int), 1, f_images);
// rows = swap_endian(rows);
// cols = swap_endian(cols);
//
// // printf("debug:%d\n",swap_endian(rows));
// // printf("debug:%d\n",swap_endian(cols));
//
// for (int i = 0;i < train_size;i++)
// {
// fread(&train_label[i], 1, 1, f_labels);
// if (i % 1000 == 0)
// printf("Training labels : Already read %5d labels\r", i);
// // printf("%d:debug:%d\r",i,train_label[i]);
// // system("pause");
// }
// printf("Training labels : Already read %5d labels\n", train_size);
//
// for (int i = 0;i < train_size;i++)
// {
// for (int j = 0;j < rows;j++)
// for (int k = 0;k < cols;k++)
// {
// tmp = 0;
// fread(&tmp, 1, 1, f_images);
// train_image[i][j][k] = tmp;
// train_image[i][j][k] /= 255;
// // printf("%d %d %d debug: %f\n",i,j,k,train_image[i][j][k]);
// // system("pause");
// }
// if (i % 1000 == 0)
// printf("Training images : Already read %5d images\r", i);
// }
// printf("Training images : Already read %5d images\n", train_size);
//
// fclose(f_images);
// fclose(f_labels);
//
// f_images = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\t10k-images.idx3-ubyte", "rb");
// f_labels = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\t10k-labels.idx1-ubyte", "rb");
//
// fread(&magic_num, sizeof(int), 1, f_images);
// fread(&magic_num, sizeof(int), 1, f_labels);
//
// int test_size;
// fread(&test_size, sizeof(int), 1, f_images);
// fread(&test_size, sizeof(int), 1, f_labels);
// test_size = swap_endian(test_size);
//
// fread(&rows, sizeof(int), 1, f_images);
// fread(&cols, sizeof(int), 1, f_images);
// rows = swap_endian(rows);
// cols = swap_endian(cols);
//
// for (int i = 0;i < test_size;i++)
// {
// fread(&test_label[i], 1, 1, f_labels);
// if (i % 1000 == 0)
// printf("Testing labels : Already read %5d labels\r", i);
// }
// printf("Testing labels : Already read %5d labels\n", test_size);
//
// for (int i = 0;i < test_size;i++)
// {
// for (int j = 0;j < rows;j++)
// for (int k = 0;k < cols;k++)
// {
// tmp = 0;
// fread(&tmp, 1, 1, f_images);
// test_image[i][j][k] = tmp;
// test_image[i][j][k] /= 255;
// }
// if (i % 1000 == 0)
// printf("Testing images : Already read %5d images\r", i);
// }
// printf("Testing images : Already read %5d images\n\n", test_size);
//
// fclose(f_images);
// fclose(f_labels);
//}
//
//void export_params()
//{
// FILE* f_params = fopen("./params.txt", "w");
//
// fprintf(f_params, "6\n");
//
// fprintf(f_params, "conv1bias 0 6 ");
// for (int i = 0;i < CONV_W_NUM;i++)
// fprintf(f_params, "%X ", *(int*)& conv_b[i]);
// fprintf(f_params, "\n");
//
// fprintf(f_params, "conv1filter 0 150 ");
// for (int i = 0;i < CONV_W_NUM;i++)
// for (int j = 0;j < CONV_W_SIZE;j++)
// for (int k = 0;k < CONV_W_SIZE;k++)
// fprintf(f_params, "%X ", *(int*)& conv_w[i][j][k]);
// fprintf(f_params, "\n");
//
// fprintf(f_params, "ip1bias 0 45 ");
// for (int i = 0;i < FC1_SIZE;i++)
// fprintf(f_params, "%X ", *(int*)& fc1_b[i]);
// fprintf(f_params, "\n");
//
// fprintf(f_params, "ip1filter 0 38880 ");
// for (int i = 0;i < FC1_SIZE;i++)
// for (int j = 0;j < CONV_W_NUM;j++)
// for (int k = 0;k < POOL_SIZE;k++)
// for (int l = 0;l < POOL_SIZE;l++)
// fprintf(f_params, "%X ", *(int*)& fc1_w[i][j][k][l]);
// fprintf(f_params, "\n");
//
// fprintf(f_params, "ip2bias 0 10 ");
// for (int i = 0;i < FC2_SIZE;i++)
// fprintf(f_params, "%X ", *(int*)& fc2_b[i]);
// fprintf(f_params, "\n");
//
// fprintf(f_params, "ip2filter 0 450 ");
// for (int i = 0;i < FC2_SIZE;i++)
// for (int j = 0;j < FC1_SIZE;j++)
// fprintf(f_params, "%X ", *(int*)& fc2_w[i][j]);
//
// fclose(f_params);
//
//}
//
////#include "global_gpu.cuh"
////#include "utils_gpu.cuh"
////#include "init_gpu.cuh"
//
//void init_data_gpu()
//{
// CHECK(hipMemcpyToSymbol(_train_image, train_image, TRAIN_NUM * ROW * COL * sizeof(float)));
// CHECK(hipMemcpyToSymbol(_train_label, train_label, sizeof(train_label)));
// CHECK(hipMemcpyToSymbol(_test_image, test_image, TEST_NUM * ROW * COL * sizeof(float)));
// CHECK(hipMemcpyToSymbol(_test_label, test_label, sizeof(test_label)));
//}
//
//__global__ void init_conv_b(int seed)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// hiprandState_t state;
// hiprand_init(seed, ix, 0, &state);
// float rn = _get_rand(abs((int)hiprand(&state)) % RAND_MAX, CONV_W_SIZE * CONV_W_SIZE);
// if (ix < CONV_W_NUM)
// _conv_b[ix] = rn;
//}
//
//__global__ void init_conv_w(int seed)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// int iz = threadIdx.z + blockDim.z * blockIdx.z;
// int idx = ix + iy * CONV_W_SIZE + iz * CONV_W_SIZE * CONV_W_SIZE;
// hiprandState_t state;
// hiprand_init(seed, idx, 0, &state);
// float rn = _get_rand(abs((int)hiprand(&state)) % RAND_MAX, CONV_W_SIZE * CONV_W_SIZE);
// if (ix < CONV_W_NUM && iy < CONV_W_SIZE && iz < CONV_W_SIZE)
// _conv_w[ix][iy][iz] = rn;
//}
//
//__global__ void init_fc1_b(int seed)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// hiprandState_t state;
// hiprand_init(seed, ix, 0, &state);
// float rn = _get_rand(abs((int)hiprand(&state)) % RAND_MAX, POOL_SIZE * POOL_SIZE * CONV_W_NUM);
// if (ix < FC1_SIZE)
// _fc1_b[ix] = rn;
//}
//
//__global__ void init_fc1_w(int seed, int i)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// int iz = threadIdx.z + blockDim.z * blockIdx.z;
// int idx = ix + iy * POOL_SIZE + iz * POOL_SIZE * POOL_SIZE;
// hiprandState_t state;
// hiprand_init(seed, idx, 0, &state);
// float rn = _get_rand(abs((int)hiprand(&state)) % RAND_MAX, POOL_SIZE * POOL_SIZE * CONV_W_NUM);
// if (ix < CONV_W_NUM && iy < POOL_SIZE && iz < POOL_SIZE)
// _fc1_w[i][ix][iy][iz] = rn;
//}
//
//__global__ void init_fc2_b(int seed)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// hiprandState_t state;
// hiprand_init(seed, ix, 0, &state);
// float rn = _get_rand(abs((int)hiprand(&state)) % RAND_MAX, FC1_SIZE);
// if (ix < FC2_SIZE)
// _fc2_b[ix] = rn;
//}
//
//__global__ void init_fc2_w(int seed)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// int idx = ix + iy * FC1_SIZE;
// hiprandState_t state;
// hiprand_init(seed, idx, 0, &state);
// float rn = _get_rand(abs((int)hiprand(&state)) % RAND_MAX, FC1_SIZE);
// if (ix < FC2_SIZE && iy < FC1_SIZE)
// _fc2_w[ix][iy] = rn;
//}
//
//void init_params_gpu()
//{
// srand((unsigned)time(NULL));
//
// dim3 block1(32);
// dim3 grid1((CONV_W_NUM - 1) / block1.x + 1);
// dim3 block2(32, 32, 32);
// dim3 grid2((CONV_W_NUM - 1) / block2.x + 1, (CONV_W_SIZE - 1) / block2.y + 1, (CONV_W_SIZE - 1) / block2.z + 1);
// dim3 block3(32);
// dim3 grid3((FC1_SIZE - 1) / block3.x + 1);
// dim3 block4(32, 32, 32);
// dim3 grid4((CONV_W_NUM - 1) / block4.x + 1, (POOL_SIZE - 1) / block4.y + 1, (POOL_SIZE - 1) / block4.z + 1);
// dim3 block5(32);
// dim3 grid5((FC2_SIZE - 1) / block5.x + 1);
// dim3 block6(32, 32);
// dim3 grid6((FC2_SIZE - 1) / block6.x + 1, (FC1_SIZE - 1) / block6.y + 1);
//
// init_conv_b << <block1, grid1 >> > (rand());
// init_conv_w << <block2, grid2 >> > (rand());
// init_fc1_b << <block3, grid3 >> > (rand());
//
//#pragma omp parallel for
// for (int i = 0;i < FC1_SIZE;i++)
// init_fc1_w << <block4, grid4 >> > (rand(), i);
// init_fc2_b << <block5, grid5 >> > (rand());
// init_fc2_w << <block6, grid6 >> > (rand());
// hipDeviceSynchronize();
//}
////#include "test_gpu.cuh"
//__global__ void test_gpu()
//{
// printf("%f %d %d\n", _alpha, _epochs, _minibatch);
// printf("%d\n", tmp);
// tmp = 18;
// printf("%d\n", tmp);
//}
//
//__global__ void test_gpu1()
//{
// printf("====\n");
// printf("%d\n", tmp);
// tmp = 19;
// printf("%d\n", tmp);
//}
////#include "fp_gpu.cuh"
//
//__global__ void _set_input_train(int idx)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// if (ix < ROW && iy < COL)
// {
// _input[ix][iy] = _train_image[idx][ix][iy];
// }
//}
//
//__global__ void _set_input_test(int idx)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// if (ix < ROW && iy < COL)
// {
// _input[ix][iy] = _test_image[idx][ix][iy];
// }
//}
//
//void set_input_gpu_train(int idx)
//{
// dim3 block(32, 32);
// dim3 grid((ROW - 1) / block.x + 1, (COL - 1) / block.y + 1);
// _set_input_train << <block, grid >> > (idx);
// hipDeviceSynchronize();
//}
//
//void set_input_gpu_test(int idx)
//{
// dim3 block(32, 32);
// dim3 grid((ROW - 1) / block.x + 1, (COL - 1) / block.y + 1);
// _set_input_test << <block, grid >> > (idx);
// hipDeviceSynchronize();
//}
//
//__global__ void _input_conv()
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// int iz = threadIdx.z + blockDim.z * blockIdx.z;
// if (ix < CONV_W_NUM && iy < CONV_SIZE && iz < CONV_SIZE)
// {
// _conv_z[ix][iy][iz] = 0;
// // #pragma unroll
// for (int l = 0;l < CONV_W_SIZE;l++)
// for (int m = 0;m < CONV_W_SIZE;m++)
// _conv_z[ix][iy][iz] += _input[iy + l][iz + m] * _conv_w[ix][l][m];
// _conv_z[ix][iy][iz] += _conv_b[ix];
// _conv_a[ix][iy][iz] = _sigmoid(_conv_z[ix][iy][iz]);
// }
//}
//
//void input_conv_gpu()
//{
// dim3 block(8, 8, 8);
// dim3 grid((CONV_W_NUM - 1) / block.x + 1, (CONV_SIZE - 1) / block.y + 1, (CONV_SIZE - 1) / block.z + 1);
// _input_conv << <block, grid >> > ();
// hipDeviceSynchronize();
//}
//
//__global__ void _conv_pool()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int j = threadIdx.y + blockDim.y * blockIdx.y;
// int k = threadIdx.z + blockDim.z * blockIdx.z;
// if (i < CONV_W_NUM && j < POOL_SIZE && k < POOL_SIZE)
// {
// float _max = _conv_a[i][j * 2][k * 2];
// _pool_pos[i][j][k] = 0;
// if (_conv_a[i][j * 2][k * 2 + 1] > _max)
// {
// _max = _conv_a[i][j * 2][k * 2 + 1];
// _pool_pos[i][j][k] = 1;
// }
// if (_conv_a[i][j * 2 + 1][k * 2] > _max)
// {
// _max = _conv_a[i][j * 2 + 1][k * 2];
// _pool_pos[i][j][k] = 2;
// }
// if (_conv_a[i][j * 2 + 1][k * 2 + 1] > _max)
// {
// _max = _conv_a[i][j * 2 + 1][k * 2 + 1];
// _pool_pos[i][j][k] = 3;
// }
// _pool[i][j][k] = _max;
// }
//}
//
//void conv_pool_gpu()
//{
// dim3 block(8, 8, 8);
// dim3 grid((CONV_W_NUM - 1) / block.x + 1, (POOL_SIZE - 1) / block.y + 1, (POOL_SIZE - 1) / block.z + 1);
// _conv_pool << <block, grid >> > ();
// hipDeviceSynchronize();
//}
//
//__global__ void _pool_fc1()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC1_SIZE)
// {
// _fc1_z[i] = 0;
// for (int j = 0;j < CONV_W_NUM;j++)
// for (int k = 0;k < POOL_SIZE;k++)
// for (int l = 0;l < POOL_SIZE;l++)
// _fc1_z[i] += _pool[j][k][l] * _fc1_w[i][j][k][l];
// _fc1_z[i] += _fc1_b[i];
// _fc1_a[i] = _sigmoid(_fc1_z[i]);
// }
//}
//
//void pool_fc1_gpu()
//{
// dim3 block(32);
// dim3 grid((FC1_SIZE - 1) / block.x + 1);
// _pool_fc1 << <block, grid >> > ();
// hipDeviceSynchronize();
//}
//
//__global__ void _fc1_fc2()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _fc2_z[i] = 0;
// for (int j = 0;j < FC1_SIZE;j++)
// _fc2_z[i] += _fc1_a[j] * _fc2_w[i][j];
// _fc2_z[i] += _fc2_b[i];
// _fc2_a[i] = _sigmoid(_fc2_z[i]);
// }
//}
//
//void fc1_fc2_gpu()
//{
// dim3 block(32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1);
// _fc1_fc2 << <block, grid >> > ();
// hipDeviceSynchronize();
//}
//
//__global__ void _set_answer_train(int idx)
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _output[i] = _fc2_a[i];
// _answer[i] = (_train_label[idx] == i) ? 1 : 0;
// }
//}
//
//__global__ void _set_answer_test(int idx)
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _output[i] = _fc2_a[i];
// _answer[i] = (_test_label[idx] == i) ? 1 : 0;
// }
//}
//
//void set_answer_gpu_train(int idx)
//{
// dim3 block(32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1);
// _set_answer_train << <block, grid >> > (idx);
// hipDeviceSynchronize();
//}
//
//void set_answer_gpu_test(int idx)
//{
// dim3 block(32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1);
// _set_answer_test << <block, grid >> > (idx);
// hipDeviceSynchronize();
//}
//
//__global__ void _check_answer_get_error()
//{
// float _max = _output[0];
// int max_pos = 0;
// for (int i = 0;i < FC2_SIZE;i++)
// {
// if (_max < _output[i])
// {
// _max = _output[i];
// max_pos = i;
// }
// }
// if (_answer[max_pos])
// _correct_cnt++;
// for (int i = 0;i < FC2_SIZE;i++)
// {
// _C[i] = _output[i] - _answer[i];
// _avg_error += _C[i] * _C[i] * 0.5;
// }
//}
//
//void check_answer_get_error_gpu()
//{
// _check_answer_get_error << <1, 1 >> > ();
// hipDeviceSynchronize();
//}
////#include "bp_gpu.cuh"
//
//__global__ void _update_fc2_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _fc2_delta[i] = _alpha * _C[i] * (_fc2_a[i] * (1.0 - _fc2_a[i]));
// _fc2_db[i] += _fc2_delta[i];
// }
//}
//
//void update_fc2_b_gpu()
//{
// dim3 block(32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1);
// _update_fc2_b << <block, grid >> > ();
// hipDeviceSynchronize();
//}
//
//__global__ void _update_fc2_w()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int j = threadIdx.y + blockDim.y * blockIdx.y;
// if (i < FC2_SIZE && j < FC1_SIZE)
// _fc2_dw[i][j] += _fc2_delta[i] * _fc1_a[j];
//}
//
//void update_fc2_w_gpu()
//{
// dim3 block(32, 32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1, (FC1_SIZE - 1) / block.x + 1);
// _update_fc2_w << <block, grid >> > ();
// hipDeviceSynchronize();
//}
//
//__global__ void _update_fc1_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC1_SIZE)
// {
// float error = 0;
// for (int j = 0;j < FC2_SIZE;j++)
// error += _fc2_delta[j] * _fc2_w[j][i];
// _fc1_delta[i] = error * (_fc1_a[i] * (1.0 - _fc1_a[i]));
// _fc1_db[i] += _fc1_delta[i];
// }
//}
//
//void update_fc1_b_gpu()
//{
// dim3 block(32);
// dim3 grid((FC1_SIZE - 1) / block.x + 1);
// _update_fc1_b << <block, grid >> > ();
// hipDeviceSynchronize();
//}
//
//__global__ void _update_fc1_w(int j)
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int k = threadIdx.y + blockDim.y * blockIdx.y;
// int l = threadIdx.z + blockDim.z * blockIdx.z;
// if (i < FC1_SIZE && k < POOL_SIZE && l < POOL_SIZE)
// _fc1_dw[i][j][k][l] += _fc1_delta[i] * _pool[j][k][l];
//}
//
//void update_fc1_w_gpu()
//{
// dim3 block(8, 8, 8);
// dim3 grid((FC1_SIZE - 1) / block.x + 1, (POOL_SIZE - 1) / block.y + 1, (POOL_SIZE - 1) / block.z + 1);
//
// // #pragma omp parallel for
// for (int j = 0;j < CONV_W_NUM;j++)
// _update_fc1_w << <block, grid >> > (j);
// hipDeviceSynchronize();
//}
//
//__global__ void _update_conv_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < CONV_W_NUM)
// {
// _conv_sigma_delta[i] = 0;
// for (int j = 0;j < POOL_SIZE;j++)
// for (int k = 0;k < POOL_SIZE;k++)
// {
// float error = 0;
// _conv_delta[i][j][k] = 0;
// for (int l = 0;l < FC1_SIZE;l++)
// error += _fc1_delta[l] * _fc1_w[l][i][j][k];
// _conv_delta[i][j][k] = error * (_pool[i][j][k] * (1.0 - _pool[i][j][k]));
// _conv_sigma_delta[i] += error * (_pool[i][j][k] * (1.0 - _pool[i][j][k]));
// }
// _conv_db[i] += _conv_sigma_delta[i];
// }
//}
//
//void update_conv_b_gpu()
//{
// dim3 block(32);
// dim3 grid((CONV_W_NUM - 1) / block.x + 1);
// _update_conv_b << <block, grid >> > ();
// hipDeviceSynchronize();
//}
//
//__global__ void _update_conv_w()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int j = threadIdx.y + blockDim.y * blockIdx.y;
// int k = threadIdx.z + blockDim.z * blockIdx.z;
// if (i < CONV_W_NUM && j < CONV_W_SIZE && k < CONV_W_SIZE)
// {
// float error = 0;
// for (int m = 0;m < POOL_SIZE;m++)
// for (int n = 0;n < POOL_SIZE;n++)
// {
// int x = _pool_pos[i][m][n] / 2;
// int y = _pool_pos[i][m][n] % 2;
// error += _conv_delta[i][m][n] * _input[2 * m + j + x][2 * n + k + y];
// }
// _conv_dw[i][j][k] += error;
// }
//}
//
//void update_conv_w_gpu()
//{
// dim3 block(8, 8, 8);
// dim3 grid((CONV_W_NUM - 1) / block.x + 1, (CONV_W_SIZE - 1) / block.y + 1, (CONV_W_SIZE - 1) / block.z + 1);
// _update_conv_w << <block, grid >> > ();
// hipDeviceSynchronize();
//}
//
//__global__ void assign_fc2_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _fc2_b[i] -= (_fc2_db[i] / _minibatch);
// _fc2_db[i] = 0;
// }
//}
//
//__global__ void assign_fc2_w()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int j = threadIdx.y + blockDim.y * blockIdx.y;
// if (i < FC2_SIZE && j < FC1_SIZE)
// {
// _fc2_w[i][j] -= (_fc2_dw[i][j] / _minibatch);
// _fc2_dw[i][j] = 0;
// }
//}
//
//__global__ void assign_fc1_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC1_SIZE)
// {
// _fc1_b[i] -= (_fc1_db[i] / _minibatch);
// _fc1_db[i] = 0;
// }
//}
//
//__global__ void assign_fc1_w(int j)
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int k = threadIdx.y + blockDim.y * blockIdx.y;
// int l = threadIdx.z + blockDim.z * blockIdx.z;
// if (i < FC1_SIZE && k < POOL_SIZE && l < POOL_SIZE)
// {
// _fc1_w[i][j][k][l] -= (_fc1_dw[i][j][k][l] / _minibatch);
// _fc1_dw[i][j][k][l] = 0;
// }
//}
//
//__global__ void assign_conv_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < CONV_W_NUM)
// {
// _conv_b[i] -= (_conv_db[i] / _minibatch);
// _conv_db[i] = 0;
// }
//}
//
//__global__ void assign_conv_w()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int l = threadIdx.y + blockDim.y * blockIdx.y;
// int m = threadIdx.z + blockDim.z * blockIdx.z;
// if (i < CONV_W_NUM && l < CONV_W_SIZE && m < CONV_W_SIZE)
// {
// _conv_w[i][l][m] -= (_conv_dw[i][l][m] / _minibatch);
// _conv_dw[i][l][m] = 0;
// }
//}
//
//void assign_grads_gpu()
//{
// dim3 block1(32);
// dim3 grid1((FC2_SIZE - 1) / block1.x + 1);
// assign_fc2_b << <block1, grid1 >> > ();
//
// dim3 block2(32, 32);
// dim3 grid2((FC2_SIZE - 1) / block2.x + 1, (FC1_SIZE - 1) / block2.y + 1);
// assign_fc2_w << <block2, grid2 >> > ();
//
// dim3 block3(32);
// dim3 grid3((FC1_SIZE - 1) / block3.x + 1);
// assign_fc1_b << <block3, grid3 >> > ();
//
// dim3 block4(8, 8, 8);
// dim3 grid4((FC1_SIZE - 1) / block4.x + 1, (POOL_SIZE - 1) / block4.y + 1, (POOL_SIZE - 1) / block4.z + 1);
// for (int j = 0;j < CONV_W_NUM;j++)
// assign_fc1_w << <block4, grid4 >> > (j);
//
// dim3 block5(32);
// dim3 grid5((CONV_W_NUM - 1) / block5.x + 1);
// assign_conv_b << <block5, grid5 >> > ();
//
// dim3 block6(8, 8, 8);
// dim3 grid6((CONV_W_NUM - 1) / block6.x + 1, (CONV_W_SIZE - 1) / block6.y + 1, (CONV_W_SIZE - 1) / block6.z + 1);
// assign_conv_w << <block6, grid6 >> > ();
//
// hipDeviceSynchronize();
//}
//
//int correct_cnt;
//float avg_error;
//float max_acc;
//
//__global__ void _test()
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// int iz = threadIdx.z + blockDim.z * blockIdx.z;
//
// for (int i = 5000;i < 5001;i++)
// for (int j = 0;j < ROW;j++)
// {
// for (int k = 0;k < COL;k++)
// printf("%f ", _test_image[i][j][k]);
// printf("\n");
// }
// printf("%d", _test_label[5000]);
//
// // printf("%f ",_test_image[ix][iy][iz]);
//}
//
//void test()
//{
// puts("");
// puts("debug1");
// dim3 block(1, 1, 1);
// dim3 grid(1, 1, 1);
// _test << <block, grid >> > ();
// puts("debug2");
// hipDeviceSynchronize();
// puts("debug3");
//}
//#define BASE_TYPE int
//#define N 1000
//#define M 64
//__global__ void scalMult(const BASE_TYPE * A, const BASE_TYPE * B, BASE_TYPE * C) {
// BASE_TYPE sum = 0;
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
// sum = A[idx] * B[idx];
// atomicAdd(C, sum);
//}
//
//void scal(int* dev_a, int* dev_b, int* dev_c, dim3 blocksPerGrid) {
// scalMult << <blocksPerGrid, M >> > (dev_a, dev_b, dev_c);
//}
//int main2(int argc, char* argv[])
//{
// hipEvent_t start, stop;
// hipEventCreate(&start);
// hipEventCreate(&stop);
//
// int host_a[N], host_b[N];
// int* host_c = (int*)malloc(sizeof(int));
// int* dev_a, * dev_b, * dev_c, * dev_res;
// cout << "a" << " " << "b" << endl;
// for (int i = 0; i < N; i++)
// {
// host_a[i] = rand() % 10;
// host_b[i] = rand() % 10;
// //cout << host_a[i] << " " << host_b[i] << endl;
// }
// hipMalloc((void**)& dev_a, N * sizeof(int));
// hipMalloc((void**)& dev_b, N * sizeof(int));
// hipMalloc((void**)& dev_c, sizeof(int));
// hipMemcpy(dev_a, host_a, N * sizeof(int), hipMemcpyHostToDevice);
// hipMemcpy(dev_b, host_b, N * sizeof(int), hipMemcpyHostToDevice);
// hipMemset(dev_c, 0, sizeof(int));
// //dim3 threadsPerBlock = dim3(BS, BS);
// dim3 blocksPerGrid = dim3(N / M);
// hipEventRecord(start, 0);
// scal(dev_a, dev_b, dev_c, blocksPerGrid);
//
// //
// hipEventRecord(stop, 0);
// hipEventSynchronize(stop);
// float KernelTime;
// hipEventElapsedTime(&KernelTime, start, stop);
// printf("KernelTime: %.2f milliseconds\n", KernelTime);
// hipMemcpy(host_c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
// printf("Result: %d", host_c[0]);
// hipFree(dev_a);
// hipFree(dev_b);
// hipFree(dev_c);
// hipEventDestroy(start);
// hipEventDestroy(stop);
// printf("====== aininot260 [email protected] ======\n");
// printf(" Processor used : %s\n", argv[1]);
// printf(" Learning rate : %.2f\n", alpha);
// printf(" Epochs : %d\n", epochs);
// printf(" Batch size : %d\n", minibatch);
// printf("========================================\n");
// printf("\n");
//
// load_data();
//
// clock_t t = clock();
//
// //initDevice(0);
// CHECK(hipMemcpyToSymbol(_alpha, &alpha, sizeof(float)));
// CHECK(hipMemcpyToSymbol(_minibatch, &minibatch, sizeof(int)));
// CHECK(hipMemcpyToSymbol(_epochs, &epochs, sizeof(int)));
// init_data_gpu();
// set_input_gpu_train(1);
// init_params_gpu();
//
// for (int i = 1;i <= epochs;i++)
// {
//
// int value1 = 0;
// float value2 = 0;
// hipMemcpy((void*)& _correct_cnt, &value1, sizeof(int), hipMemcpyHostToDevice);
// CHECK(hipMemcpyToSymbol(_correct_cnt,&value1,sizeof(int)));
// hipMemcpy((void*)& _avg_error, &value2, sizeof(int), hipMemcpyHostToDevice);
// CHECK(hipMemcpyToSymbol(_avg_error,&value2,sizeof(float)));
// //hipMemcpyToSymbol(_correct_cnt, &value1, sizeof(int));
// //hipMemcpyToSymbol(_avg_error, &value2, sizeof(float));
// hipDeviceSynchronize();
//
// for (int j = 0;j < TRAIN_NUM;j++)
// {
// set_input_gpu_train(j);
// input_conv_gpu();
// conv_pool_gpu();
// pool_fc1_gpu();
// fc1_fc2_gpu();
// set_answer_gpu_train(j);
// check_answer_get_error_gpu();
//
// update_fc2_b_gpu();
// update_fc2_w_gpu();
// update_fc1_b_gpu();
// update_fc1_w_gpu();
// update_conv_b_gpu();
// update_conv_w_gpu();
// if ((j + 1) % minibatch == 0)
// assign_grads_gpu();
//
// if (j && j % 100 == 0)
// {
//
// hipMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// hipMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j, ((float)correct_cnt / j) * 100, (avg_error / j) * 100, i);
// }
// }
//
// hipMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// hipMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TRAIN_NUM, ((float)correct_cnt / TRAIN_NUM) * 100, (avg_error / TRAIN_NUM) * 100, i);
//
// correct_cnt = 0;
// avg_error = 0;
// hipMemcpyToSymbol(_correct_cnt, &correct_cnt, sizeof(int));
// hipMemcpyToSymbol(_avg_error, &avg_error, sizeof(float));
//
// for (int j = 0;j < TEST_NUM;j++)
// {
// set_input_gpu_test(j);
// input_conv_gpu();
// conv_pool_gpu();
// pool_fc1_gpu();
// fc1_fc2_gpu();
// set_answer_gpu_test(j);
// check_answer_get_error_gpu();
//
// if (j && j % 100 == 0)
// {
// hipMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// hipMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j, ((float)correct_cnt / j) * 100, (avg_error / j) * 100);
// }
// }
// hipMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// hipMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TEST_NUM, ((float)correct_cnt / TEST_NUM) * 100, (avg_error / TEST_NUM) * 100);
//
// if ((float)correct_cnt / TEST_NUM * 100 > max_acc)
// {
// max_acc = (float)correct_cnt / TEST_NUM * 100;
// //export_params();
// printf("The new model has been exported.Accuracy has reached to %0.5f%%\n\n", max_acc);
// }
// else
// {
// alpha = alpha - (alpha / 3);
// hipMemcpyToSymbol(_alpha, &alpha, sizeof(float));
// printf("Learning rate has been reduced to %f\n\n", alpha);
// }
// }
// return 0;
//}
|
9273280fec0f37623dd7319b55c06b8db3f281f1.cu
|
//#pragma comment (lib, "cublas.lib")
//#include "stdio.h"
//#include <cuda.h>
//using namespace std;
//#include <ctime>
//#include "cuda_runtime.h"
//#include "curand_kernel.h"
//#include "device_launch_parameters.h"
//#include <stdio.h>
//#include <stdlib.h>
//
//#include <string>
//#include <iomanip>
//#include <time.h>
//#include <iostream>
//#include <cmath>
//#include <math.h>
//
////#include "global.cuh"
//#define TRAIN_NUM 60000
//#define TEST_NUM 10000
//#define ROW 28
//#define COL 28
//#define CONV_SIZE 24
//#define POOL_SIZE 12
//#define FC1_SIZE 45
//#define FC2_SIZE 10
//#define CONV_W_SIZE 5
//#define CONV_W_NUM 6
//
//__constant__ float _alpha;
//__constant__ int _minibatch;
//__constant__ int _epochs;
//
//__device__ int _correct_cnt;
//__device__ float _avg_error;
//
//__device__ float _train_image[TRAIN_NUM][ROW][COL];
//__device__ int _train_label[TRAIN_NUM];
//__device__ float _test_image[TEST_NUM][ROW][COL];
//__device__ int _test_label[TEST_NUM];
//
//__device__ float _conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE];
//__device__ float _conv_b[CONV_W_NUM];
//__device__ float _fc1_b[FC1_SIZE];
//__device__ float _fc1_w[FC1_SIZE][CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//__device__ float _fc2_b[FC2_SIZE];
//__device__ float _fc2_w[FC2_SIZE][FC1_SIZE];
//
//__device__ float _input[ROW][COL];
//__device__ float _conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE];
//__device__ float _conv_a[CONV_W_NUM][CONV_SIZE][CONV_SIZE];
//__device__ int _pool_pos[CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//__device__ float _pool[CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//__device__ float _fc1_z[FC1_SIZE];
//__device__ float _fc1_a[FC1_SIZE];
//__device__ float _fc2_z[FC2_SIZE];
//__device__ float _fc2_a[FC2_SIZE];
//__device__ float _output[FC2_SIZE];
//__device__ int _answer[FC2_SIZE];
//
//__device__ float _conv_dw[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE];
//__device__ float _conv_db[CONV_W_NUM];
//__device__ float _fc1_db[FC1_SIZE];
//__device__ float _fc1_dw[FC1_SIZE][CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//__device__ float _fc2_db[FC2_SIZE];
//__device__ float _fc2_dw[FC2_SIZE][FC1_SIZE];
//__device__ float _C[FC2_SIZE];
//__device__ float _fc2_delta[FC2_SIZE];
//__device__ float _fc1_delta[FC1_SIZE];
//__device__ float _conv_sigma_delta[CONV_W_NUM];
//__device__ float _conv_delta[CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//
//__device__ int tmp;
//
//
//float alpha = 0.2;
//int epochs = 5;
//int minibatch = 1;
//
//float train_image[TRAIN_NUM][ROW][COL];
//int train_label[TRAIN_NUM];
//float test_image[TEST_NUM][ROW][COL];
//int test_label[TEST_NUM];
//
//float conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE];
//float conv_b[CONV_W_NUM];
//float fc1_b[FC1_SIZE];
//float fc1_w[FC1_SIZE][CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//float fc2_b[FC2_SIZE];
//float fc2_w[FC2_SIZE][FC1_SIZE];
//
//float input[ROW][COL];
//float conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE];
//float conv_a[CONV_W_NUM][CONV_SIZE][CONV_SIZE];
//int pool_pos[CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//float pool[CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//float fc1_z[FC1_SIZE];
//float fc1_a[FC1_SIZE];
//float fc2_z[FC2_SIZE];
//float fc2_a[FC2_SIZE];
//float output[FC2_SIZE];
//int answer[FC2_SIZE];
//
//float conv_dw[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE];
//float conv_db[CONV_W_NUM];
//float fc1_db[FC1_SIZE];
//float fc1_dw[FC1_SIZE][CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//float fc2_db[FC2_SIZE];
//float fc2_dw[FC2_SIZE][FC1_SIZE];
//float C[FC2_SIZE];
//float fc2_delta[FC2_SIZE];
//float fc1_delta[FC1_SIZE];
//float conv_sigma_delta[CONV_W_NUM];
//float conv_delta[CONV_W_NUM][POOL_SIZE][POOL_SIZE];
//
//#define CHECK(call)\
//{\
// const cudaError_t error=call;\
// if(error!=cudaSuccess)\
// {\
// printf("ERROR: %s:%d,",__FILE__,__LINE__);\
// printf("code:%d,reason:%s\n",error,cudaGetErrorString(error));\
// exit(1);\
// }\
//}
//int swap_endian(int val)
//{
// unsigned char c1, c2, c3, c4;
// c1 = val & 255;
// c2 = (val >> 8) & 255;
// c3 = (val >> 16) & 255;
// c4 = (val >> 24) & 255;
// return ((int)c1 << 24) + ((int)c2 << 16) + ((int)c3 << 8) + c4;
//}
//
//float get_rand(float fan_in)
//{
// float sum = 0;
// for (int i = 0;i < 12;i++)
// sum += (float)rand() / RAND_MAX;
// sum -= 6;
// sum *= 1 / sqrt(fan_in);
// return sum;
//}
//void initDevice(int devNum)
//{
// int dev = devNum;
// cudaDeviceProp deviceProp;
// CHECK(cudaGetDeviceProperties(&deviceProp, dev));
// printf("Using device %d: %s\n", dev, deviceProp.name);
// CHECK(cudaSetDevice(dev));
//}
//
//__device__ float _get_rand(int _rand, float fan_in)
//{
// float sum = 0;
// for (int i = 0;i < 12;i++)
// sum += (float)_rand / RAND_MAX;
// sum -= 6;
// sum *= 1 / sqrt(fan_in);
// return sum;
//}
//
//__device__ float _sigmoid(float x)
//{
// return (1 / (1 + exp(-1 * x)));
//}
//
////#include "io.cuh"
//void load_data()
//{
// FILE* f_images = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\train-images.idx3-ubyte", "rb");
// FILE* f_labels = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\train-labels.idx1-ubyte", "rb");
//
// int tmp;
//
// int magic_num;
// fread(&magic_num, sizeof(int), 1, f_images);
// fread(&magic_num, sizeof(int), 1, f_labels);
//
// // printf("debug:%d\n",swap_endian(magic_num));
//
// int train_size;
// fread(&train_size, sizeof(int), 1, f_images);
// fread(&train_size, sizeof(int), 1, f_labels);
// train_size = swap_endian(train_size);
//
// // printf("debug:%d\n",swap_endian(train_size));
//
// int rows, cols;
// fread(&rows, sizeof(int), 1, f_images);
// fread(&cols, sizeof(int), 1, f_images);
// rows = swap_endian(rows);
// cols = swap_endian(cols);
//
// // printf("debug:%d\n",swap_endian(rows));
// // printf("debug:%d\n",swap_endian(cols));
//
// for (int i = 0;i < train_size;i++)
// {
// fread(&train_label[i], 1, 1, f_labels);
// if (i % 1000 == 0)
// printf("Training labels : Already read %5d labels\r", i);
// // printf("%d:debug:%d\r",i,train_label[i]);
// // system("pause");
// }
// printf("Training labels : Already read %5d labels\n", train_size);
//
// for (int i = 0;i < train_size;i++)
// {
// for (int j = 0;j < rows;j++)
// for (int k = 0;k < cols;k++)
// {
// tmp = 0;
// fread(&tmp, 1, 1, f_images);
// train_image[i][j][k] = tmp;
// train_image[i][j][k] /= 255;
// // printf("%d %d %d debug: %f\n",i,j,k,train_image[i][j][k]);
// // system("pause");
// }
// if (i % 1000 == 0)
// printf("Training images : Already read %5d images\r", i);
// }
// printf("Training images : Already read %5d images\n", train_size);
//
// fclose(f_images);
// fclose(f_labels);
//
// f_images = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\t10k-images.idx3-ubyte", "rb");
// f_labels = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\t10k-labels.idx1-ubyte", "rb");
//
// fread(&magic_num, sizeof(int), 1, f_images);
// fread(&magic_num, sizeof(int), 1, f_labels);
//
// int test_size;
// fread(&test_size, sizeof(int), 1, f_images);
// fread(&test_size, sizeof(int), 1, f_labels);
// test_size = swap_endian(test_size);
//
// fread(&rows, sizeof(int), 1, f_images);
// fread(&cols, sizeof(int), 1, f_images);
// rows = swap_endian(rows);
// cols = swap_endian(cols);
//
// for (int i = 0;i < test_size;i++)
// {
// fread(&test_label[i], 1, 1, f_labels);
// if (i % 1000 == 0)
// printf("Testing labels : Already read %5d labels\r", i);
// }
// printf("Testing labels : Already read %5d labels\n", test_size);
//
// for (int i = 0;i < test_size;i++)
// {
// for (int j = 0;j < rows;j++)
// for (int k = 0;k < cols;k++)
// {
// tmp = 0;
// fread(&tmp, 1, 1, f_images);
// test_image[i][j][k] = tmp;
// test_image[i][j][k] /= 255;
// }
// if (i % 1000 == 0)
// printf("Testing images : Already read %5d images\r", i);
// }
// printf("Testing images : Already read %5d images\n\n", test_size);
//
// fclose(f_images);
// fclose(f_labels);
//}
//
//void export_params()
//{
// FILE* f_params = fopen("./params.txt", "w");
//
// fprintf(f_params, "6\n");
//
// fprintf(f_params, "conv1bias 0 6 ");
// for (int i = 0;i < CONV_W_NUM;i++)
// fprintf(f_params, "%X ", *(int*)& conv_b[i]);
// fprintf(f_params, "\n");
//
// fprintf(f_params, "conv1filter 0 150 ");
// for (int i = 0;i < CONV_W_NUM;i++)
// for (int j = 0;j < CONV_W_SIZE;j++)
// for (int k = 0;k < CONV_W_SIZE;k++)
// fprintf(f_params, "%X ", *(int*)& conv_w[i][j][k]);
// fprintf(f_params, "\n");
//
// fprintf(f_params, "ip1bias 0 45 ");
// for (int i = 0;i < FC1_SIZE;i++)
// fprintf(f_params, "%X ", *(int*)& fc1_b[i]);
// fprintf(f_params, "\n");
//
// fprintf(f_params, "ip1filter 0 38880 ");
// for (int i = 0;i < FC1_SIZE;i++)
// for (int j = 0;j < CONV_W_NUM;j++)
// for (int k = 0;k < POOL_SIZE;k++)
// for (int l = 0;l < POOL_SIZE;l++)
// fprintf(f_params, "%X ", *(int*)& fc1_w[i][j][k][l]);
// fprintf(f_params, "\n");
//
// fprintf(f_params, "ip2bias 0 10 ");
// for (int i = 0;i < FC2_SIZE;i++)
// fprintf(f_params, "%X ", *(int*)& fc2_b[i]);
// fprintf(f_params, "\n");
//
// fprintf(f_params, "ip2filter 0 450 ");
// for (int i = 0;i < FC2_SIZE;i++)
// for (int j = 0;j < FC1_SIZE;j++)
// fprintf(f_params, "%X ", *(int*)& fc2_w[i][j]);
//
// fclose(f_params);
//
//}
//
////#include "global_gpu.cuh"
////#include "utils_gpu.cuh"
////#include "init_gpu.cuh"
//
//void init_data_gpu()
//{
// CHECK(cudaMemcpyToSymbol(_train_image, train_image, TRAIN_NUM * ROW * COL * sizeof(float)));
// CHECK(cudaMemcpyToSymbol(_train_label, train_label, sizeof(train_label)));
// CHECK(cudaMemcpyToSymbol(_test_image, test_image, TEST_NUM * ROW * COL * sizeof(float)));
// CHECK(cudaMemcpyToSymbol(_test_label, test_label, sizeof(test_label)));
//}
//
//__global__ void init_conv_b(int seed)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// curandState state;
// curand_init(seed, ix, 0, &state);
// float rn = _get_rand(abs((int)curand(&state)) % RAND_MAX, CONV_W_SIZE * CONV_W_SIZE);
// if (ix < CONV_W_NUM)
// _conv_b[ix] = rn;
//}
//
//__global__ void init_conv_w(int seed)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// int iz = threadIdx.z + blockDim.z * blockIdx.z;
// int idx = ix + iy * CONV_W_SIZE + iz * CONV_W_SIZE * CONV_W_SIZE;
// curandState state;
// curand_init(seed, idx, 0, &state);
// float rn = _get_rand(abs((int)curand(&state)) % RAND_MAX, CONV_W_SIZE * CONV_W_SIZE);
// if (ix < CONV_W_NUM && iy < CONV_W_SIZE && iz < CONV_W_SIZE)
// _conv_w[ix][iy][iz] = rn;
//}
//
//__global__ void init_fc1_b(int seed)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// curandState state;
// curand_init(seed, ix, 0, &state);
// float rn = _get_rand(abs((int)curand(&state)) % RAND_MAX, POOL_SIZE * POOL_SIZE * CONV_W_NUM);
// if (ix < FC1_SIZE)
// _fc1_b[ix] = rn;
//}
//
//__global__ void init_fc1_w(int seed, int i)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// int iz = threadIdx.z + blockDim.z * blockIdx.z;
// int idx = ix + iy * POOL_SIZE + iz * POOL_SIZE * POOL_SIZE;
// curandState state;
// curand_init(seed, idx, 0, &state);
// float rn = _get_rand(abs((int)curand(&state)) % RAND_MAX, POOL_SIZE * POOL_SIZE * CONV_W_NUM);
// if (ix < CONV_W_NUM && iy < POOL_SIZE && iz < POOL_SIZE)
// _fc1_w[i][ix][iy][iz] = rn;
//}
//
//__global__ void init_fc2_b(int seed)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// curandState state;
// curand_init(seed, ix, 0, &state);
// float rn = _get_rand(abs((int)curand(&state)) % RAND_MAX, FC1_SIZE);
// if (ix < FC2_SIZE)
// _fc2_b[ix] = rn;
//}
//
//__global__ void init_fc2_w(int seed)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// int idx = ix + iy * FC1_SIZE;
// curandState state;
// curand_init(seed, idx, 0, &state);
// float rn = _get_rand(abs((int)curand(&state)) % RAND_MAX, FC1_SIZE);
// if (ix < FC2_SIZE && iy < FC1_SIZE)
// _fc2_w[ix][iy] = rn;
//}
//
//void init_params_gpu()
//{
// srand((unsigned)time(NULL));
//
// dim3 block1(32);
// dim3 grid1((CONV_W_NUM - 1) / block1.x + 1);
// dim3 block2(32, 32, 32);
// dim3 grid2((CONV_W_NUM - 1) / block2.x + 1, (CONV_W_SIZE - 1) / block2.y + 1, (CONV_W_SIZE - 1) / block2.z + 1);
// dim3 block3(32);
// dim3 grid3((FC1_SIZE - 1) / block3.x + 1);
// dim3 block4(32, 32, 32);
// dim3 grid4((CONV_W_NUM - 1) / block4.x + 1, (POOL_SIZE - 1) / block4.y + 1, (POOL_SIZE - 1) / block4.z + 1);
// dim3 block5(32);
// dim3 grid5((FC2_SIZE - 1) / block5.x + 1);
// dim3 block6(32, 32);
// dim3 grid6((FC2_SIZE - 1) / block6.x + 1, (FC1_SIZE - 1) / block6.y + 1);
//
// init_conv_b << <block1, grid1 >> > (rand());
// init_conv_w << <block2, grid2 >> > (rand());
// init_fc1_b << <block3, grid3 >> > (rand());
//
//#pragma omp parallel for
// for (int i = 0;i < FC1_SIZE;i++)
// init_fc1_w << <block4, grid4 >> > (rand(), i);
// init_fc2_b << <block5, grid5 >> > (rand());
// init_fc2_w << <block6, grid6 >> > (rand());
// cudaDeviceSynchronize();
//}
////#include "test_gpu.cuh"
//__global__ void test_gpu()
//{
// printf("%f %d %d\n", _alpha, _epochs, _minibatch);
// printf("%d\n", tmp);
// tmp = 18;
// printf("%d\n", tmp);
//}
//
//__global__ void test_gpu1()
//{
// printf("====\n");
// printf("%d\n", tmp);
// tmp = 19;
// printf("%d\n", tmp);
//}
////#include "fp_gpu.cuh"
//
//__global__ void _set_input_train(int idx)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// if (ix < ROW && iy < COL)
// {
// _input[ix][iy] = _train_image[idx][ix][iy];
// }
//}
//
//__global__ void _set_input_test(int idx)
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// if (ix < ROW && iy < COL)
// {
// _input[ix][iy] = _test_image[idx][ix][iy];
// }
//}
//
//void set_input_gpu_train(int idx)
//{
// dim3 block(32, 32);
// dim3 grid((ROW - 1) / block.x + 1, (COL - 1) / block.y + 1);
// _set_input_train << <block, grid >> > (idx);
// cudaDeviceSynchronize();
//}
//
//void set_input_gpu_test(int idx)
//{
// dim3 block(32, 32);
// dim3 grid((ROW - 1) / block.x + 1, (COL - 1) / block.y + 1);
// _set_input_test << <block, grid >> > (idx);
// cudaDeviceSynchronize();
//}
//
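//// _input_conv: valid convolution of the input with each CONV_W_SIZE x CONV_W_SIZE
//// filter, plus the per-filter bias, followed by the sigmoid activation.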
//__global__ void _input_conv()
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// int iz = threadIdx.z + blockDim.z * blockIdx.z;
// if (ix < CONV_W_NUM && iy < CONV_SIZE && iz < CONV_SIZE)
// {
// _conv_z[ix][iy][iz] = 0;
// // #pragma unroll
// for (int l = 0;l < CONV_W_SIZE;l++)
// for (int m = 0;m < CONV_W_SIZE;m++)
// _conv_z[ix][iy][iz] += _input[iy + l][iz + m] * _conv_w[ix][l][m];
// _conv_z[ix][iy][iz] += _conv_b[ix];
// _conv_a[ix][iy][iz] = _sigmoid(_conv_z[ix][iy][iz]);
// }
//}
//
//void input_conv_gpu()
//{
// dim3 block(8, 8, 8);
// dim3 grid((CONV_W_NUM - 1) / block.x + 1, (CONV_SIZE - 1) / block.y + 1, (CONV_SIZE - 1) / block.z + 1);
// _input_conv << <block, grid >> > ();
// cudaDeviceSynchronize();
//}
//
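//// _conv_pool: 2x2 max pooling over each feature map; _pool_pos records which
//// of the four inputs won (0-3) so backpropagation can route the gradient.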
//__global__ void _conv_pool()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int j = threadIdx.y + blockDim.y * blockIdx.y;
// int k = threadIdx.z + blockDim.z * blockIdx.z;
// if (i < CONV_W_NUM && j < POOL_SIZE && k < POOL_SIZE)
// {
// float _max = _conv_a[i][j * 2][k * 2];
// _pool_pos[i][j][k] = 0;
// if (_conv_a[i][j * 2][k * 2 + 1] > _max)
// {
// _max = _conv_a[i][j * 2][k * 2 + 1];
// _pool_pos[i][j][k] = 1;
// }
// if (_conv_a[i][j * 2 + 1][k * 2] > _max)
// {
// _max = _conv_a[i][j * 2 + 1][k * 2];
// _pool_pos[i][j][k] = 2;
// }
// if (_conv_a[i][j * 2 + 1][k * 2 + 1] > _max)
// {
// _max = _conv_a[i][j * 2 + 1][k * 2 + 1];
// _pool_pos[i][j][k] = 3;
// }
// _pool[i][j][k] = _max;
// }
//}
//
//void conv_pool_gpu()
//{
// dim3 block(8, 8, 8);
// dim3 grid((CONV_W_NUM - 1) / block.x + 1, (POOL_SIZE - 1) / block.y + 1, (POOL_SIZE - 1) / block.z + 1);
// _conv_pool << <block, grid >> > ();
// cudaDeviceSynchronize();
//}
//
//__global__ void _pool_fc1()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC1_SIZE)
// {
// _fc1_z[i] = 0;
// for (int j = 0;j < CONV_W_NUM;j++)
// for (int k = 0;k < POOL_SIZE;k++)
// for (int l = 0;l < POOL_SIZE;l++)
// _fc1_z[i] += _pool[j][k][l] * _fc1_w[i][j][k][l];
// _fc1_z[i] += _fc1_b[i];
// _fc1_a[i] = _sigmoid(_fc1_z[i]);
// }
//}
//
//void pool_fc1_gpu()
//{
// dim3 block(32);
// dim3 grid((FC1_SIZE - 1) / block.x + 1);
// _pool_fc1 << <block, grid >> > ();
// cudaDeviceSynchronize();
//}
//
//__global__ void _fc1_fc2()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _fc2_z[i] = 0;
// for (int j = 0;j < FC1_SIZE;j++)
// _fc2_z[i] += _fc1_a[j] * _fc2_w[i][j];
// _fc2_z[i] += _fc2_b[i];
// _fc2_a[i] = _sigmoid(_fc2_z[i]);
// }
//}
//
//void fc1_fc2_gpu()
//{
// dim3 block(32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1);
// _fc1_fc2 << <block, grid >> > ();
// cudaDeviceSynchronize();
//}
//
//__global__ void _set_answer_train(int idx)
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _output[i] = _fc2_a[i];
// _answer[i] = (_train_label[idx] == i) ? 1 : 0;
// }
//}
//
//__global__ void _set_answer_test(int idx)
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _output[i] = _fc2_a[i];
// _answer[i] = (_test_label[idx] == i) ? 1 : 0;
// }
//}
//
//void set_answer_gpu_train(int idx)
//{
// dim3 block(32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1);
// _set_answer_train << <block, grid >> > (idx);
// cudaDeviceSynchronize();
//}
//
//void set_answer_gpu_test(int idx)
//{
// dim3 block(32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1);
// _set_answer_test << <block, grid >> > (idx);
// cudaDeviceSynchronize();
//}
//
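//// _check_answer_get_error runs on a single thread: it takes the argmax of the
//// network output, increments _correct_cnt on a hit, and accumulates the squared
//// error 0.5*(output - answer)^2 into _avg_error.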
//__global__ void _check_answer_get_error()
//{
// float _max = _output[0];
// int max_pos = 0;
// for (int i = 0;i < FC2_SIZE;i++)
// {
// if (_max < _output[i])
// {
// _max = _output[i];
// max_pos = i;
// }
// }
// if (_answer[max_pos])
// _correct_cnt++;
// for (int i = 0;i < FC2_SIZE;i++)
// {
// _C[i] = _output[i] - _answer[i];
// _avg_error += _C[i] * _C[i] * 0.5;
// }
//}
//
//void check_answer_get_error_gpu()
//{
// _check_answer_get_error << <1, 1 >> > ();
// cudaDeviceSynchronize();
//}
////#include "bp_gpu.cuh"
//
//__global__ void _update_fc2_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _fc2_delta[i] = _alpha * _C[i] * (_fc2_a[i] * (1.0 - _fc2_a[i]));
// _fc2_db[i] += _fc2_delta[i];
// }
//}
//
//void update_fc2_b_gpu()
//{
// dim3 block(32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1);
// _update_fc2_b << <block, grid >> > ();
// cudaDeviceSynchronize();
//}
//
//__global__ void _update_fc2_w()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int j = threadIdx.y + blockDim.y * blockIdx.y;
// if (i < FC2_SIZE && j < FC1_SIZE)
// _fc2_dw[i][j] += _fc2_delta[i] * _fc1_a[j];
//}
//
//void update_fc2_w_gpu()
//{
// dim3 block(32, 32);
// dim3 grid((FC2_SIZE - 1) / block.x + 1, (FC1_SIZE - 1) / block.x + 1);
// _update_fc2_w << <block, grid >> > ();
// cudaDeviceSynchronize();
//}
//
//__global__ void _update_fc1_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC1_SIZE)
// {
// float error = 0;
// for (int j = 0;j < FC2_SIZE;j++)
// error += _fc2_delta[j] * _fc2_w[j][i];
// _fc1_delta[i] = error * (_fc1_a[i] * (1.0 - _fc1_a[i]));
// _fc1_db[i] += _fc1_delta[i];
// }
//}
//
//void update_fc1_b_gpu()
//{
// dim3 block(32);
// dim3 grid((FC1_SIZE - 1) / block.x + 1);
// _update_fc1_b << <block, grid >> > ();
// cudaDeviceSynchronize();
//}
//
//__global__ void _update_fc1_w(int j)
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int k = threadIdx.y + blockDim.y * blockIdx.y;
// int l = threadIdx.z + blockDim.z * blockIdx.z;
// if (i < FC1_SIZE && k < POOL_SIZE && l < POOL_SIZE)
// _fc1_dw[i][j][k][l] += _fc1_delta[i] * _pool[j][k][l];
//}
//
//void update_fc1_w_gpu()
//{
// dim3 block(8, 8, 8);
// dim3 grid((FC1_SIZE - 1) / block.x + 1, (POOL_SIZE - 1) / block.y + 1, (POOL_SIZE - 1) / block.z + 1);
//
// // #pragma omp parallel for
// for (int j = 0;j < CONV_W_NUM;j++)
// _update_fc1_w << <block, grid >> > (j);
// cudaDeviceSynchronize();
//}
//
//__global__ void _update_conv_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < CONV_W_NUM)
// {
// _conv_sigma_delta[i] = 0;
// for (int j = 0;j < POOL_SIZE;j++)
// for (int k = 0;k < POOL_SIZE;k++)
// {
// float error = 0;
// _conv_delta[i][j][k] = 0;
// for (int l = 0;l < FC1_SIZE;l++)
// error += _fc1_delta[l] * _fc1_w[l][i][j][k];
// _conv_delta[i][j][k] = error * (_pool[i][j][k] * (1.0 - _pool[i][j][k]));
// _conv_sigma_delta[i] += error * (_pool[i][j][k] * (1.0 - _pool[i][j][k]));
// }
// _conv_db[i] += _conv_sigma_delta[i];
// }
//}
//
//void update_conv_b_gpu()
//{
// dim3 block(32);
// dim3 grid((CONV_W_NUM - 1) / block.x + 1);
// _update_conv_b << <block, grid >> > ();
// cudaDeviceSynchronize();
//}
//
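//// _update_conv_w accumulates the filter gradients: the stored _pool_pos entry
//// identifies which input pixel produced each pooled activation, so the delta
//// is applied at that location only.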
//__global__ void _update_conv_w()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int j = threadIdx.y + blockDim.y * blockIdx.y;
// int k = threadIdx.z + blockDim.z * blockIdx.z;
// if (i < CONV_W_NUM && j < CONV_W_SIZE && k < CONV_W_SIZE)
// {
// float error = 0;
// for (int m = 0;m < POOL_SIZE;m++)
// for (int n = 0;n < POOL_SIZE;n++)
// {
// int x = _pool_pos[i][m][n] / 2;
// int y = _pool_pos[i][m][n] % 2;
// error += _conv_delta[i][m][n] * _input[2 * m + j + x][2 * n + k + y];
// }
// _conv_dw[i][j][k] += error;
// }
//}
//
//void update_conv_w_gpu()
//{
// dim3 block(8, 8, 8);
// dim3 grid((CONV_W_NUM - 1) / block.x + 1, (CONV_W_SIZE - 1) / block.y + 1, (CONV_W_SIZE - 1) / block.z + 1);
// _update_conv_w << <block, grid >> > ();
// cudaDeviceSynchronize();
//}
//
//__global__ void assign_fc2_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC2_SIZE)
// {
// _fc2_b[i] -= (_fc2_db[i] / _minibatch);
// _fc2_db[i] = 0;
// }
//}
//
//__global__ void assign_fc2_w()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int j = threadIdx.y + blockDim.y * blockIdx.y;
// if (i < FC2_SIZE && j < FC1_SIZE)
// {
// _fc2_w[i][j] -= (_fc2_dw[i][j] / _minibatch);
// _fc2_dw[i][j] = 0;
// }
//}
//
//__global__ void assign_fc1_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < FC1_SIZE)
// {
// _fc1_b[i] -= (_fc1_db[i] / _minibatch);
// _fc1_db[i] = 0;
// }
//}
//
//__global__ void assign_fc1_w(int j)
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int k = threadIdx.y + blockDim.y * blockIdx.y;
// int l = threadIdx.z + blockDim.z * blockIdx.z;
// if (i < FC1_SIZE && k < POOL_SIZE && l < POOL_SIZE)
// {
// _fc1_w[i][j][k][l] -= (_fc1_dw[i][j][k][l] / _minibatch);
// _fc1_dw[i][j][k][l] = 0;
// }
//}
//
//__global__ void assign_conv_b()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// if (i < CONV_W_NUM)
// {
// _conv_b[i] -= (_conv_db[i] / _minibatch);
// _conv_db[i] = 0;
// }
//}
//
//__global__ void assign_conv_w()
//{
// int i = threadIdx.x + blockDim.x * blockIdx.x;
// int l = threadIdx.y + blockDim.y * blockIdx.y;
// int m = threadIdx.z + blockDim.z * blockIdx.z;
// if (i < CONV_W_NUM && l < CONV_W_SIZE && m < CONV_W_SIZE)
// {
// _conv_w[i][l][m] -= (_conv_dw[i][l][m] / _minibatch);
// _conv_dw[i][l][m] = 0;
// }
//}
//
//void assign_grads_gpu()
//{
// dim3 block1(32);
// dim3 grid1((FC2_SIZE - 1) / block1.x + 1);
// assign_fc2_b << <block1, grid1 >> > ();
//
// dim3 block2(32, 32);
// dim3 grid2((FC2_SIZE - 1) / block2.x + 1, (FC1_SIZE - 1) / block2.y + 1);
// assign_fc2_w << <block2, grid2 >> > ();
//
// dim3 block3(32);
// dim3 grid3((FC1_SIZE - 1) / block3.x + 1);
// assign_fc1_b << <block3, grid3 >> > ();
//
// dim3 block4(8, 8, 8);
// dim3 grid4((FC1_SIZE - 1) / block4.x + 1, (POOL_SIZE - 1) / block4.y + 1, (POOL_SIZE - 1) / block4.z + 1);
// for (int j = 0;j < CONV_W_NUM;j++)
// assign_fc1_w << <block4, grid4 >> > (j);
//
// dim3 block5(32);
// dim3 grid5((CONV_W_NUM - 1) / block5.x + 1);
// assign_conv_b << <block5, grid5 >> > ();
//
// dim3 block6(8, 8, 8);
// dim3 grid6((CONV_W_NUM - 1) / block6.x + 1, (CONV_W_SIZE - 1) / block6.y + 1, (CONV_W_SIZE - 1) / block6.z + 1);
// assign_conv_w << <block6, grid6 >> > ();
//
// cudaDeviceSynchronize();
//}
//
//int correct_cnt;
//float avg_error;
//float max_acc;
//
//__global__ void _test()
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// int iz = threadIdx.z + blockDim.z * blockIdx.z;
//
// for (int i = 5000;i < 5001;i++)
// for (int j = 0;j < ROW;j++)
// {
// for (int k = 0;k < COL;k++)
// printf("%f ", _test_image[i][j][k]);
// printf("\n");
// }
// printf("%d", _test_label[5000]);
//
// // printf("%f ",_test_image[ix][iy][iz]);
//}
//
//void test()
//{
// puts("");
// puts("debug1");
// dim3 block(1, 1, 1);
// dim3 grid(1, 1, 1);
// _test << <block, grid >> > ();
// puts("debug2");
// cudaDeviceSynchronize();
// puts("debug3");
//}
//#define BASE_TYPE int
//#define N 1000
//#define M 64
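//// scalMult computes a dot product of two length-N vectors: each thread
//// multiplies one pair of elements and atomically adds the partial product
//// into the single-element result C.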
//__global__ void scalMult(const BASE_TYPE * A, const BASE_TYPE * B, BASE_TYPE * C) {
// BASE_TYPE sum = 0;
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
// sum = A[idx] * B[idx];
// atomicAdd(C, sum);
//}
//
//void scal(int* dev_a, int* dev_b, int* dev_c, dim3 blocksPerGrid) {
// scalMult << <blocksPerGrid, M >> > (dev_a, dev_b, dev_c);
//}
//int main2(int argc, char* argv[])
//{
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//
// int host_a[N], host_b[N];
// int* host_c = (int*)malloc(sizeof(int));
// int* dev_a, * dev_b, * dev_c, * dev_res;
// cout << "a" << " " << "b" << endl;
// for (int i = 0; i < N; i++)
// {
// host_a[i] = rand() % 10;
// host_b[i] = rand() % 10;
// //cout << host_a[i] << " " << host_b[i] << endl;
// }
// cudaMalloc((void**)& dev_a, N * sizeof(int));
// cudaMalloc((void**)& dev_b, N * sizeof(int));
// cudaMalloc((void**)& dev_c, sizeof(int));
// cudaMemcpy(dev_a, host_a, N * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(dev_b, host_b, N * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemset(dev_c, 0, sizeof(int));
// //dim3 threadsPerBlock = dim3(BS, BS);
// dim3 blocksPerGrid = dim3(N / M);
// cudaEventRecord(start, 0);
// scal(dev_a, dev_b, dev_c, blocksPerGrid);
//
// //
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// float KernelTime;
// cudaEventElapsedTime(&KernelTime, start, stop);
//	printf("KernelTime: %.2f milliseconds\n", KernelTime);
// cudaMemcpy(host_c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
// printf("Result: %d", host_c[0]);
// cudaFree(dev_a);
// cudaFree(dev_b);
// cudaFree(dev_c);
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
// printf("====== aininot260 [email protected] ======\n");
// printf(" Processor used : %s\n", argv[1]);
// printf(" Learning rate : %.2f\n", alpha);
// printf(" Epochs : %d\n", epochs);
// printf(" Batch size : %d\n", minibatch);
// printf("========================================\n");
// printf("\n");
//
// load_data();
//
// clock_t t = clock();
//
// //initDevice(0);
// CHECK(cudaMemcpyToSymbol(_alpha, &alpha, sizeof(float)));
//	CHECK(cudaMemcpyToSymbol(_minibatch, &minibatch, sizeof(int)));
// CHECK(cudaMemcpyToSymbol(_epochs, &epochs, sizeof(int)));
// init_data_gpu();
// set_input_gpu_train(1);
// init_params_gpu();
//
// for (int i = 1;i <= epochs;i++)
// {
//
// int value1 = 0;
// float value2 = 0;
//		CHECK(cudaMemcpyToSymbol(_correct_cnt, &value1, sizeof(int)));
//		CHECK(cudaMemcpyToSymbol(_avg_error, &value2, sizeof(float)));
// //cudaMemcpyToSymbol(_correct_cnt, &value1, sizeof(int));
// //cudaMemcpyToSymbol(_avg_error, &value2, sizeof(float));
// cudaDeviceSynchronize();
//
// for (int j = 0;j < TRAIN_NUM;j++)
// {
// set_input_gpu_train(j);
// input_conv_gpu();
// conv_pool_gpu();
// pool_fc1_gpu();
// fc1_fc2_gpu();
// set_answer_gpu_train(j);
// check_answer_get_error_gpu();
//
// update_fc2_b_gpu();
// update_fc2_w_gpu();
// update_fc1_b_gpu();
// update_fc1_w_gpu();
// update_conv_b_gpu();
// update_conv_w_gpu();
// if ((j + 1) % minibatch == 0)
// assign_grads_gpu();
//
// if (j && j % 100 == 0)
// {
//
// cudaMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// cudaMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j, ((float)correct_cnt / j) * 100, (avg_error / j) * 100, i);
// }
// }
//
// cudaMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// cudaMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TRAIN_NUM, ((float)correct_cnt / TRAIN_NUM) * 100, (avg_error / TRAIN_NUM) * 100, i);
//
// correct_cnt = 0;
// avg_error = 0;
// cudaMemcpyToSymbol(_correct_cnt, &correct_cnt, sizeof(int));
// cudaMemcpyToSymbol(_avg_error, &avg_error, sizeof(float));
//
// for (int j = 0;j < TEST_NUM;j++)
// {
// set_input_gpu_test(j);
// input_conv_gpu();
// conv_pool_gpu();
// pool_fc1_gpu();
// fc1_fc2_gpu();
// set_answer_gpu_test(j);
// check_answer_get_error_gpu();
//
// if (j && j % 100 == 0)
// {
// cudaMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// cudaMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j, ((float)correct_cnt / j) * 100, (avg_error / j) * 100);
// }
// }
// cudaMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int));
// cudaMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float));
// printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TEST_NUM, ((float)correct_cnt / TEST_NUM) * 100, (avg_error / TEST_NUM) * 100);
//
// if ((float)correct_cnt / TEST_NUM * 100 > max_acc)
// {
// max_acc = (float)correct_cnt / TEST_NUM * 100;
// //export_params();
//			printf("The new model has been exported. Accuracy has reached %0.5f%%\n\n", max_acc);
// }
// else
// {
// alpha = alpha - (alpha / 3);
// cudaMemcpyToSymbol(_alpha, &alpha, sizeof(float));
// printf("Learning rate has been reduced to %f\n\n", alpha);
// }
// }
// return 0;
//}
|
y_load.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// y_load.cu
//
// Ghost loading based on threadIdx.y. Requires NX_TILE = NY_TILE.
//
__global__ void
grad_kernel_y_load(const real * __restrict f, real * __restrict u, const real xfactor, const real yfactor,
const real zfactor)
{
__shared__ real fs[NY_TILE + 2 * NGHOST][NX_TILE + 2 * NGHOST];
const int ghostMul[] = { 0, 0, 0, 1, 1, 1 };
// Local indices
const int xli = threadIdx.x + NGHOST;
const int yli = threadIdx.y + NGHOST;
// Global indices
const int xi = blockIdx.x * blockDim.x + threadIdx.x + NGHOST;
const int yi = blockIdx.y * blockDim.y + threadIdx.y + NGHOST;
// Ghost zone loading indices
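	// Threads with threadIdx.y < 2*NGHOST fetch the y-direction ghost rows
	// (ghostMul picks the low or high side of the tile); the next 2*NGHOST
	// rows of threads fetch the x-direction ghost columns with the x/y roles
	// swapped, which is why NX_TILE must equal NY_TILE.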
int2 gli = make_int2(-1, -1), gi = make_int2(-1, -1);
if (threadIdx.y < 2 * NGHOST) {
int off = -3 + ghostMul[threadIdx.y] * NY_TILE;
gli.x = xli;
gli.y = yli + off;
gi.x = xi;
gi.y = yi + off;
}
else if (threadIdx.y < 4 * NGHOST) {
int adjidx = threadIdx.y - 2 * NGHOST;
int off = -3 + ghostMul[adjidx] * NY_TILE - 2 * NGHOST;
gli.x = yli + off;
gli.y = xli;
gi.x = blockIdx.x * blockDim.x + yli + off;
gi.y = blockIdx.y * blockDim.y + xli;
}
// Z-wise iteration values
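	// Seven consecutive z-planes of the (xi, yi) column are kept in registers
	// and shifted down each iteration, so the z-derivative stencil never goes
	// through shared memory.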
real behind3,
behind2 = f[vfidx(xi, yi, 0)],
behind1 = f[vfidx(xi, yi, 1)],
current = f[vfidx(xi, yi, 2)],
forward1 = f[vfidx(xi, yi, 3)],
forward2 = f[vfidx(xi, yi, 4)],
forward3 = f[vfidx(xi, yi, 5)];
for (int zi = NGHOST; zi < NZ + NGHOST; zi++) {
// Iterate through z dimension in registers
behind3 = behind2;
behind2 = behind1;
behind1 = current;
current = forward1;
forward1 = forward2;
forward2 = forward3;
forward3 = f[vfidx(xi, yi, zi + 3)];
// Load x-y tile to shared memory
__syncthreads();
fs[yli][xli] = current;
if (gli.x >= 0)
fs[gli.y][gli.x] = f[vfidx(gi.x, gi.y, zi)];
__syncthreads();
// Compute the gradient
u[vfidx(xi, yi, zi, 2)] = zfactor * fd1D(
behind3, behind2, behind1, forward1, forward2, forward3);
u[vfidx(xi, yi, zi, 1)] = yfactor * fd1D(
fs[yli - 3][xli], fs[yli - 2][xli], fs[yli - 1][xli],
fs[yli + 1][xli], fs[yli + 2][xli], fs[yli + 3][xli]);
u[vfidx(xi, yi, zi, 0)] = xfactor * fd1D(
fs[yli][xli - 3], fs[yli][xli - 2], fs[yli][xli - 1],
fs[yli][xli + 1], fs[yli][xli + 2], fs[yli][xli + 3]);
}
}
void
grad_y_load(vf3dgpu &f, vf3dgpu &u)
{
hipLaunchKernelGGL(( grad_kernel_y_load), dim3(xy_tile.nblocks), dim3(xy_tile.nthreads), 0, 0, f.mem(), u.mem(),
1.0/dx, 1.0/dy, 1.0/dz);
}
|
y_load.cu
|
// y_load.cu
//
// Ghost loading based on threadIdx.y. Requires NX_TILE = NY_TILE.
//
__global__ void
grad_kernel_y_load(const real * __restrict f, real * __restrict u, const real xfactor, const real yfactor,
const real zfactor)
{
__shared__ real fs[NY_TILE + 2 * NGHOST][NX_TILE + 2 * NGHOST];
const int ghostMul[] = { 0, 0, 0, 1, 1, 1 };
// Local indices
const int xli = threadIdx.x + NGHOST;
const int yli = threadIdx.y + NGHOST;
// Global indices
const int xi = blockIdx.x * blockDim.x + threadIdx.x + NGHOST;
const int yi = blockIdx.y * blockDim.y + threadIdx.y + NGHOST;
// Ghost zone loading indices
int2 gli = make_int2(-1, -1), gi = make_int2(-1, -1);
if (threadIdx.y < 2 * NGHOST) {
int off = -3 + ghostMul[threadIdx.y] * NY_TILE;
gli.x = xli;
gli.y = yli + off;
gi.x = xi;
gi.y = yi + off;
}
else if (threadIdx.y < 4 * NGHOST) {
int adjidx = threadIdx.y - 2 * NGHOST;
int off = -3 + ghostMul[adjidx] * NY_TILE - 2 * NGHOST;
gli.x = yli + off;
gli.y = xli;
gi.x = blockIdx.x * blockDim.x + yli + off;
gi.y = blockIdx.y * blockDim.y + xli;
}
// Z-wise iteration values
real behind3,
behind2 = f[vfidx(xi, yi, 0)],
behind1 = f[vfidx(xi, yi, 1)],
current = f[vfidx(xi, yi, 2)],
forward1 = f[vfidx(xi, yi, 3)],
forward2 = f[vfidx(xi, yi, 4)],
forward3 = f[vfidx(xi, yi, 5)];
for (int zi = NGHOST; zi < NZ + NGHOST; zi++) {
// Iterate through z dimension in registers
behind3 = behind2;
behind2 = behind1;
behind1 = current;
current = forward1;
forward1 = forward2;
forward2 = forward3;
forward3 = f[vfidx(xi, yi, zi + 3)];
// Load x-y tile to shared memory
__syncthreads();
fs[yli][xli] = current;
if (gli.x >= 0)
fs[gli.y][gli.x] = f[vfidx(gi.x, gi.y, zi)];
__syncthreads();
// Compute the gradient
u[vfidx(xi, yi, zi, 2)] = zfactor * fd1D(
behind3, behind2, behind1, forward1, forward2, forward3);
u[vfidx(xi, yi, zi, 1)] = yfactor * fd1D(
fs[yli - 3][xli], fs[yli - 2][xli], fs[yli - 1][xli],
fs[yli + 1][xli], fs[yli + 2][xli], fs[yli + 3][xli]);
u[vfidx(xi, yi, zi, 0)] = xfactor * fd1D(
fs[yli][xli - 3], fs[yli][xli - 2], fs[yli][xli - 1],
fs[yli][xli + 1], fs[yli][xli + 2], fs[yli][xli + 3]);
}
}
void
grad_y_load(vf3dgpu &f, vf3dgpu &u)
{
grad_kernel_y_load<<<xy_tile.nblocks, xy_tile.nthreads>>>(f.mem(), u.mem(),
1.0/dx, 1.0/dy, 1.0/dz);
}
|
c1194f31ec37547ee0e5159188c9006612c065a2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "project_apsp.hpp"
#include "my_process_result.hpp"
#include "sequential_apsp.hpp"
#include "gpu_apsp.hpp"
#include "old_asps.hpp"
#include <vector>
#include <iostream>
char* TEST_GRAPH = "C:/Users/Terence/Documents/GitHub/CS791v_Spring2015/Project/Code/Graphs/random_1024_1024_graph.txt";
char* TEST_SOLUTION = "C:/Users/Terence/Documents/GitHub/CS791v_Spring2015/Project/Code/Graphs/random_1024_1024_graph_solution.txt";
void GenerateGraphWithResult(const unsigned size);
MyCudaProcessResult< std::vector<float> >
DoApspOnGpu(
const float* h_graph,
const unsigned graph_size,
const unsigned beta);
MyCudaProcessResult< std::vector<float> >
DoApspOnGpu(
void(*ApspKernel)(float*, unsigned, unsigned, unsigned),
const float* h_graph,
const unsigned graph_size,
const unsigned beta);
void GpuFloydWarshallBeta(float *data, int start, int width, const unsigned full_width, const unsigned beta);
void GpuFloydWarshallBeta2(
float *data,
int start,
int width,
const unsigned full_width,
const unsigned beta);
int
main(int argc, char** argv) {
#if 0
float* graph = easy_graph_b;
for (unsigned k = 0;
#else
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
//GenerateGraphWithResult(4096);
std::vector<float> f;
puts("loading graphs...");
float* graph;
unsigned graph_size = LoadSparseMatrix(
&graph,
TEST_GRAPH
);
float* solution;
LoadSparseMatrix(
&solution,
TEST_SOLUTION
);
unsigned val = 16;
for (unsigned beta = 4; beta <= val; beta *= 2) {
printf("================== Beta: %d =======================\n", beta);
puts("computing...");
MyCudaProcessResult< std::vector<float> > reference_result = DoApspOnGpu(
graph,
graph_size,
beta//32
);
puts("verifying against solution...");
if (GraphsAreEquivalentDefault(reference_result.GetResult().data(), solution, graph_size)) {
puts("The solution is correct - Hooray!");
} else {
puts("Uh oh... SOMETHING WENT WRONG");
}
printf("Solution took %lf seconds to complete.\n\n", reference_result.GetTimeToComplete());
///////////////////////////////////////////////////////////////
puts("computing 2nd version...");
MyCudaProcessResult< std::vector<float> > result = DoApspOnGpu2(
graph,
graph_size,
4 * beta//32
);
printf("\nSolution took %lf seconds to complete.\n\n", result.GetTimeToComplete());
puts("verifying against solution...");
if (GraphsAreEquivalentDefault(result.GetResult().data(), solution, graph_size)) {
puts("The solution is correct - Hooray!");
} else {
puts("Uh oh... SOMETHING WENT WRONG");
}
printf("\n~~~\nSpeedup Factor: %lf\n~~~\n", reference_result.GetTimeToComplete() / result.GetTimeToComplete());
}
free(graph);
free(solution);
#endif
return 0;
}
void GenerateGraphWithResult(const unsigned size) {
char start_file[200];
char solution_file[200];
sprintf(start_file, "C:/Users/Terence/Documents/GitHub/CS791v_Spring2015/Project/Code/Graphs/random_%d_%d_graph.txt", size, size);
sprintf(solution_file, "C:/Users/Terence/Documents/GitHub/CS791v_Spring2015/Project/Code/Graphs/random_%d_%d_graph_solution.txt", size, size);
float* graph = NULL;
GenerateErdosRenyiGraph(&graph, size, 6);
	WriteSparseMatrix(graph, size, start_file);
NaiveFloydWarshall(graph, size);
WriteSparseMatrix(graph, size, solution_file);
}
MyCudaProcessResult< std::vector<float> >
DoApspOnGpu(
const float* h_graph,
const unsigned graph_size,
const unsigned beta) {
/**
*
*/
unsigned mem_size = graph_size * graph_size * sizeof(float);
float* d_graph = NULL;
MyCudaProcessResult< std::vector<float> > result;
std::vector<float> h_result(graph_size * graph_size);
result.CudaStatus() = hipMalloc((void**) &d_graph, mem_size);
if (result.CudaStatus() != hipSuccess) {
std::cout << hipGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
result.CudaStatus() = hipMemcpy(
d_graph,
h_graph,
mem_size,
hipMemcpyHostToDevice
);
if (result.CudaStatus() != hipSuccess) {
std::cout << hipGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start);
GpuFloydWarshallBeta(d_graph, 0, graph_size, graph_size, beta);
hipEventRecord(end);
hipEventSynchronize(end);
result.CudaStatus() = hipMemcpy(
h_result.data(),
d_graph,
mem_size,
hipMemcpyDeviceToHost
);
if (result.CudaStatus() != hipSuccess) {
std::cout << hipGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
float t;
hipEventElapsedTime(&t, start, end);
result.SetResult(h_result);
result.SetTimeToComplete(t / 1000.0);
result.SetSuccess(true);
return result;
}
void GpuFloydWarshallBeta(
float *data,
int start,
int width,
const unsigned full_width,
const unsigned beta) {
/**
*
*/
if (width <= beta) {
// the computation now can fit in one block
dim3 threads(width, width);
dim3 grid(1, 1);
hipLaunchKernelGGL(( apsp_seq_I), dim3(grid), dim3(threads) , 0, 0, data, start, width, full_width);
} else if(width <= FAST_GEMM) {
int nw = width / 2; // new width
unsigned shared_mem_size = 2 * beta * beta * sizeof(float); // we need 2 of them
// setup execution parameters
dim3 threadsmult(beta, beta);
dim3 gridmult(nw / beta, nw / beta);
// Do FW for A
GpuFloydWarshallBeta(data, start, nw, full_width, beta);
// execute the kernel B = AB
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start+nw, start, start,start,start+nw, start,0);
// execute the kernel C = CA
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start, start+nw,start,start+nw,start, start,0);
// execute the kernel D += CB
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start+nw,start+nw,start,start+nw, start+nw, start,1);
// do FW for D
GpuFloydWarshallBeta(data, start+nw, nw, full_width, beta);
// execute the kernel B = BD
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start+nw, start, start+nw,start,start+nw, start+nw,0);
// execute the kernel C = DC
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start, start+nw,start+nw,start+nw,start, start+nw,0);
// execute the kernel A += BC
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start,start,start+nw,start, start, start+nw,1);
} else {
/*A=floyd-warshall(A);
B=AB;
C=CA;
D=D+CB;
D=floyd-warshall(D);
B=BD;
C=DC;
A=A+BC;*/
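		// Recursive blocked Floyd-Warshall: the matrix is split into quadrants
		// A, B, C, D and the "products" below are min-plus matrix multiplies,
		// so the eight steps above update every block's shortest paths. The
		// recursion bottoms out in the single-block kernel once width <= beta.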
int nw = width / 2; // new width
// setup execution parameters
dim3 gemmgrid( nw /64, nw/16 );
dim3 gemmthreads( 16, 4 );
// Remember: Column-major
float * A = data + start * full_width + start;
float * B = data + (start+nw) * full_width + start;
float * C = data + start * full_width + (start+nw);
float * D = data + (start+nw) * full_width + (start+nw);
// sgemmNN_MinPlus( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
// no need to send m & n since they are known through grid dimensions !
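		// The final argument seems to select overwrite (FLOATINF) versus
		// accumulate (1) semantics for the destination of the min-plus product.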
// Do FW for A
GpuFloydWarshallBeta(data, start, nw, full_width, beta);
// execute the parallel multiplication kernel B = AB
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, A, full_width, B, full_width, B, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel C = CA
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, C, full_width, A, full_width, C, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel D += CB
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, C, full_width, B, full_width, D, full_width, nw, 1 );
// do FW for D
GpuFloydWarshallBeta(data, start+nw, nw, full_width, beta);
// execute the parallel multiplication kernel B = BD
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, B, full_width, D, full_width, B, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel C = DC
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, D, full_width, C, full_width, C, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel A += BC
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, B, full_width, C, full_width, A, full_width, nw, 1 );
}
}
////////////////////////////////////////////////////////////////////////////////
MyCudaProcessResult< std::vector<float> >
DoApspOnGpu2(
const float* h_graph,
const unsigned graph_size,
const unsigned beta) {
/**
*
*/
unsigned mem_size = graph_size * graph_size * sizeof(float);
float* d_graph = NULL;
MyCudaProcessResult< std::vector<float> > result;
std::vector<float> h_result;
result.CudaStatus() = hipMalloc((void**) &d_graph, mem_size);
if (result.CudaStatus() != hipSuccess) {
std::cout << hipGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
result.CudaStatus() = hipMemcpy(
d_graph,
h_graph,
mem_size,
hipMemcpyHostToDevice
);
if (result.CudaStatus() != hipSuccess) {
std::cout << hipGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start);
puts("calling the new thing");
GpuFloydWarshallBeta2(d_graph, 0, graph_size, graph_size, beta);
hipEventRecord(end);
hipEventSynchronize(end);
float* h_result_ptr = (float*) malloc(graph_size * graph_size * sizeof(float));
result.CudaStatus() = hipMemcpy(
h_result_ptr,
d_graph,
mem_size,
hipMemcpyDeviceToHost
);
if (result.CudaStatus() != hipSuccess) {
std::cout << hipGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
for (unsigned i = 0; i < graph_size * graph_size; ++i) {
h_result.push_back(h_result_ptr[i]);
	}
	free(h_result_ptr);
float t;
hipEventElapsedTime(&t, start, end);
result.SetResult(h_result);
result.SetTimeToComplete(t / 1000.0);
result.SetSuccess(true);
return result;
}
void GpuFloydWarshallBeta2(
float *data,
int start,
int width,
const unsigned full_width,
const unsigned beta) {
/**
*
*/
if (width <= beta) {
// the computation now can fit in one block
dim3 threads(width / 4, width / 4);
dim3 grid(1, 1);
unsigned shared_mem_size = width * width * sizeof(float);
hipLaunchKernelGGL(( apsp_seq_I_shared_looped_4), dim3(grid), dim3(threads), shared_mem_size , 0, data, start, width, full_width);
} else if(width <= FAST_GEMM) {
int nw = width / 2; // new width
unsigned shared_mem_size = 2 * beta * beta * sizeof(float); // we need 2 of them
// setup execution parameters
dim3 threadsmult(beta, beta);
dim3 gridmult(nw / beta, nw / beta);
// Do FW for A
GpuFloydWarshallBeta2(data, start, nw, full_width, beta);
// execute the kernel B = AB
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start+nw, start, start,start,start+nw, start,0);
// execute the kernel C = CA
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start, start+nw,start,start+nw,start, start,0);
// execute the kernel D += CB
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start+nw,start+nw,start,start+nw, start+nw, start,1);
// do FW for D
GpuFloydWarshallBeta2(data, start+nw, nw, full_width, beta);
// execute the kernel B = BD
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start+nw, start, start+nw,start,start+nw, start+nw,0);
// execute the kernel C = DC
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start, start+nw,start+nw,start+nw,start, start+nw,0);
// execute the kernel A += BC
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start,start,start+nw,start, start, start+nw,1);
} else {
/*A=floyd-warshall(A);
B=AB;
C=CA;
D=D+CB;
D=floyd-warshall(D);
B=BD;
C=DC;
A=A+BC;*/
int nw = width / 2; // new width
// setup execution parameters
dim3 gemmgrid( nw /64, nw/16 );
dim3 gemmthreads( 16, 4 );
// Remember: Column-major
float * A = data + start * full_width + start;
float * B = data + (start+nw) * full_width + start;
float * C = data + start * full_width + (start+nw);
float * D = data + (start+nw) * full_width + (start+nw);
// sgemmNN_MinPlus( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
// no need to send m & n since they are known through grid dimensions !
// Do FW for A
GpuFloydWarshallBeta2(data, start, nw, full_width, beta);
// execute the parallel multiplication kernel B = AB
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, A, full_width, B, full_width, B, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel C = CA
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, C, full_width, A, full_width, C, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel D += CB
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, C, full_width, B, full_width, D, full_width, nw, 1 );
// do FW for D
GpuFloydWarshallBeta(data, start+nw, nw, full_width, beta);
// execute the parallel multiplication kernel B = BD
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, B, full_width, D, full_width, B, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel C = DC
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, D, full_width, C, full_width, C, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel A += BC
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, B, full_width, C, full_width, A, full_width, nw, 1 );
}
}
typedef void (*ApspSeqKernel)(float*, unsigned, unsigned, unsigned);
typedef void (*ApspFunction)(ApspSeqKernel, float*, unsigned, unsigned, unsigned, unsigned);
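// These typedefs allow DoApspOnGpu to take the recursive host driver and the
// per-block kernel as parameters, so different variants can share the
// allocation, copy and timing boilerplate.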
MyCudaProcessResult< std::vector<float> >
DoApspOnGpu(
ApspFunction apspFunction,
ApspSeqKernel apspSeqKernel,
const float* h_graph,
const unsigned graph_size,
const unsigned beta) {
/**
*
*/
unsigned mem_size = graph_size * graph_size * sizeof(float);
float* d_graph = NULL;
MyCudaProcessResult< std::vector<float> > result;
std::vector<float> h_result;
result.CudaStatus() = hipMalloc((void**) &d_graph, mem_size);
if (result.CudaStatus() != hipSuccess) {
std::cout << hipGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
result.CudaStatus() = hipMemcpy(
d_graph,
h_graph,
mem_size,
hipMemcpyHostToDevice
);
if (result.CudaStatus() != hipSuccess) {
std::cout << hipGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start);
	apspFunction(apspSeqKernel, d_graph, 0, graph_size, graph_size, beta);
hipEventRecord(end);
hipEventSynchronize(end);
float* h_result_ptr = (float*) malloc(graph_size * graph_size * sizeof(float));
result.CudaStatus() = hipMemcpy(
h_result_ptr,
d_graph,
mem_size,
hipMemcpyDeviceToHost
);
if (result.CudaStatus() != hipSuccess) {
std::cout << hipGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
for (unsigned i = 0; i < graph_size * graph_size; ++i) {
h_result.push_back(h_result_ptr[i]);
	}
	free(h_result_ptr);
float t;
hipEventElapsedTime(&t, start, end);
result.SetResult(h_result);
result.SetTimeToComplete(t / 1000.0);
result.SetSuccess(true);
return result;
}
void GpuFloydWarshallBeta(
void (*ApspSeqKernel)(float*, unsigned, unsigned, unsigned),
float *data,
int start,
int width,
const unsigned full_width,
const unsigned beta) {
/**
*
*/
if (width <= beta) {
// the computation now can fit in one block
dim3 threads(width / 4, width / 4);
dim3 grid(1, 1);
unsigned shared_mem_size = width * width * sizeof(float);
hipLaunchKernelGGL((*ApspSeqKernel), dim3(grid), dim3(threads), shared_mem_size , 0, data, start, width, full_width);
} else if(width <= FAST_GEMM) {
int nw = width / 2; // new width
unsigned shared_mem_size = 2 * beta * beta * sizeof(float); // we need 2 of them
// setup execution parameters
dim3 threadsmult(beta, beta);
dim3 gridmult(nw / beta, nw / beta);
// Do FW for A
GpuFloydWarshallBeta2(data, start, nw, full_width, beta);
// execute the kernel B = AB
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start+nw, start, start,start,start+nw, start,0);
// execute the kernel C = CA
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start, start+nw,start,start+nw,start, start,0);
// execute the kernel D += CB
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start+nw,start+nw,start,start+nw, start+nw, start,1);
// do FW for D
GpuFloydWarshallBeta2(data, start+nw, nw, full_width, beta);
// execute the kernel B = BD
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start+nw, start, start+nw,start,start+nw, start+nw,0);
// execute the kernel C = DC
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start, start+nw,start+nw,start+nw,start, start+nw,0);
// execute the kernel A += BC
hipLaunchKernelGGL(( matrixMul_I), dim3(gridmult), dim3(threadsmult), shared_mem_size , 0,
data, data, data, full_width, nw, beta, start,start,start+nw,start, start, start+nw,1);
} else {
/*A=floyd-warshall(A);
B=AB;
C=CA;
D=D+CB;
D=floyd-warshall(D);
B=BD;
C=DC;
A=A+BC;*/
int nw = width / 2; // new width
// setup execution parameters
dim3 gemmgrid( nw /64, nw/16 );
dim3 gemmthreads( 16, 4 );
// Remember: Column-major
float * A = data + start * full_width + start;
float * B = data + (start+nw) * full_width + start;
float * C = data + start * full_width + (start+nw);
float * D = data + (start+nw) * full_width + (start+nw);
// sgemmNN_MinPlus( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
// no need to send m & n since they are known through grid dimensions !
// Do FW for A
GpuFloydWarshallBeta2(data, start, nw, full_width, beta);
// execute the parallel multiplication kernel B = AB
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, A, full_width, B, full_width, B, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel C = CA
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, C, full_width, A, full_width, C, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel D += CB
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, C, full_width, B, full_width, D, full_width, nw, 1 );
// do FW for D
GpuFloydWarshallBeta(data, start+nw, nw, full_width, beta);
// execute the parallel multiplication kernel B = BD
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, B, full_width, D, full_width, B, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel C = DC
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, D, full_width, C, full_width, C, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel A += BC
hipLaunchKernelGGL(( sgemmNN_MinPlus), dim3(gemmgrid), dim3(gemmthreads), 0, 0, B, full_width, C, full_width, A, full_width, nw, 1 );
}
}
|
c1194f31ec37547ee0e5159188c9006612c065a2.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "project_apsp.hpp"
#include "my_process_result.hpp"
#include "sequential_apsp.hpp"
#include "gpu_apsp.hpp"
#include "old_asps.hpp"
#include <vector>
#include <iostream>
char* TEST_GRAPH = "C:/Users/Terence/Documents/GitHub/CS791v_Spring2015/Project/Code/Graphs/random_1024_1024_graph.txt";
char* TEST_SOLUTION = "C:/Users/Terence/Documents/GitHub/CS791v_Spring2015/Project/Code/Graphs/random_1024_1024_graph_solution.txt";
void GenerateGraphWithResult(const unsigned size);
MyCudaProcessResult< std::vector<float> >
DoApspOnGpu(
const float* h_graph,
const unsigned graph_size,
const unsigned beta);
MyCudaProcessResult< std::vector<float> >
DoApspOnGpu(
void(*ApspKernel)(float*, unsigned, unsigned, unsigned),
const float* h_graph,
const unsigned graph_size,
const unsigned beta);
void GpuFloydWarshallBeta(float *data, int start, int width, const unsigned full_width, const unsigned beta);
void GpuFloydWarshallBeta2(
float *data,
int start,
int width,
const unsigned full_width,
const unsigned beta);
int
main(int argc, char** argv) {
#if 0
float* graph = easy_graph_b;
for (unsigned k = 0;
#else
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
//GenerateGraphWithResult(4096);
std::vector<float> f;
puts("loading graphs...");
float* graph;
unsigned graph_size = LoadSparseMatrix(
&graph,
TEST_GRAPH
);
float* solution;
LoadSparseMatrix(
&solution,
TEST_SOLUTION
);
unsigned val = 16;
for (unsigned beta = 4; beta <= val; beta *= 2) {
printf("================== Beta: %d =======================\n", beta);
puts("computing...");
MyCudaProcessResult< std::vector<float> > reference_result = DoApspOnGpu(
graph,
graph_size,
beta//32
);
puts("verifying against solution...");
if (GraphsAreEquivalentDefault(reference_result.GetResult().data(), solution, graph_size)) {
puts("The solution is correct - Hooray!");
} else {
puts("Uh oh... SOMETHING WENT WRONG");
}
printf("Solution took %lf seconds to complete.\n\n", reference_result.GetTimeToComplete());
///////////////////////////////////////////////////////////////
puts("computing 2nd version...");
MyCudaProcessResult< std::vector<float> > result = DoApspOnGpu2(
graph,
graph_size,
4 * beta//32
);
printf("\nSolution took %lf seconds to complete.\n\n", result.GetTimeToComplete());
puts("verifying against solution...");
if (GraphsAreEquivalentDefault(result.GetResult().data(), solution, graph_size)) {
puts("The solution is correct - Hooray!");
} else {
puts("Uh oh... SOMETHING WENT WRONG");
}
printf("\n~~~\nSpeedup Factor: %lf\n~~~\n", reference_result.GetTimeToComplete() / result.GetTimeToComplete());
}
free(graph);
free(solution);
#endif
return 0;
}
void GenerateGraphWithResult(const unsigned size) {
char start_file[200];
char solution_file[200];
sprintf(start_file, "C:/Users/Terence/Documents/GitHub/CS791v_Spring2015/Project/Code/Graphs/random_%d_%d_graph.txt", size, size);
sprintf(solution_file, "C:/Users/Terence/Documents/GitHub/CS791v_Spring2015/Project/Code/Graphs/random_%d_%d_graph_solution.txt", size, size);
float* graph = NULL;
GenerateErdosRenyiGraph(&graph, size, 6);
	WriteSparseMatrix(graph, size, start_file);
NaiveFloydWarshall(graph, size);
WriteSparseMatrix(graph, size, solution_file);
}
MyCudaProcessResult< std::vector<float> >
DoApspOnGpu(
const float* h_graph,
const unsigned graph_size,
const unsigned beta) {
/**
*
*/
unsigned mem_size = graph_size * graph_size * sizeof(float);
float* d_graph = NULL;
MyCudaProcessResult< std::vector<float> > result;
std::vector<float> h_result(graph_size * graph_size);
result.CudaStatus() = cudaMalloc((void**) &d_graph, mem_size);
if (result.CudaStatus() != cudaSuccess) {
std::cout << cudaGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
result.CudaStatus() = cudaMemcpy(
d_graph,
h_graph,
mem_size,
cudaMemcpyHostToDevice
);
if (result.CudaStatus() != cudaSuccess) {
std::cout << cudaGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
GpuFloydWarshallBeta(d_graph, 0, graph_size, graph_size, beta);
cudaEventRecord(end);
cudaEventSynchronize(end);
result.CudaStatus() = cudaMemcpy(
h_result.data(),
d_graph,
mem_size,
cudaMemcpyDeviceToHost
);
if (result.CudaStatus() != cudaSuccess) {
std::cout << cudaGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
float t;
cudaEventElapsedTime(&t, start, end);
result.SetResult(h_result);
result.SetTimeToComplete(t / 1000.0);
result.SetSuccess(true);
return result;
}
void GpuFloydWarshallBeta(
float *data,
int start,
int width,
const unsigned full_width,
const unsigned beta) {
/**
*
*/
if (width <= beta) {
// the computation now can fit in one block
dim3 threads(width, width);
dim3 grid(1, 1);
apsp_seq_I<<< grid, threads >>>(data, start, width, full_width);
} else if(width <= FAST_GEMM) {
int nw = width / 2; // new width
unsigned shared_mem_size = 2 * beta * beta * sizeof(float); // we need 2 of them
// setup execution parameters
dim3 threadsmult(beta, beta);
dim3 gridmult(nw / beta, nw / beta);
// Do FW for A
GpuFloydWarshallBeta(data, start, nw, full_width, beta);
// execute the kernel B = AB
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start+nw, start, start,start,start+nw, start,0);
// execute the kernel C = CA
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start, start+nw,start,start+nw,start, start,0);
// execute the kernel D += CB
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start+nw,start+nw,start,start+nw, start+nw, start,1);
// do FW for D
GpuFloydWarshallBeta(data, start+nw, nw, full_width, beta);
// execute the kernel B = BD
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start+nw, start, start+nw,start,start+nw, start+nw,0);
// execute the kernel C = DC
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start, start+nw,start+nw,start+nw,start, start+nw,0);
// execute the kernel A += BC
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start,start,start+nw,start, start, start+nw,1);
} else {
/*A=floyd-warshall(A);
B=AB;
C=CA;
D=D+CB;
D=floyd-warshall(D);
B=BD;
C=DC;
A=A+BC;*/
int nw = width / 2; // new width
// setup execution parameters
dim3 gemmgrid( nw /64, nw/16 );
dim3 gemmthreads( 16, 4 );
// Remember: Column-major
float * A = data + start * full_width + start;
float * B = data + (start+nw) * full_width + start;
float * C = data + start * full_width + (start+nw);
float * D = data + (start+nw) * full_width + (start+nw);
// sgemmNN_MinPlus( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
// no need to send m & n since they are known through grid dimensions !
// Do FW for A
GpuFloydWarshallBeta(data, start, nw, full_width, beta);
// execute the parallel multiplication kernel B = AB
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(A, full_width, B, full_width, B, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel C = CA
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(C, full_width, A, full_width, C, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel D += CB
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(C, full_width, B, full_width, D, full_width, nw, 1 );
// do FW for D
GpuFloydWarshallBeta(data, start+nw, nw, full_width, beta);
// execute the parallel multiplication kernel B = BD
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(B, full_width, D, full_width, B, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel C = DC
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(D, full_width, C, full_width, C, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel A += BC
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(B, full_width, C, full_width, A, full_width, nw, 1 );
}
}
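// The matrixMul_I / sgemmNN_MinPlus launches above update sub-blocks in the
// (min, +) semiring: C(i,j) = min(C(i,j), min_k(A(i,k) + B(k,j))). The host
// routine below is a hedged reference sketch of that product (column-major,
// one common leading dimension, C not aliasing A or B). The `accumulate` flag
// is my reading of the trailing FLOATINF-vs-1 argument as overwrite-vs-merge;
// it is not the kernel's documented contract.
static void MinPlusProductHostSketch(const float* A, const float* B, float* C,
                                     unsigned n, unsigned ld, bool accumulate) {
  for (unsigned j = 0; j < n; ++j) {      // column of C
    for (unsigned i = 0; i < n; ++i) {    // row of C
      float best = accumulate ? C[j * ld + i] : FLOATINF;
      for (unsigned k = 0; k < n; ++k) {
        float cand = A[k * ld + i] + B[j * ld + k];  // A(i,k) + B(k,j)
        if (cand < best) best = cand;
      }
      C[j * ld + i] = best;
    }
  }
}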
////////////////////////////////////////////////////////////////////////////////
MyCudaProcessResult< std::vector<float> >
DoApspOnGpu2(
const float* h_graph,
const unsigned graph_size,
const unsigned beta) {
/**
*
*/
unsigned mem_size = graph_size * graph_size * sizeof(float);
float* d_graph = NULL;
MyCudaProcessResult< std::vector<float> > result;
std::vector<float> h_result;
result.CudaStatus() = cudaMalloc((void**) &d_graph, mem_size);
if (result.CudaStatus() != cudaSuccess) {
std::cout << cudaGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
result.CudaStatus() = cudaMemcpy(
d_graph,
h_graph,
mem_size,
cudaMemcpyHostToDevice
);
if (result.CudaStatus() != cudaSuccess) {
std::cout << cudaGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
puts("DoApspOnGpu2: launching GpuFloydWarshallBeta2");
GpuFloydWarshallBeta2(d_graph, 0, graph_size, graph_size, beta);
cudaEventRecord(end);
cudaEventSynchronize(end);
float* h_result_ptr = (float*) malloc(graph_size * graph_size * sizeof(float));
result.CudaStatus() = cudaMemcpy(
h_result_ptr,
d_graph,
mem_size,
cudaMemcpyDeviceToHost
);
if (result.CudaStatus() != cudaSuccess) {
std::cout << cudaGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
for (unsigned i = 0; i < graph_size * graph_size; ++i) {
h_result.push_back(h_result_ptr[i]);
}
free(h_result_ptr);
float t;
cudaEventElapsedTime(&t, start, end);
result.SetResult(h_result);
result.SetTimeToComplete(t / 1000.0);
result.SetSuccess(true);
return result;
}
void GpuFloydWarshallBeta2(
float *data,
int start,
int width,
const unsigned full_width,
const unsigned beta) {
/**
*
*/
if (width <= beta) {
// the computation now can fit in one block
dim3 threads(width / 4, width / 4);
dim3 grid(1, 1);
unsigned shared_mem_size = width * width * sizeof(float);
apsp_seq_I_shared_looped_4<<< grid, threads, shared_mem_size >>>(data, start, width, full_width);
} else if(width <= FAST_GEMM) {
int nw = width / 2; // new width
unsigned shared_mem_size = 2 * beta * beta * sizeof(float); // we need 2 of them
// setup execution parameters
dim3 threadsmult(beta, beta);
dim3 gridmult(nw / beta, nw / beta);
// Do FW for A
GpuFloydWarshallBeta2(data, start, nw, full_width, beta);
// execute the kernel B = AB
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start+nw, start, start,start,start+nw, start,0);
// execute the kernel C = CA
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start, start+nw,start,start+nw,start, start,0);
// execute the kernel D += CB
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start+nw,start+nw,start,start+nw, start+nw, start,1);
// do FW for D
GpuFloydWarshallBeta2(data, start+nw, nw, full_width, beta);
// execute the kernel B = BD
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start+nw, start, start+nw,start,start+nw, start+nw,0);
// execute the kernel C = DC
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start, start+nw,start+nw,start+nw,start, start+nw,0);
// execute the kernel A += BC
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start,start,start+nw,start, start, start+nw,1);
} else {
/*A=floyd-warshall(A);
B=AB;
C=CA;
D=D+CB;
D=floyd-warshall(D);
B=BD;
C=DC;
A=A+BC;*/
int nw = width / 2; // new width
// setup execution parameters
dim3 gemmgrid( nw /64, nw/16 );
dim3 gemmthreads( 16, 4 );
// Remember: Column-major
float * A = data + start * full_width + start;
float * B = data + (start+nw) * full_width + start;
float * C = data + start * full_width + (start+nw);
float * D = data + (start+nw) * full_width + (start+nw);
// sgemmNN_MinPlus( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
// no need to send m & n since they are known through grid dimensions !
// Do FW for A
GpuFloydWarshallBeta2(data, start, nw, full_width, beta);
// execute the parallel multiplication kernel B = AB
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(A, full_width, B, full_width, B, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel C = CA
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(C, full_width, A, full_width, C, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel D += CB
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(C, full_width, B, full_width, D, full_width, nw, 1 );
// do FW for D
GpuFloydWarshallBeta2(data, start+nw, nw, full_width, beta);
// execute the parallel multiplication kernel B = BD
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(B, full_width, D, full_width, B, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel C = DC
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(D, full_width, C, full_width, C, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel A += BC
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(B, full_width, C, full_width, A, full_width, nw, 1 );
}
}
typedef void (*ApspSeqKernel)(float*, unsigned, unsigned, unsigned);
typedef void (*ApspFunction)(ApspSeqKernel, float*, unsigned, unsigned, unsigned, unsigned);
MyCudaProcessResult< std::vector<float> >
DoApspOnGpu(
ApspFunction apspFunction,
ApspSeqKernel apspSeqKernel,
const float* h_graph,
const unsigned graph_size,
const unsigned beta) {
/**
*
*/
unsigned mem_size = graph_size * graph_size * sizeof(float);
float* d_graph = NULL;
MyCudaProcessResult< std::vector<float> > result;
std::vector<float> h_result;
result.CudaStatus() = cudaMalloc((void**) &d_graph, mem_size);
if (result.CudaStatus() != cudaSuccess) {
std::cout << cudaGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
result.CudaStatus() = cudaMemcpy(
d_graph,
h_graph,
mem_size,
cudaMemcpyHostToDevice
);
if (result.CudaStatus() != cudaSuccess) {
std::cout << cudaGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
apspFunction(apspSeqKernel, d_graph, 0, graph_size, graph_size, beta);
cudaEventRecord(end);
cudaEventSynchronize(end);
float* h_result_ptr = (float*) malloc(graph_size * graph_size * sizeof(float));
result.CudaStatus() = cudaMemcpy(
h_result_ptr,
d_graph,
mem_size,
cudaMemcpyDeviceToHost
);
if (result.CudaStatus() != cudaSuccess) {
std::cout << cudaGetErrorName(result.CudaStatus()) << std::endl;
return result;
}
for (unsigned i = 0; i < graph_size * graph_size; ++i) {
h_result.push_back(h_result_ptr[i]);
}
free(h_result_ptr);
float t;
cudaEventElapsedTime(&t, start, end);
result.SetResult(h_result);
result.SetTimeToComplete(t / 1000.0);
result.SetSuccess(true);
return result;
}
void GpuFloydWarshallBeta(
void (*ApspSeqKernel)(float*, unsigned, unsigned, unsigned),
float *data,
int start,
int width,
const unsigned full_width,
const unsigned beta) {
/**
*
*/
if (width <= beta) {
// the computation now can fit in one block
dim3 threads(width / 4, width / 4);
dim3 grid(1, 1);
unsigned shared_mem_size = width * width * sizeof(float);
ApspSeqKernel<<< grid, threads, shared_mem_size >>>(data, start, width, full_width);
} else if(width <= FAST_GEMM) {
int nw = width / 2; // new width
unsigned shared_mem_size = 2 * beta * beta * sizeof(float); // we need 2 of them
// setup execution parameters
dim3 threadsmult(beta, beta);
dim3 gridmult(nw / beta, nw / beta);
// Do FW for A
GpuFloydWarshallBeta2(data, start, nw, full_width, beta);
// execute the kernel B = AB
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start+nw, start, start,start,start+nw, start,0);
// execute the kernel C = CA
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start, start+nw,start,start+nw,start, start,0);
// execute the kernel D += CB
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start+nw,start+nw,start,start+nw, start+nw, start,1);
// do FW for D
GpuFloydWarshallBeta2(data, start+nw, nw, full_width, beta);
// execute the kernel B = BD
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start+nw, start, start+nw,start,start+nw, start+nw,0);
// execute the kernel C = DC
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start, start+nw,start+nw,start+nw,start, start+nw,0);
// execute the kernel A += BC
matrixMul_I<<< gridmult, threadsmult, shared_mem_size >>>
(data, data, data, full_width, nw, beta, start,start,start+nw,start, start, start+nw,1);
} else {
/*A=floyd-warshall(A);
B=AB;
C=CA;
D=D+CB;
D=floyd-warshall(D);
B=BD;
C=DC;
A=A+BC;*/
int nw = width / 2; // new width
// setup execution parameters
dim3 gemmgrid( nw /64, nw/16 );
dim3 gemmthreads( 16, 4 );
// Remember: Column-major
float * A = data + start * full_width + start;
float * B = data + (start+nw) * full_width + start;
float * C = data + start * full_width + (start+nw);
float * D = data + (start+nw) * full_width + (start+nw);
// sgemmNN_MinPlus( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta )
// no need to send m & n since they are known through grid dimensions !
// Do FW for A
GpuFloydWarshallBeta2(data, start, nw, full_width, beta);
// execute the parallel multiplication kernel B = AB
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(A, full_width, B, full_width, B, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel C = CA
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(C, full_width, A, full_width, C, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel D += CB
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(C, full_width, B, full_width, D, full_width, nw, 1 );
// do FW for D
GpuFloydWarshallBeta2(data, start+nw, nw, full_width, beta);
// execute the parallel multiplication kernel B = BD
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(B, full_width, D, full_width, B, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel C = DC
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(D, full_width, C, full_width, C, full_width, nw, FLOATINF );
// execute the parallel multiplication kernel A += BC
sgemmNN_MinPlus<<<gemmgrid, gemmthreads>>>(B, full_width, C, full_width, A, full_width, nw, 1 );
}
}
|
aee71ade60373b705879d4e0264ae8ada0d5543b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include <vector>
#include "../src/communicator.h"
#include "../src/error.cuh"
#define COUNT 50'000'000LL
__global__ void set_data(uint64_t *start_addr, uint64_t size, uint64_t start_val)
{
const int ithread = threadIdx.x + blockDim.x * blockIdx.x;
const int stride = blockDim.x * gridDim.x;
for (uint64_t ielement = ithread; ielement < size; ielement += stride) {
start_addr[ielement] = (start_val + ielement);
}
}
__global__ void test_correctness(uint64_t *start_addr, uint64_t size, uint64_t start_val)
{
const int ithread = threadIdx.x + blockDim.x * blockIdx.x;
const int stride = blockDim.x * gridDim.x;
for (uint64_t ielement = ithread; ielement < size; ielement += stride) {
assert(start_addr[ielement] == (start_val + ielement));
}
}
int main(int argc, char *argv[])
{
UCXBufferCommunicator communicator;
communicator.initialize(argc, argv);
int mpi_rank = communicator.mpi_rank;
int mpi_size = communicator.mpi_size;
communicator.setup_cache(2 * mpi_size, 20'000'000LL);
communicator.warmup_cache();
/* Send and recv data */
uint64_t *send_buf {nullptr};
std::vector<uint64_t *> recv_buf(mpi_size, nullptr);
std::vector<comm_handle_t> send_reqs(mpi_size, nullptr);
std::vector<comm_handle_t> recv_reqs(mpi_size, nullptr);
RMM_CALL(RMM_ALLOC(&send_buf, COUNT * sizeof(uint64_t), 0));
int grid_size {-1};
int block_size {-1};
CUDA_RT_CALL(hipOccupancyMaxPotentialBlockSize(&grid_size, &block_size, set_data));
hipLaunchKernelGGL(( set_data), dim3(grid_size), dim3(block_size), 0, 0, send_buf, COUNT, COUNT * mpi_rank);
for (int irank = 0; irank < mpi_size; irank ++) {
if (irank != mpi_rank) {
send_reqs[irank] = communicator.send((void *)send_buf, COUNT, sizeof(uint64_t), irank, 32);
}
}
int64_t count_received;
for (int irank = mpi_size - 1; irank >= 0; irank --) {
if (irank != mpi_rank) {
recv_reqs[irank] = communicator.recv(
(void **)&recv_buf[irank], &count_received, sizeof(uint64_t), irank, 32
);
}
}
communicator.waitall(send_reqs);
communicator.waitall(recv_reqs);
assert(count_received == COUNT);
/* Test the correctness */
for (int irank = 0; irank < mpi_size; irank ++) {
if (irank != mpi_rank) {
hipLaunchKernelGGL(( test_correctness), dim3(grid_size), dim3(block_size), 0, 0, recv_buf[irank], COUNT, COUNT * irank);
}
}
/* Cleanup */
RMM_CALL(RMM_FREE(send_buf, 0));
for (int irank = 0; irank < mpi_size; irank ++) {
if (irank != mpi_rank) {
RMM_CALL(RMM_FREE(recv_buf[irank], 0));
}
}
communicator.finalize();
return 0;
}
|
aee71ade60373b705879d4e0264ae8ada0d5543b.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include <vector>
#include "../src/communicator.h"
#include "../src/error.cuh"
#define COUNT 50'000'000LL
__global__ void set_data(uint64_t *start_addr, uint64_t size, uint64_t start_val)
{
const int ithread = threadIdx.x + blockDim.x * blockIdx.x;
const int stride = blockDim.x * gridDim.x;
for (uint64_t ielement = ithread; ielement < size; ielement += stride) {
start_addr[ielement] = (start_val + ielement);
}
}
__global__ void test_correctness(uint64_t *start_addr, uint64_t size, uint64_t start_val)
{
const int ithread = threadIdx.x + blockDim.x * blockIdx.x;
const int stride = blockDim.x * gridDim.x;
for (uint64_t ielement = ithread; ielement < size; ielement += stride) {
assert(start_addr[ielement] == (start_val + ielement));
}
}
int main(int argc, char *argv[])
{
UCXBufferCommunicator communicator;
communicator.initialize(argc, argv);
int mpi_rank = communicator.mpi_rank;
int mpi_size = communicator.mpi_size;
communicator.setup_cache(2 * mpi_size, 20'000'000LL);
communicator.warmup_cache();
/* Send and recv data */
uint64_t *send_buf {nullptr};
std::vector<uint64_t *> recv_buf(mpi_size, nullptr);
std::vector<comm_handle_t> send_reqs(mpi_size, nullptr);
std::vector<comm_handle_t> recv_reqs(mpi_size, nullptr);
RMM_CALL(RMM_ALLOC(&send_buf, COUNT * sizeof(uint64_t), 0));
int grid_size {-1};
int block_size {-1};
CUDA_RT_CALL(cudaOccupancyMaxPotentialBlockSize(&grid_size, &block_size, set_data));
set_data<<<grid_size, block_size>>>(send_buf, COUNT, COUNT * mpi_rank);
for (int irank = 0; irank < mpi_size; irank ++) {
if (irank != mpi_rank) {
send_reqs[irank] = communicator.send((void *)send_buf, COUNT, sizeof(uint64_t), irank, 32);
}
}
int64_t count_received;
for (int irank = mpi_size - 1; irank >= 0; irank --) {
if (irank != mpi_rank) {
recv_reqs[irank] = communicator.recv(
(void **)&recv_buf[irank], &count_received, sizeof(uint64_t), irank, 32
);
}
}
communicator.waitall(send_reqs);
communicator.waitall(recv_reqs);
assert(count_received == COUNT);
/* Test the correctness */
for (int irank = 0; irank < mpi_size; irank ++) {
if (irank != mpi_rank) {
test_correctness<<<grid_size, block_size>>>(recv_buf[irank], COUNT, COUNT * irank);
}
}
/* Cleanup */
RMM_CALL(RMM_FREE(send_buf, 0));
for (int irank = 0; irank < mpi_size; irank ++) {
if (irank != mpi_rank) {
RMM_CALL(RMM_FREE(recv_buf[irank], 0));
}
}
communicator.finalize();
return 0;
}
|
0144c51aede45e474dbb985b4a990bd5db5861ef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
Parallel reduction kernels
*/
#ifndef _REDUCE_KERNEL_H_
#define _REDUCE_KERNEL_H_
#include <stdio.h>
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<>
struct SharedMemory<double>
{
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/* This reduction interleaves which threads are active by using the modulo
operator. This operator is very expensive on GPUs, and the interleaved
inactivity means that no whole warps are active, which is also very
inefficient */
template <class T>
__global__ void
reduce0(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=1; s < blockDim.x; s *= 2)
{
// modulo arithmetic is slow!
if ((tid % (2*s)) == 0)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/* This version uses contiguous threads, but its interleaved
addressing results in many shared memory bank conflicts.
*/
template <class T>
__global__ void
reduce1(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=1; s < blockDim.x; s *= 2)
{
int index = 2 * s * tid;
if (index < blockDim.x)
{
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
template <class T>
__global__ void
reduce2(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
template <class T>
__global__ void
reduce3(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
T mySum = (i < n) ? g_idata[i] : 0;
if (i + blockDim.x < n)
mySum += g_idata[i+blockDim.x];
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] = mySum = mySum + sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
This version uses the warp shuffle operation if available to reduce
warp synchronization. When shuffle is not available the final warp's
worth of work is unrolled to reduce looping overhead.
See http://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
for additional information about using shuffle to perform a reduction
within a warp.
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
template <class T, unsigned int blockSize>
__global__ void
reduce4(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
T mySum = (i < n) ? g_idata[i] : 0;
if (i + blockSize < n)
mySum += g_idata[i+blockSize];
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>32; s>>=1)
{
if (tid < s)
{
sdata[tid] = mySum = mySum + sdata[tid + s];
}
__syncthreads();
}
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += __shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
This version is completely unrolled, unless warp shuffle is available, then
shuffle is used within a loop. It uses a template parameter to achieve
optimal code for any (power of 2) number of threads. This requires a switch
statement in the host code to handle all the different thread block sizes at
compile time. When shuffle is available, it is used to reduce warp synchronization.
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
template <class T, unsigned int blockSize>
__global__ void
reduce5(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
T mySum = (i < n) ? g_idata[i] : 0;
if (i + blockSize < n)
mySum += g_idata[i+blockSize];
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += __shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void
reduce6(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
mySum += g_idata[i+blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += __shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
extern "C"
bool isPow2(unsigned int x);
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
template <class T>
void
reduce(int size, int threads, int blocks,
int whichKernel, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 0:
hipLaunchKernelGGL(( reduce0<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( reduce1<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( reduce2<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 3:
hipLaunchKernelGGL(( reduce3<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 4:
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduce4<T, 512>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( reduce4<T, 256>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( reduce4<T, 128>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( reduce4<T, 64>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( reduce4<T, 32>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( reduce4<T, 16>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( reduce4<T, 8>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( reduce4<T, 4>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( reduce4<T, 2>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( reduce4<T, 1>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
}
break;
case 5:
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduce5<T, 512>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( reduce5<T, 256>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( reduce5<T, 128>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( reduce5<T, 64>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( reduce5<T, 32>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( reduce5<T, 16>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( reduce5<T, 8>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( reduce5<T, 4>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( reduce5<T, 2>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( reduce5<T, 1>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
}
break;
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
}
}
else
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
}
}
break;
}
}
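// Illustrative host-side driver for the wrapper above: run one pass of kernel 6
// to get per-block partial sums, then finish the reduction on the CPU. This is
// a hedged sketch added for clarity -- the thread/block choice, buffer sizes and
// the final CPU pass are example assumptions, not part of the original sample.
static float example_reduce_usage(const float* h_in, int n)
{
    int threads = 256;
    int blocks = (n + threads * 2 - 1) / (threads * 2); // reduce6 reads 2 elements per thread
    float *d_in = NULL, *d_out = NULL;
    hipMalloc((void**)&d_in, n * sizeof(float));
    hipMalloc((void**)&d_out, blocks * sizeof(float));
    hipMemcpy(d_in, h_in, n * sizeof(float), hipMemcpyHostToDevice);
    reduce<float>(n, threads, blocks, 6, d_in, d_out); // per-block partial sums in d_out
    float* h_partial = new float[blocks];
    hipMemcpy(h_partial, d_out, blocks * sizeof(float), hipMemcpyDeviceToHost);
    float sum = 0.0f;
    for (int i = 0; i < blocks; ++i) sum += h_partial[i]; // finish on the host
    delete[] h_partial;
    hipFree(d_in);
    hipFree(d_out);
    return sum;
}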
// Instantiate the reduction function for 3 types
template void
reduce<int>(int size, int threads, int blocks,
int whichKernel, int *d_idata, int *d_odata);
template void
reduce<float>(int size, int threads, int blocks,
int whichKernel, float *d_idata, float *d_odata);
template void
reduce<double>(int size, int threads, int blocks,
int whichKernel, double *d_idata, double *d_odata);
#endif // #ifndef _REDUCE_KERNEL_H_
|
0144c51aede45e474dbb985b4a990bd5db5861ef.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
Parallel reduction kernels
*/
#ifndef _REDUCE_KERNEL_H_
#define _REDUCE_KERNEL_H_
#include <stdio.h>
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<>
struct SharedMemory<double>
{
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/* This reduction interleaves which threads are active by using the modulo
operator. This operator is very expensive on GPUs, and the interleaved
inactivity means that no whole warps are active, which is also very
inefficient */
template <class T>
__global__ void
reduce0(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=1; s < blockDim.x; s *= 2)
{
// modulo arithmetic is slow!
if ((tid % (2*s)) == 0)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/* This version uses contiguous threads, but its interleaved
addressing results in many shared memory bank conflicts.
*/
template <class T>
__global__ void
reduce1(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=1; s < blockDim.x; s *= 2)
{
int index = 2 * s * tid;
if (index < blockDim.x)
{
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
template <class T>
__global__ void
reduce2(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
template <class T>
__global__ void
reduce3(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
T mySum = (i < n) ? g_idata[i] : 0;
if (i + blockDim.x < n)
mySum += g_idata[i+blockDim.x];
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] = mySum = mySum + sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
This version uses the warp shuffle operation if available to reduce
warp synchronization. When shuffle is not available the final warp's
worth of work is unrolled to reduce looping overhead.
See http://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
for additional information about using shuffle to perform a reduction
within a warp.
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
template <class T, unsigned int blockSize>
__global__ void
reduce4(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
T mySum = (i < n) ? g_idata[i] : 0;
if (i + blockSize < n)
mySum += g_idata[i+blockSize];
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>32; s>>=1)
{
if (tid < s)
{
sdata[tid] = mySum = mySum + sdata[tid + s];
}
__syncthreads();
}
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += __shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
This version is completely unrolled, unless warp shuffle is available, then
shuffle is used within a loop. It uses a template parameter to achieve
optimal code for any (power of 2) number of threads. This requires a switch
statement in the host code to handle all the different thread block sizes at
compile time. When shuffle is available, it is used to reduce warp synchronization.
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
template <class T, unsigned int blockSize>
__global__ void
reduce5(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
T mySum = (i < n) ? g_idata[i] : 0;
if (i + blockSize < n)
mySum += g_idata[i+blockSize];
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += __shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void
reduce6(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
mySum += g_idata[i+blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += __shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
extern "C"
bool isPow2(unsigned int x);
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
template <class T>
void
reduce(int size, int threads, int blocks,
int whichKernel, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 0:
reduce0<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce1<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce2<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 3:
reduce3<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
switch (threads)
{
case 512:
reduce4<T, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce4<T, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce4<T, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce4<T, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce4<T, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce4<T, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce4<T, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce4<T, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce4<T, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce4<T, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
break;
case 5:
switch (threads)
{
case 512:
reduce5<T, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce5<T, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce5<T, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce5<T, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce5<T, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce5<T, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce5<T, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce5<T, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce5<T, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce5<T, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
break;
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 512:
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
else
{
switch (threads)
{
case 512:
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
break;
}
}
// Instantiate the reduction function for 3 types
template void
reduce<int>(int size, int threads, int blocks,
int whichKernel, int *d_idata, int *d_odata);
template void
reduce<float>(int size, int threads, int blocks,
int whichKernel, float *d_idata, float *d_odata);
template void
reduce<double>(int size, int threads, int blocks,
int whichKernel, double *d_idata, double *d_odata);
#endif // #ifndef _REDUCE_KERNEL_H_
|
2072d26dbb64e266f65d97c27c38a2f6b4bd6ee4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* lapl_dfsion.cu
*
* Created on: Mar 6, 2014
* Author: p054
*/
#include "lapl_dfsion.h"
lapl_dfsion::lapl_dfsion(float *img_In, int w, int h, int nc) {
hipMalloc(&d_imgIn, (size_t) w * h * nc * sizeof(float));
CUDA_CHECK;
hipMalloc(&d_imgOut, (size_t) w * h * nc * sizeof(float));
CUDA_CHECK;
this->w = w;
this->h = h;
this->nc = nc;
hipMemcpy(d_imgIn, img_In, (size_t) w * h * nc * sizeof(float),
hipMemcpyHostToDevice);
CUDA_CHECK;
}
void lapl_dfsion::lapl_dfsion_caller(dim3 grid, dim3 block, float finalTime,
float timeStep) {
for (float t = 0.f; t < finalTime; t += timeStep) {
hipLaunchKernelGGL(( gpu_laplace_dfsion_kernel), dim3(grid), dim3(block), 0, 0, d_imgIn, d_imgOut, w, h, nc,
timeStep);
hipDeviceSynchronize();
float *swap = d_imgIn;
d_imgIn = d_imgOut;
d_imgOut = swap;
}
}
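// The kernel below performs one explicit (forward-Euler) diffusion step per
// call: for every pixel and channel it adds timeStep times the discrete
// 5-point Laplacian, with out-of-range neighbours dropped from both the sum
// and the centre weight (zero-flux boundary handling). Comment added for
// clarity; the behaviour is read directly from the index arithmetic below.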
__global__ void gpu_laplace_dfsion_kernel(float * d_imgIn, float *d_imgOut,
int w, int h, int nc, float timeStep) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int ind = x + w * y;
if (x < w && y < h) {
for (int i = 0; i < nc; i++) {
d_imgOut[ind + i * w * h] = d_imgIn[ind + i * w * h] + timeStep * (((x + 1) < w ? 1 : 0) * d_imgIn[ind + i * w * h + 1] + \
(x > 0 ? 1 : 0) * d_imgIn[ind + i * w * h - 1] + ((y + 1) < h ? 1 : 0) * d_imgIn[ind + i * w * h + w] + \
(y > 0 ? 1 : 0) * d_imgIn[ind + i * w * h - w] - (((x + 1) < w ? 1 : 0) + (x > 0 ? 1 : 0) + ((y + 1) < h ? 1 : 0) + \
(y > 0 ? 1 : 0)) * d_imgIn[ind + i * w * h]);
}
}
}
void lapl_dfsion::lapl_output(float * imgOut) {
hipMemcpy(imgOut, d_imgIn, (size_t) w * h * nc * sizeof(float),
hipMemcpyDeviceToHost);
}
lapl_dfsion::~lapl_dfsion() {
hipFree(d_imgIn);
hipFree(d_imgOut);
}
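// Illustrative usage of the class above -- a hedged sketch; the block/grid
// shape, final time and time step are example values, not taken from the
// original project. img must hold w*h*nc floats in the per-channel plane
// layout that gpu_laplace_dfsion_kernel indexes.
static void lapl_dfsion_example_usage(float* img, int w, int h, int nc) {
    lapl_dfsion diffusion(img, w, h, nc);
    dim3 block(32, 8, 1);
    dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y, 1);
    diffusion.lapl_dfsion_caller(grid, block, 2.0f /*finalTime*/, 0.2f /*timeStep*/);
    diffusion.lapl_output(img); // smoothed result copied back into img
}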
|
2072d26dbb64e266f65d97c27c38a2f6b4bd6ee4.cu
|
/*
* lapl_dfsion.cu
*
* Created on: Mar 6, 2014
* Author: p054
*/
#include "lapl_dfsion.h"
lapl_dfsion::lapl_dfsion(float *img_In, int w, int h, int nc) {
cudaMalloc(&d_imgIn, (size_t) w * h * nc * sizeof(float));
CUDA_CHECK;
cudaMalloc(&d_imgOut, (size_t) w * h * nc * sizeof(float));
CUDA_CHECK;
this->w = w;
this->h = h;
this->nc = nc;
cudaMemcpy(d_imgIn, img_In, (size_t) w * h * nc * sizeof(float),
cudaMemcpyHostToDevice);
CUDA_CHECK;
}
void lapl_dfsion::lapl_dfsion_caller(dim3 grid, dim3 block, float finalTime,
float timeStep) {
for (float t = 0.f; t < finalTime; t += timeStep) {
gpu_laplace_dfsion_kernel<<<grid, block>>>(d_imgIn, d_imgOut, w, h, nc,
timeStep);
cudaDeviceSynchronize();
float *swap = d_imgIn;
d_imgIn = d_imgOut;
d_imgOut = swap;
}
}
__global__ void gpu_laplace_dfsion_kernel(float * d_imgIn, float *d_imgOut,
int w, int h, int nc, float timeStep) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int ind = x + w * y;
if (x < w && y < h) {
for (int i = 0; i < nc; i++) {
d_imgOut[ind + i * w * h] = d_imgIn[ind + i * w * h] + timeStep * (((x + 1) < w ? 1 : 0) * d_imgIn[ind + i * w * h + 1] + \
(x > 0 ? 1 : 0) * d_imgIn[ind + i * w * h - 1] + ((y + 1) < h ? 1 : 0) * d_imgIn[ind + i * w * h + w] + \
(y > 0 ? 1 : 0) * d_imgIn[ind + i * w * h - w] - (((x + 1) < w ? 1 : 0) + (x > 0 ? 1 : 0) + ((y + 1) < h ? 1 : 0) + \
(y > 0 ? 1 : 0)) * d_imgIn[ind + i * w * h]);
}
}
}
void lapl_dfsion::lapl_output(float * imgOut) {
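// the buffers are swapped after every step, so the latest result lives in d_imgIn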
cudaMemcpy(imgOut, d_imgIn, (size_t) w * h * nc * sizeof(float),
cudaMemcpyDeviceToHost);
}
lapl_dfsion::~lapl_dfsion() {
cudaFree(d_imgIn);
cudaFree(d_imgOut);
}
|
e1e7c397b0f9c311a40927c5e7e5175784d04bf3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void getMaxElevation_gpu( const float* values, float* currentMaxElevation) {
float tmp = values[0]+values[3];
float tmpmax = currentMaxElevation[0]+currentMaxElevation[3];
if (tmp > tmpmax ) {
currentMaxElevation[0] = values[0];
currentMaxElevation[1] = values[1];
currentMaxElevation[2] = values[2];
currentMaxElevation[3] = values[3];
}
}
// CUDA kernel function
__global__ void op_cuda_getMaxElevation(
const float *__restrict arg0,
float *arg1,
int set_size ) {
//process set elements
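//grid-stride loop: each thread strides by blockDim.x*gridDim.x so any launch size covers the whole set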
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
getMaxElevation_gpu(arg0+n*4,
arg1+n*4);
}
}
//host stub function
void op_par_loop_getMaxElevation(char const *name, op_set set,
op_arg arg0,
op_arg arg1){
int nargs = 2;
op_arg args[2];
args[0] = arg0;
args[1] = arg1;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(18);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[18].name = name;
OP_kernels[18].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: getMaxElevation");
}
int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
if (set_size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_18
int nthread = OP_BLOCK_SIZE_18;
#else
int nthread = OP_block_size;
#endif
int nblocks = 200;
hipLaunchKernelGGL(( op_cuda_getMaxElevation), dim3(nblocks),dim3(nthread), 0, 0,
(float *) arg0.data_d,
(float *) arg1.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
if (OP_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
}
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[18].time += wall_t2 - wall_t1;
OP_kernels[18].transfer += (float)set->size * arg0.size;
OP_kernels[18].transfer += (float)set->size * arg1.size * 2.0f;
}
|
e1e7c397b0f9c311a40927c5e7e5175784d04bf3.cu
|
//
// auto-generated by op2.py
//
//user function
__device__ void getMaxElevation_gpu( const float* values, float* currentMaxElevation) {
float tmp = values[0]+values[3];
float tmpmax = currentMaxElevation[0]+currentMaxElevation[3];
if (tmp > tmpmax ) {
currentMaxElevation[0] = values[0];
currentMaxElevation[1] = values[1];
currentMaxElevation[2] = values[2];
currentMaxElevation[3] = values[3];
}
}
// CUDA kernel function
__global__ void op_cuda_getMaxElevation(
const float *__restrict arg0,
float *arg1,
int set_size ) {
//process set elements
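//grid-stride loop: each thread strides by blockDim.x*gridDim.x so any launch size covers the whole set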
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
getMaxElevation_gpu(arg0+n*4,
arg1+n*4);
}
}
//host stub function
void op_par_loop_getMaxElevation(char const *name, op_set set,
op_arg arg0,
op_arg arg1){
int nargs = 2;
op_arg args[2];
args[0] = arg0;
args[1] = arg1;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(18);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[18].name = name;
OP_kernels[18].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: getMaxElevation");
}
int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
if (set_size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_18
int nthread = OP_BLOCK_SIZE_18;
#else
int nthread = OP_block_size;
#endif
int nblocks = 200;
op_cuda_getMaxElevation<<<nblocks,nthread>>>(
(float *) arg0.data_d,
(float *) arg1.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
if (OP_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
}
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[18].time += wall_t2 - wall_t1;
OP_kernels[18].transfer += (float)set->size * arg0.size;
OP_kernels[18].transfer += (float)set->size * arg1.size * 2.0f;
}
|
49129eb171f2a14c4c97f4deab83e008f0484bb0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "filter_response.h"
//#include "logging.h"
//---------------------------------------------------------
__global__ void kernel_fconv(FPTYPE *gpuA, int A_dim0, int A_dim1, int A_dim2, FPTYPE *gpuB, int B_dim0, int B_dim1, int B_dim2, FPTYPE *gpuCi, int C_dim0, int C_dim1, int num_features)
{
int f = blockIdx.z*blockDim.z + threadIdx.z;
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if( x >= C_dim1 || y >= C_dim0 || f >= num_features )
return; // skip padding thread
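// one thread per output element: accumulate the dot product of filter plane f of B
// with the window of A anchored at (y, x); arrays are stored column-major (dim0 = rows)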
FPTYPE *dst = gpuCi + f*C_dim0*C_dim1 ;
FPTYPE *A_src = gpuA + f*A_dim0*A_dim1;
FPTYPE *B_src = gpuB + f*B_dim0*B_dim1;
FPTYPE val = 0;
for(int xp = 0; xp < B_dim1; xp++)
{
FPTYPE *A_off = A_src + (x+xp)*A_dim0 + y;
FPTYPE *B_off = B_src + xp*B_dim0;
for(int yp = 0; yp < B_dim0; yp++)
val += *(A_off++) * *(B_off++);
}
dst[x*C_dim0 + y] = val;
}
__global__ void kernel_sum_Ci(FPTYPE *gpuCi, int C_dim0, int C_dim1, int num_features)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if( x >= C_dim1 || y >= C_dim0 )
return; // skip padding thread
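// reduce over features: sum the per-feature planes of gpuCi into plane 0 (written in place)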
FPTYPE val = 0;
for(int f = 0; f < num_features; f++)
val += gpuCi[f*C_dim0*C_dim1 + x*C_dim0 + y];
gpuCi[x*C_dim0 + y] = val;
}
//---------------------------------------------------------
#define BKSIZE 4
const int STREAMSNBR = 2;
// divide x by y and round up result
#define divup(x,y) (1+(((x)-1)/(y))) // if x != 0 only
hipStream_t stream[STREAMSNBR];
void initCuda(void)
{
for(int i = 0; i < STREAMSNBR; i++)
hipStreamCreate(&stream[i]);
}
void releaseCuda(void)
{
for(int i = 0; i < STREAMSNBR; i++)
hipStreamDestroy(stream[i]);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
}
myArray<FPTYPE>** fconv_cuda(const myArray<FPTYPE> *pyra_feat_level, myArray<FPTYPE> **filters, int start, int end)
{
// get A (features)
const myArray<FPTYPE> *mxA = pyra_feat_level;
FPTYPE *A = mxA->getPtr();
int num_features = mxA->getPlanes();
int A_dim0 = mxA->getRows();
int A_dim1 = mxA->getCols();
int A_dim2 = mxA->getPlanes();
// get B (filters) and start/end
myArray<FPTYPE> **cellB = filters;
start = start - 1;
end = end - 1;
int len = end - start + 1;
// allocate output
myArray<FPTYPE> **C = (myArray<FPTYPE>**) calloc(len, sizeof(myArray<FPTYPE>*));
// use 2 streams to overlap memory transfer and computation
int currentStream = 0;
int nextStream = 1;
// copy A on GPU
int A_bytes = mxA->getNumberOfElements() * sizeof(FPTYPE);
FPTYPE *gpuA;
hipMalloc((void**) &gpuA, A_bytes);
hipMemcpyAsync(gpuA, A, A_bytes, hipMemcpyHostToDevice, stream[currentStream]);
// memory for B and Ci on GPU is allocated only at the first iteration
// because the B and Ci sizes decrease
// allocate memory for output C[i] on GPU (1 C[i] per feature)
// C[i] is smaller than A, so we use the size of A to over-allocate
// memory for C
// allocate gpu memory just the first time, and not at each iteration
FPTYPE *gpuCi[STREAMSNBR] = { NULL };
int Ci_bytes_max = A_dim0 * A_dim1 * num_features * sizeof(FPTYPE);
for(int i = 0; i < STREAMSNBR; i++)
{
hipMalloc((void**) &gpuCi[i], Ci_bytes_max);
hipMemset((void*) gpuCi[i], 0, Ci_bytes_max);
}
// pre-load data on GPU for first iteration
FPTYPE *gpuB[STREAMSNBR] = { NULL };
const myArray<FPTYPE> *mxB = cellB[0+start];
FPTYPE *B = mxB->getPtr();
int B_dim0 = mxB->getRows();
int B_dim1 = mxB->getCols();
int B_dim2 = mxB->getPlanes();
// copy first B array on GPU
// allocate gpu memory just the first time, and not at each iteration
int B_bytes_first = mxB->getNumberOfElements() * sizeof(FPTYPE);
for(int i = 0; i < STREAMSNBR; i++)
hipMalloc((void**) &gpuB[i], B_bytes_first);
hipMemcpyAsync(gpuB[currentStream], B, B_bytes_first, hipMemcpyHostToDevice, stream[currentStream]);
// loop over parts and filters
for (int i = 0; i < len; i++)
{
if( A_dim2 != B_dim2 )
throw std::runtime_error("fconv_cuda(): invalid input B");
nextStream = (currentStream+1) % STREAMSNBR;
// pre-load B on GPU for next iteration
if( i < len-1 )
{
mxB = cellB[i+1+start];
B = mxB->getPtr();
int B_bytes = mxB->getNumberOfElements() * sizeof(FPTYPE);
hipMemcpyAsync(gpuB[nextStream], B, B_bytes, hipMemcpyHostToDevice, stream[nextStream]);
}
// compute size of output
int height = A_dim0 - B_dim0 + 1;
int width = A_dim1 - B_dim1 + 1;
if( height < 1 || width < 1 )
throw std::runtime_error("fconv_cuda(): invalid input: B should be smaller than A");
int C_dim0 = height;
int C_dim1 = width;
// compute C[i] for all features
dim3 dimBlock(BKSIZE, BKSIZE, 32);
dim3 dimGrid(divup(width, dimBlock.x), divup(height, dimBlock.y), divup(num_features,dimBlock.z));
//printf("w, h, f = %d %d %d\n", width, height, num_features);
//printf("dimBlock = %d %d %d\n", dimBlock.x, dimBlock.y, dimBlock.z);
//printf("dimGrid = %d %d %d\n", dimGrid.x, dimGrid.y, dimGrid.z);
hipLaunchKernelGGL(( kernel_fconv), dim3(dimGrid), dim3(dimBlock), 0, stream[currentStream] , gpuA, A_dim0, A_dim1, A_dim2, gpuB[currentStream], B_dim0, B_dim1, B_dim2, gpuCi[currentStream], C_dim0, C_dim1, num_features);
//DBG
/*
myArray<FPTYPE> *tmpCis = new myArray<FPTYPE>(A_dim0, A_dim1, num_features);
FPTYPE *tmpCisPtr = tmpCis->getPtr();
hipMemcpyAsync(tmpCisPtr, gpuCi, Ci_bytes_max, hipMemcpyDeviceToHost, stream[currentStream]);
writeLog("fconv_cuda tmpCis=");
writeLog(tmpCis);
delete tmpCis;
*/
// sum features contributions
dim3 dimBlock2(BKSIZE, BKSIZE);
dim3 dimGrid2(divup(width, dimBlock2.x), divup(height, dimBlock2.y));
hipLaunchKernelGGL(( kernel_sum_Ci), dim3(dimGrid2), dim3(dimBlock2), 0, stream[currentStream] , gpuCi[currentStream], C_dim0, C_dim1, num_features);
// transfer result from GPU memory to CPU memory
C[i] = new myArray<FPTYPE>(height, width, 1, true);
FPTYPE *Ci = C[i]->getPtr();
int Ci_bytes = C[i]->getNumberOfElements() * sizeof(FPTYPE);
hipMemcpyAsync(Ci, gpuCi[currentStream], Ci_bytes, hipMemcpyDeviceToHost, stream[currentStream]);
// prepare next iteration
currentStream = nextStream;
B_dim0 = mxB->getRows();
B_dim1 = mxB->getCols();
B_dim2 = mxB->getPlanes();
}
for(int i = 0; i < STREAMSNBR; i++)
hipStreamSynchronize(stream[i]);
// release GPU memory
for(int i = 0; i < STREAMSNBR; i++)
{
hipFree(gpuCi[i]);
hipFree(gpuB[i]);
}
hipFree(gpuA);
return C;
}
void cudaWakeUp(void)
{
// make a dummy memory allocation to wake up the NVIDIA driver
// before starting time measurement, in order to make a fair comparison
int *gpuWakeUp;
hipMalloc((void**) &gpuWakeUp, 1);
hipFree(gpuWakeUp);
}
|
49129eb171f2a14c4c97f4deab83e008f0484bb0.cu
|
#include "filter_response.h"
//#include "logging.h"
//---------------------------------------------------------
__global__ void kernel_fconv(FPTYPE *gpuA, int A_dim0, int A_dim1, int A_dim2, FPTYPE *gpuB, int B_dim0, int B_dim1, int B_dim2, FPTYPE *gpuCi, int C_dim0, int C_dim1, int num_features)
{
int f = blockIdx.z*blockDim.z + threadIdx.z;
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if( x >= C_dim1 || y >= C_dim0 || f >= num_features )
return; // skip padding thread
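// one thread per output element: accumulate the dot product of filter plane f of B
// with the window of A anchored at (y, x); arrays are stored column-major (dim0 = rows)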
FPTYPE *dst = gpuCi + f*C_dim0*C_dim1 ;
FPTYPE *A_src = gpuA + f*A_dim0*A_dim1;
FPTYPE *B_src = gpuB + f*B_dim0*B_dim1;
FPTYPE val = 0;
for(int xp = 0; xp < B_dim1; xp++)
{
FPTYPE *A_off = A_src + (x+xp)*A_dim0 + y;
FPTYPE *B_off = B_src + xp*B_dim0;
for(int yp = 0; yp < B_dim0; yp++)
val += *(A_off++) * *(B_off++);
}
dst[x*C_dim0 + y] = val;
}
__global__ void kernel_sum_Ci(FPTYPE *gpuCi, int C_dim0, int C_dim1, int num_features)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if( x >= C_dim1 || y >= C_dim0 )
return; // skip padding thread
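// reduce over features: sum the per-feature planes of gpuCi into plane 0 (written in place)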
FPTYPE val = 0;
for(int f = 0; f < num_features; f++)
val += gpuCi[f*C_dim0*C_dim1 + x*C_dim0 + y];
gpuCi[x*C_dim0 + y] = val;
}
//---------------------------------------------------------
#define BKSIZE 4
const int STREAMSNBR = 2;
// divide x by y and round up result
#define divup(x,y) (1+(((x)-1)/(y))) // if x != 0 only
cudaStream_t stream[STREAMSNBR];
void initCuda(void)
{
for(int i = 0; i < STREAMSNBR; i++)
cudaStreamCreate(&stream[i]);
}
void releaseCuda(void)
{
for(int i = 0; i < STREAMSNBR; i++)
cudaStreamDestroy(stream[i]);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
}
myArray<FPTYPE>** fconv_cuda(const myArray<FPTYPE> *pyra_feat_level, myArray<FPTYPE> **filters, int start, int end)
{
// get A (features)
const myArray<FPTYPE> *mxA = pyra_feat_level;
FPTYPE *A = mxA->getPtr();
int num_features = mxA->getPlanes();
int A_dim0 = mxA->getRows();
int A_dim1 = mxA->getCols();
int A_dim2 = mxA->getPlanes();
// get B (filters) and start/end
myArray<FPTYPE> **cellB = filters;
start = start - 1;
end = end - 1;
int len = end - start + 1;
// allocate output
myArray<FPTYPE> **C = (myArray<FPTYPE>**) calloc(len, sizeof(myArray<FPTYPE>*));
// use 2 streams to overlap memory transfer and computation
int currentStream = 0;
int nextStream = 1;
// copy A on GPU
int A_bytes = mxA->getNumberOfElements() * sizeof(FPTYPE);
FPTYPE *gpuA;
cudaMalloc((void**) &gpuA, A_bytes);
cudaMemcpyAsync(gpuA, A, A_bytes, cudaMemcpyHostToDevice, stream[currentStream]);
// memory for B and Ci on GPU is allocated only at the first iteration
// because the B and Ci sizes decrease
// allocate memory for output C[i] on GPU (1 C[i] per feature)
// C[i] is smaller than A, so we use the size of A to over-allocate
// memory for C
// allocate gpu memory just the first time, and not at each iteration
FPTYPE *gpuCi[STREAMSNBR] = { NULL };
int Ci_bytes_max = A_dim0 * A_dim1 * num_features * sizeof(FPTYPE);
for(int i = 0; i < STREAMSNBR; i++)
{
cudaMalloc((void**) &gpuCi[i], Ci_bytes_max);
cudaMemset((void*) gpuCi[i], 0, Ci_bytes_max);
}
// pre-load data on GPU for first iteration
FPTYPE *gpuB[STREAMSNBR] = { NULL };
const myArray<FPTYPE> *mxB = cellB[0+start];
FPTYPE *B = mxB->getPtr();
int B_dim0 = mxB->getRows();
int B_dim1 = mxB->getCols();
int B_dim2 = mxB->getPlanes();
// copy first B array on GPU
// allocate gpu memory just the first time, and not at each iteration
int B_bytes_first = mxB->getNumberOfElements() * sizeof(FPTYPE);
for(int i = 0; i < STREAMSNBR; i++)
cudaMalloc((void**) &gpuB[i], B_bytes_first);
cudaMemcpyAsync(gpuB[currentStream], B, B_bytes_first, cudaMemcpyHostToDevice, stream[currentStream]);
// loop over parts and filters
for (int i = 0; i < len; i++)
{
if( A_dim2 != B_dim2 )
throw std::runtime_error("fconv_cuda(): invalid input B");
nextStream = (currentStream+1) % STREAMSNBR;
// pre-load B on GPU for next iteration
if( i < len-1 )
{
mxB = cellB[i+1+start];
B = mxB->getPtr();
int B_bytes = mxB->getNumberOfElements() * sizeof(FPTYPE);
cudaMemcpyAsync(gpuB[nextStream], B, B_bytes, cudaMemcpyHostToDevice, stream[nextStream]);
}
// compute size of output
int height = A_dim0 - B_dim0 + 1;
int width = A_dim1 - B_dim1 + 1;
if( height < 1 || width < 1 )
throw std::runtime_error("fconv_cuda(): invalid input: B should be smaller than A");
int C_dim0 = height;
int C_dim1 = width;
// compute C[i] for all features
dim3 dimBlock(BKSIZE, BKSIZE, 32);
dim3 dimGrid(divup(width, dimBlock.x), divup(height, dimBlock.y), divup(num_features,dimBlock.z));
//printf("w, h, f = %d %d %d\n", width, height, num_features);
//printf("dimBlock = %d %d %d\n", dimBlock.x, dimBlock.y, dimBlock.z);
//printf("dimGrid = %d %d %d\n", dimGrid.x, dimGrid.y, dimGrid.z);
kernel_fconv<<< dimGrid, dimBlock, 0, stream[currentStream] >>>(gpuA, A_dim0, A_dim1, A_dim2, gpuB[currentStream], B_dim0, B_dim1, B_dim2, gpuCi[currentStream], C_dim0, C_dim1, num_features);
//DBG
/*
myArray<FPTYPE> *tmpCis = new myArray<FPTYPE>(A_dim0, A_dim1, num_features);
FPTYPE *tmpCisPtr = tmpCis->getPtr();
cudaMemcpyAsync(tmpCisPtr, gpuCi, Ci_bytes_max, cudaMemcpyDeviceToHost, stream[currentStream]);
writeLog("fconv_cuda tmpCis=");
writeLog(tmpCis);
delete tmpCis;
*/
// sum features contributions
dim3 dimBlock2(BKSIZE, BKSIZE);
dim3 dimGrid2(divup(width, dimBlock2.x), divup(height, dimBlock2.y));
kernel_sum_Ci<<< dimGrid2, dimBlock2, 0, stream[currentStream] >>>(gpuCi[currentStream], C_dim0, C_dim1, num_features);
// transfer result from GPU memory to CPU memory
C[i] = new myArray<FPTYPE>(height, width, 1, true);
FPTYPE *Ci = C[i]->getPtr();
int Ci_bytes = C[i]->getNumberOfElements() * sizeof(FPTYPE);
cudaMemcpyAsync(Ci, gpuCi[currentStream], Ci_bytes, cudaMemcpyDeviceToHost, stream[currentStream]);
// prepare next iteration
currentStream = nextStream;
B_dim0 = mxB->getRows();
B_dim1 = mxB->getCols();
B_dim2 = mxB->getPlanes();
}
for(int i = 0; i < STREAMSNBR; i++)
cudaStreamSynchronize(stream[i]);
// release GPU memory
for(int i = 0; i < STREAMSNBR; i++)
{
cudaFree(gpuCi[i]);
cudaFree(gpuB[i]);
}
cudaFree(gpuA);
return C;
}
void cudaWakeUp(void)
{
// make a dummy memory allocation to wake up the NVIDIA driver
// before starting time measurement, in order to make a fair comparison
int *gpuWakeUp;
cudaMalloc((void**) &gpuWakeUp, 1);
cudaFree(gpuWakeUp);
}
|
b72b5705e372804d1c8af642eceae4283223a623.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "network_analyzer_cuda.h"
#include "neural_network_cuda_exception.h"
#include "cuda_linear_buffer_device.h"
#include "cuda_linear_buffer_host.h"
#include "util_cuda.h"
#include "cuda_event.h"
#include "layer_updater_schema_factory.h"
#include <hip/hip_runtime.h>
#include <boost/format.hpp>
namespace nnforge
{
namespace cuda
{
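// unpack byte-quantised inputs: each thread converts one uchar4 (4 neurons) to a float4 scaled to [0, 1]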
__global__ void convert_compacted_to_raw_analazer_kernel(
const uchar4 * __restrict input,
float4 * __restrict output,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
uchar4 inp = input[elem_id];
float4 val;
val.x = inp.x * (1.0F / 255.0F);
val.y = inp.y * (1.0F / 255.0F);
val.z = inp.z * (1.0F / 255.0F);
val.w = inp.w * (1.0F / 255.0F);
output[elem_id] = val;
}
}
network_analyzer_cuda::network_analyzer_cuda(
network_schema_smart_ptr schema,
cuda_running_configuration_const_smart_ptr cuda_config)
: network_analyzer(schema)
, cuda_config(cuda_config)
{
cuda_config->set_device();
const const_layer_list& layer_list = *schema;
for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it)
updater_schemas.push_back(single_layer_updater_schema_factory::get_const_instance().create_updater_schema_layer(*it, cuda_config));
setup_network_cuda();
for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it)
updater_schema_data.push_back((*it)->get_schema_buffers());
}
network_analyzer_cuda::~network_analyzer_cuda()
{
}
void network_analyzer_cuda::setup_network_cuda()
{
command_stream = cuda_stream_smart_ptr(new cuda_stream());
}
void network_analyzer_cuda::layer_config_list_modified()
{
cuda_config->set_device();
updater_list.clear();
updater_input_and_all_buffers_pack.clear();
output_errors_buffers.clear();
layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin();
for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it, ++it_conf)
{
updater_list.push_back(
(*it)->create_updater(
*it_conf,
*(it_conf + 1),
true));
}
unsigned int input_neuron_count = layer_config_list.front().get_neuron_count();
unsigned int output_neuron_count = layer_config_list.back().get_neuron_count();
input_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * sizeof(float)));
input_converted_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf;
for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it)
{
layer_updater_cuda::buffer_set all_buffers = (*it)->allocate_all_buffers(1);
updater_input_and_all_buffers_pack.push_back(std::make_pair(output_buffer, all_buffers));
output_buffer = all_buffers.output_neurons_buffer;
}
cuda_linear_buffer_device_smart_ptr initial_error_buf(new cuda_linear_buffer_device(output_neuron_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr output_errors = initial_error_buf;
for(std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator it = updater_input_and_all_buffers_pack.rbegin(); it != updater_input_and_all_buffers_pack.rend(); ++it)
{
output_errors_buffers.push_back(output_errors);
layer_updater_cuda::buffer_set& all_buffers = it->second;
if (all_buffers.input_errors_buffer != 0)
output_errors = all_buffers.input_errors_buffer;
}
}
void network_analyzer_cuda::actual_set_data(network_data_smart_ptr data)
{
cuda_config->set_device();
net_data.clear();
net_data_custom.clear();
for(layer_data_list::const_iterator it2 = data->data_list.begin(); it2 != data->data_list.end(); ++it2)
{
std::vector<cuda_linear_buffer_device_smart_ptr> res;
for(std::vector<std::vector<float> >::iterator it = (*it2)->begin(); it != (*it2)->end(); ++it)
{
size_t buffer_size = it->size() * sizeof(float);
cuda_linear_buffer_device_smart_ptr new_buf(new cuda_linear_buffer_device(buffer_size));
cuda_safe_call(hipMemcpy(*new_buf, &(*it->begin()), buffer_size, hipMemcpyHostToDevice));
res.push_back(new_buf);
}
net_data.push_back(res);
}
for(layer_data_custom_list::const_iterator it2 = data->data_custom_list.begin(); it2 != data->data_custom_list.end(); ++it2)
{
std::vector<cuda_linear_buffer_device_smart_ptr> res;
for(std::vector<std::vector<int> >::iterator it = (*it2)->begin(); it != (*it2)->end(); ++it)
{
size_t buffer_size = it->size() * sizeof(int);
cuda_linear_buffer_device_smart_ptr new_buf(new cuda_linear_buffer_device(buffer_size));
cuda_safe_call(hipMemcpy(*new_buf, &(*it->begin()), buffer_size, hipMemcpyHostToDevice));
res.push_back(new_buf);
}
net_data_custom.push_back(res);
}
}
void network_analyzer_cuda::actual_set_input_data(
const void * input,
neuron_data_type::input_type type_code)
{
cuda_config->set_device();
unsigned int input_neuron_count = layer_config_list.front().get_neuron_count();
unsigned int output_neuron_count = layer_config_list.back().get_neuron_count();
size_t input_neuron_elem_size = neuron_data_type::get_input_size(type_code);
// Convert input
if (type_code == neuron_data_type::type_byte)
{
cuda_safe_call(hipMemcpyAsync(
*input_buf,
input,
input_neuron_count * input_neuron_elem_size,
hipMemcpyHostToDevice,
*command_stream));
int elem_count = (input_neuron_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( convert_compacted_to_raw_analazer_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, *command_stream,
*input_buf,
*input_converted_buf,
elem_count);
}
else if (type_code == neuron_data_type::type_float)
{
cuda_safe_call(hipMemcpyAsync(
*input_converted_buf,
input,
input_neuron_count * input_neuron_elem_size,
hipMemcpyHostToDevice,
*command_stream));
}
else throw neural_network_exception((boost::format("actual_set_input_data cannot handle input neurons of type %1%") % type_code).str());
// Forward updater
{
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.begin();
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin();
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_custom_it = net_data_custom.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = updater_schema_data.begin();
layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin();
for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++net_data_it, ++net_data_custom_it, ++layer_config_it)
{
(*it)->enqueue_test(
0,
*command_stream,
*schema_data_it,
*net_data_it,
*net_data_custom_it,
input_and_all_buffers_pack_it->first,
input_and_all_buffers_pack_it->second.output_neurons_buffer,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
1,
true);
}
}
cuda_safe_call(hipStreamSynchronize(*command_stream));
}
std::pair<layer_configuration_specific_snapshot_smart_ptr, layer_configuration_specific_snapshot_smart_ptr> network_analyzer_cuda::actual_run_backprop(
const layer_configuration_specific_snapshot& output_data,
const std::vector<unsigned int>& output_offset_list,
unsigned int output_layer_id,
const std::vector<std::pair<unsigned int, unsigned int> >& input_rectangle_borders)
{
cuda_config->set_device();
std::vector<cuda_linear_buffer_device_smart_ptr>::iterator output_errors_it = output_errors_buffers.begin() + (output_errors_buffers.size() - output_layer_id - 1);
// Initialize output errors
{
float * dst = **output_errors_it;
cuda_util::set_with_value(*cuda_config, dst, 0.0F, (*output_errors_it)->get_size() / sizeof(float), *command_stream);
const layer_configuration_specific& output_config = layer_config_list[output_layer_id + 1];
int sequential_chunk_dimension_count = -1;
unsigned int sequential_copy_elem_count = 1;
while (sequential_chunk_dimension_count < (int)output_offset_list.size() - 1)
{
++sequential_chunk_dimension_count;
sequential_copy_elem_count *= output_data.config.dimension_sizes[sequential_chunk_dimension_count];
if (output_data.config.dimension_sizes[sequential_chunk_dimension_count] != output_config.dimension_sizes[sequential_chunk_dimension_count])
break;
}
++sequential_chunk_dimension_count;
std::vector<float>::const_iterator src_it = output_data.data.begin();
for(unsigned int feature_map_id = 0; feature_map_id < output_data.config.feature_map_count; ++feature_map_id)
{
unsigned int dst_fm_offset = feature_map_id * output_config.get_neuron_count_per_feature_map();
std::vector<unsigned int> src_list(output_offset_list.size(), 0);
bool cont = true;
while (cont)
{
bool should_copy = false;
for(std::vector<float>::const_iterator it = src_it; it != src_it + sequential_copy_elem_count; ++it)
{
if (*it != 0.0F)
{
should_copy = true;
break;
}
}
if (should_copy)
{
std::vector<unsigned int> dst_offset_list(output_offset_list);
for(unsigned int i = sequential_chunk_dimension_count; i < dst_offset_list.size(); ++i)
dst_offset_list[i] += src_list[i];
cuda_safe_call(hipMemcpyAsync(dst + dst_fm_offset + output_config.get_pos(dst_offset_list), &(*src_it), sequential_copy_elem_count * sizeof(float), hipMemcpyHostToDevice, *command_stream));
};
cont = false;
for(int i = sequential_chunk_dimension_count; i < src_list.size(); ++i)
{
src_list[i]++;
if (src_list[i] < output_data.config.dimension_sizes[i])
{
cont = true;
break;
}
else
src_list[i] = 0;
}
src_it += sequential_copy_elem_count;
}
}
}
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.rbegin() + (updater_input_and_all_buffers_pack.size() - output_layer_id - 1);
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator schema_data_it = updater_schema_data.rbegin() + (updater_schema_data.size() - output_layer_id - 1);
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_it = net_data.rbegin() + (net_data.size() - output_layer_id - 1);
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_custom_it = net_data_custom.rbegin() + (net_data.size() - output_layer_id - 1);
for(std::vector<layer_updater_cuda_smart_ptr>::reverse_iterator it = updater_list.rbegin() + (updater_list.size() - output_layer_id - 1); it != updater_list.rend(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++output_errors_it, ++net_data_it, ++net_data_custom_it)
{
if (input_and_all_buffers_pack_it->second.input_errors_buffer)
{
(*it)->enqueue_backprop(
*command_stream,
*schema_data_it,
*net_data_it,
*net_data_custom_it,
input_and_all_buffers_pack_it->second.output_neurons_buffer,
input_and_all_buffers_pack_it->first,
*output_errors_it,
input_and_all_buffers_pack_it->second.input_errors_buffer,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
1,
true);
}
else
{
(*it)->enqueue_test(
0,
*command_stream,
*schema_data_it,
*net_data_it,
*net_data_custom_it,
*output_errors_it,
*output_errors_it,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
1,
true);
}
}
layer_configuration_specific_snapshot_smart_ptr res(new layer_configuration_specific_snapshot());
layer_configuration_specific_snapshot_smart_ptr input_data(new layer_configuration_specific_snapshot());
// Copy input errors
{
res->config.feature_map_count = layer_config_list.front().feature_map_count;
input_data->config.feature_map_count = layer_config_list.front().feature_map_count;
unsigned int elem_count = res->config.feature_map_count;
for(int i = 0; i < input_rectangle_borders.size(); ++i)
{
unsigned int val = input_rectangle_borders[i].second - input_rectangle_borders[i].first;
elem_count *= val;
res->config.dimension_sizes.push_back(val);
input_data->config.dimension_sizes.push_back(val);
}
res->data.resize(elem_count);
input_data->data.resize(elem_count);
cuda_linear_buffer_device_smart_ptr input_errors_buf = updater_input_and_all_buffers_pack.front().second.input_errors_buffer;
if (input_errors_buf == 0)
input_errors_buf = output_errors_buffers.back();
float * src = *input_errors_buf;
float * src_input_data = *input_converted_buf;
const layer_configuration_specific& input_config = layer_config_list.front();
int sequential_chunk_dimension_count = -1;
unsigned int sequential_copy_elem_count = 1;
while (sequential_chunk_dimension_count < (int)input_config.dimension_sizes.size() - 1)
{
++sequential_chunk_dimension_count;
sequential_copy_elem_count *= res->config.dimension_sizes[sequential_chunk_dimension_count];
if (res->config.dimension_sizes[sequential_chunk_dimension_count] != input_config.dimension_sizes[sequential_chunk_dimension_count])
break;
}
++sequential_chunk_dimension_count;
std::vector<float>::iterator dst_it = res->data.begin();
std::vector<float>::iterator dst_input_data_it = input_data->data.begin();
for(unsigned int feature_map_id = 0; feature_map_id < input_config.feature_map_count; ++feature_map_id)
{
unsigned int src_fm_offset = feature_map_id * input_config.get_neuron_count_per_feature_map();
std::vector<unsigned int> dst_list(input_rectangle_borders.size(), 0);
bool cont = true;
while (cont)
{
std::vector<unsigned int> src_offset_list(input_rectangle_borders.size());
for(int i = 0; i < src_offset_list.size(); ++i)
src_offset_list[i] = input_rectangle_borders[i].first;
for(unsigned int i = sequential_chunk_dimension_count; i < src_offset_list.size(); ++i)
src_offset_list[i] += dst_list[i];
cuda_safe_call(hipMemcpyAsync(&(*dst_it), src + src_fm_offset + input_config.get_pos(src_offset_list), sequential_copy_elem_count * sizeof(float), hipMemcpyDeviceToHost, *command_stream));
cuda_safe_call(hipMemcpyAsync(&(*dst_input_data_it), src_input_data + src_fm_offset + input_config.get_pos(src_offset_list), sequential_copy_elem_count * sizeof(float), hipMemcpyDeviceToHost, *command_stream));
cont = false;
for(int i = sequential_chunk_dimension_count; i < dst_list.size(); ++i)
{
dst_list[i]++;
if (dst_list[i] < res->config.dimension_sizes[i])
{
cont = true;
break;
}
else
dst_list[i] = 0;
}
dst_it += sequential_copy_elem_count;
dst_input_data_it += sequential_copy_elem_count;
}
}
}
cuda_safe_call(hipStreamSynchronize(*command_stream));
return std::make_pair(res, input_data);
}
}
}
|
b72b5705e372804d1c8af642eceae4283223a623.cu
|
/*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "network_analyzer_cuda.h"
#include "neural_network_cuda_exception.h"
#include "cuda_linear_buffer_device.h"
#include "cuda_linear_buffer_host.h"
#include "util_cuda.h"
#include "cuda_event.h"
#include "layer_updater_schema_factory.h"
#include <cuda_runtime.h>
#include <boost/format.hpp>
namespace nnforge
{
namespace cuda
{
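// unpack byte-quantised inputs: each thread converts one uchar4 (4 neurons) to a float4 scaled to [0, 1]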
__global__ void convert_compacted_to_raw_analazer_kernel(
const uchar4 * __restrict input,
float4 * __restrict output,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
uchar4 inp = input[elem_id];
float4 val;
val.x = inp.x * (1.0F / 255.0F);
val.y = inp.y * (1.0F / 255.0F);
val.z = inp.z * (1.0F / 255.0F);
val.w = inp.w * (1.0F / 255.0F);
output[elem_id] = val;
}
}
network_analyzer_cuda::network_analyzer_cuda(
network_schema_smart_ptr schema,
cuda_running_configuration_const_smart_ptr cuda_config)
: network_analyzer(schema)
, cuda_config(cuda_config)
{
cuda_config->set_device();
const const_layer_list& layer_list = *schema;
for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it)
updater_schemas.push_back(single_layer_updater_schema_factory::get_const_instance().create_updater_schema_layer(*it, cuda_config));
setup_network_cuda();
for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it)
updater_schema_data.push_back((*it)->get_schema_buffers());
}
network_analyzer_cuda::~network_analyzer_cuda()
{
}
void network_analyzer_cuda::setup_network_cuda()
{
command_stream = cuda_stream_smart_ptr(new cuda_stream());
}
void network_analyzer_cuda::layer_config_list_modified()
{
cuda_config->set_device();
updater_list.clear();
updater_input_and_all_buffers_pack.clear();
output_errors_buffers.clear();
layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin();
for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it, ++it_conf)
{
updater_list.push_back(
(*it)->create_updater(
*it_conf,
*(it_conf + 1),
true));
}
unsigned int input_neuron_count = layer_config_list.front().get_neuron_count();
unsigned int output_neuron_count = layer_config_list.back().get_neuron_count();
input_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * sizeof(float)));
input_converted_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf;
for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it)
{
layer_updater_cuda::buffer_set all_buffers = (*it)->allocate_all_buffers(1);
updater_input_and_all_buffers_pack.push_back(std::make_pair(output_buffer, all_buffers));
output_buffer = all_buffers.output_neurons_buffer;
}
cuda_linear_buffer_device_smart_ptr initial_error_buf(new cuda_linear_buffer_device(output_neuron_count * sizeof(float)));
cuda_linear_buffer_device_smart_ptr output_errors = initial_error_buf;
for(std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator it = updater_input_and_all_buffers_pack.rbegin(); it != updater_input_and_all_buffers_pack.rend(); ++it)
{
output_errors_buffers.push_back(output_errors);
layer_updater_cuda::buffer_set& all_buffers = it->second;
if (all_buffers.input_errors_buffer != 0)
output_errors = all_buffers.input_errors_buffer;
}
}
void network_analyzer_cuda::actual_set_data(network_data_smart_ptr data)
{
cuda_config->set_device();
net_data.clear();
net_data_custom.clear();
for(layer_data_list::const_iterator it2 = data->data_list.begin(); it2 != data->data_list.end(); ++it2)
{
std::vector<cuda_linear_buffer_device_smart_ptr> res;
for(std::vector<std::vector<float> >::iterator it = (*it2)->begin(); it != (*it2)->end(); ++it)
{
size_t buffer_size = it->size() * sizeof(float);
cuda_linear_buffer_device_smart_ptr new_buf(new cuda_linear_buffer_device(buffer_size));
cuda_safe_call(cudaMemcpy(*new_buf, &(*it->begin()), buffer_size, cudaMemcpyHostToDevice));
res.push_back(new_buf);
}
net_data.push_back(res);
}
for(layer_data_custom_list::const_iterator it2 = data->data_custom_list.begin(); it2 != data->data_custom_list.end(); ++it2)
{
std::vector<cuda_linear_buffer_device_smart_ptr> res;
for(std::vector<std::vector<int> >::iterator it = (*it2)->begin(); it != (*it2)->end(); ++it)
{
size_t buffer_size = it->size() * sizeof(int);
cuda_linear_buffer_device_smart_ptr new_buf(new cuda_linear_buffer_device(buffer_size));
cuda_safe_call(cudaMemcpy(*new_buf, &(*it->begin()), buffer_size, cudaMemcpyHostToDevice));
res.push_back(new_buf);
}
net_data_custom.push_back(res);
}
}
void network_analyzer_cuda::actual_set_input_data(
const void * input,
neuron_data_type::input_type type_code)
{
cuda_config->set_device();
unsigned int input_neuron_count = layer_config_list.front().get_neuron_count();
unsigned int output_neuron_count = layer_config_list.back().get_neuron_count();
size_t input_neuron_elem_size = neuron_data_type::get_input_size(type_code);
// Convert input
if (type_code == neuron_data_type::type_byte)
{
cuda_safe_call(cudaMemcpyAsync(
*input_buf,
input,
input_neuron_count * input_neuron_elem_size,
cudaMemcpyHostToDevice,
*command_stream));
int elem_count = (input_neuron_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
convert_compacted_to_raw_analazer_kernel<<<kernel_dims.first, kernel_dims.second, 0, *command_stream>>>(
*input_buf,
*input_converted_buf,
elem_count);
}
else if (type_code == neuron_data_type::type_float)
{
cuda_safe_call(cudaMemcpyAsync(
*input_converted_buf,
input,
input_neuron_count * input_neuron_elem_size,
cudaMemcpyHostToDevice,
*command_stream));
}
else throw neural_network_exception((boost::format("actual_set_input_data cannot handle input neurons of type %1%") % type_code).str());
// Forward updater
{
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.begin();
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin();
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_custom_it = net_data_custom.begin();
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = updater_schema_data.begin();
layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin();
for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++net_data_it, ++net_data_custom_it, ++layer_config_it)
{
(*it)->enqueue_test(
0,
*command_stream,
*schema_data_it,
*net_data_it,
*net_data_custom_it,
input_and_all_buffers_pack_it->first,
input_and_all_buffers_pack_it->second.output_neurons_buffer,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
1,
true);
}
}
cuda_safe_call(cudaStreamSynchronize(*command_stream));
}
std::pair<layer_configuration_specific_snapshot_smart_ptr, layer_configuration_specific_snapshot_smart_ptr> network_analyzer_cuda::actual_run_backprop(
const layer_configuration_specific_snapshot& output_data,
const std::vector<unsigned int>& output_offset_list,
unsigned int output_layer_id,
const std::vector<std::pair<unsigned int, unsigned int> >& input_rectangle_borders)
{
cuda_config->set_device();
std::vector<cuda_linear_buffer_device_smart_ptr>::iterator output_errors_it = output_errors_buffers.begin() + (output_errors_buffers.size() - output_layer_id - 1);
// Initialize output errors
{
float * dst = **output_errors_it;
cuda_util::set_with_value(*cuda_config, dst, 0.0F, (*output_errors_it)->get_size() / sizeof(float), *command_stream);
const layer_configuration_specific& output_config = layer_config_list[output_layer_id + 1];
int sequential_chunk_dimension_count = -1;
unsigned int sequential_copy_elem_count = 1;
while (sequential_chunk_dimension_count < (int)output_offset_list.size() - 1)
{
++sequential_chunk_dimension_count;
sequential_copy_elem_count *= output_data.config.dimension_sizes[sequential_chunk_dimension_count];
if (output_data.config.dimension_sizes[sequential_chunk_dimension_count] != output_config.dimension_sizes[sequential_chunk_dimension_count])
break;
}
++sequential_chunk_dimension_count;
std::vector<float>::const_iterator src_it = output_data.data.begin();
for(unsigned int feature_map_id = 0; feature_map_id < output_data.config.feature_map_count; ++feature_map_id)
{
unsigned int dst_fm_offset = feature_map_id * output_config.get_neuron_count_per_feature_map();
std::vector<unsigned int> src_list(output_offset_list.size(), 0);
bool cont = true;
while (cont)
{
bool should_copy = false;
for(std::vector<float>::const_iterator it = src_it; it != src_it + sequential_copy_elem_count; ++it)
{
if (*it != 0.0F)
{
should_copy = true;
break;
}
}
if (should_copy)
{
std::vector<unsigned int> dst_offset_list(output_offset_list);
for(unsigned int i = sequential_chunk_dimension_count; i < dst_offset_list.size(); ++i)
dst_offset_list[i] += src_list[i];
cuda_safe_call(cudaMemcpyAsync(dst + dst_fm_offset + output_config.get_pos(dst_offset_list), &(*src_it), sequential_copy_elem_count * sizeof(float), cudaMemcpyHostToDevice, *command_stream));
};
cont = false;
for(int i = sequential_chunk_dimension_count; i < src_list.size(); ++i)
{
src_list[i]++;
if (src_list[i] < output_data.config.dimension_sizes[i])
{
cont = true;
break;
}
else
src_list[i] = 0;
}
src_it += sequential_copy_elem_count;
}
}
}
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.rbegin() + (updater_input_and_all_buffers_pack.size() - output_layer_id - 1);
std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator schema_data_it = updater_schema_data.rbegin() + (updater_schema_data.size() - output_layer_id - 1);
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_it = net_data.rbegin() + (net_data.size() - output_layer_id - 1);
std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_custom_it = net_data_custom.rbegin() + (net_data.size() - output_layer_id - 1);
for(std::vector<layer_updater_cuda_smart_ptr>::reverse_iterator it = updater_list.rbegin() + (updater_list.size() - output_layer_id - 1); it != updater_list.rend(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++output_errors_it, ++net_data_it, ++net_data_custom_it)
{
if (input_and_all_buffers_pack_it->second.input_errors_buffer)
{
(*it)->enqueue_backprop(
*command_stream,
*schema_data_it,
*net_data_it,
*net_data_custom_it,
input_and_all_buffers_pack_it->second.output_neurons_buffer,
input_and_all_buffers_pack_it->first,
*output_errors_it,
input_and_all_buffers_pack_it->second.input_errors_buffer,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
1,
true);
}
else
{
(*it)->enqueue_test(
0,
*command_stream,
*schema_data_it,
*net_data_it,
*net_data_custom_it,
*output_errors_it,
*output_errors_it,
input_and_all_buffers_pack_it->second.additional_buffers,
input_and_all_buffers_pack_it->second.dynamic_memobjects,
1,
true);
}
}
layer_configuration_specific_snapshot_smart_ptr res(new layer_configuration_specific_snapshot());
layer_configuration_specific_snapshot_smart_ptr input_data(new layer_configuration_specific_snapshot());
// Copy input errors
{
res->config.feature_map_count = layer_config_list.front().feature_map_count;
input_data->config.feature_map_count = layer_config_list.front().feature_map_count;
unsigned int elem_count = res->config.feature_map_count;
for(int i = 0; i < input_rectangle_borders.size(); ++i)
{
unsigned int val = input_rectangle_borders[i].second - input_rectangle_borders[i].first;
elem_count *= val;
res->config.dimension_sizes.push_back(val);
input_data->config.dimension_sizes.push_back(val);
}
res->data.resize(elem_count);
input_data->data.resize(elem_count);
cuda_linear_buffer_device_smart_ptr input_errors_buf = updater_input_and_all_buffers_pack.front().second.input_errors_buffer;
if (input_errors_buf == 0)
input_errors_buf = output_errors_buffers.back();
float * src = *input_errors_buf;
float * src_input_data = *input_converted_buf;
const layer_configuration_specific& input_config = layer_config_list.front();
int sequential_chunk_dimension_count = -1;
unsigned int sequential_copy_elem_count = 1;
while (sequential_chunk_dimension_count < (int)input_config.dimension_sizes.size() - 1)
{
++sequential_chunk_dimension_count;
sequential_copy_elem_count *= res->config.dimension_sizes[sequential_chunk_dimension_count];
if (res->config.dimension_sizes[sequential_chunk_dimension_count] != input_config.dimension_sizes[sequential_chunk_dimension_count])
break;
}
++sequential_chunk_dimension_count;
std::vector<float>::iterator dst_it = res->data.begin();
std::vector<float>::iterator dst_input_data_it = input_data->data.begin();
for(unsigned int feature_map_id = 0; feature_map_id < input_config.feature_map_count; ++feature_map_id)
{
unsigned int src_fm_offset = feature_map_id * input_config.get_neuron_count_per_feature_map();
std::vector<unsigned int> dst_list(input_rectangle_borders.size(), 0);
bool cont = true;
while (cont)
{
std::vector<unsigned int> src_offset_list(input_rectangle_borders.size());
for(int i = 0; i < src_offset_list.size(); ++i)
src_offset_list[i] = input_rectangle_borders[i].first;
for(unsigned int i = sequential_chunk_dimension_count; i < src_offset_list.size(); ++i)
src_offset_list[i] += dst_list[i];
cuda_safe_call(cudaMemcpyAsync(&(*dst_it), src + src_fm_offset + input_config.get_pos(src_offset_list), sequential_copy_elem_count * sizeof(float), cudaMemcpyDeviceToHost, *command_stream));
cuda_safe_call(cudaMemcpyAsync(&(*dst_input_data_it), src_input_data + src_fm_offset + input_config.get_pos(src_offset_list), sequential_copy_elem_count * sizeof(float), cudaMemcpyDeviceToHost, *command_stream));
cont = false;
for(int i = sequential_chunk_dimension_count; i < dst_list.size(); ++i)
{
dst_list[i]++;
if (dst_list[i] < res->config.dimension_sizes[i])
{
cont = true;
break;
}
else
dst_list[i] = 0;
}
dst_it += sequential_copy_elem_count;
dst_input_data_it += sequential_copy_elem_count;
}
}
}
cuda_safe_call(cudaStreamSynchronize(*command_stream));
return std::make_pair(res, input_data);
}
}
}
|
9228609a6ac68a4cee4dc8e7d8a366bafe5b1560.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_wave_solver.h"
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/transform.h>
namespace Femog {
namespace Fem {
namespace {
struct saxpy_functor : public thrust::binary_function<float, float, float> {
const float a;
saxpy_functor(float _a) : a(_a) {}
__host__ __device__ float operator()(const float& x, const float& y) const {
return a * x + y;
}
};
struct saxpby_functor : public thrust::binary_function<float, float, float> {
const float a;
const float b;
saxpby_functor(float _a, float _b) : a(_a), b(_b) {}
__host__ __device__ float operator()(const float& x, const float& y) const {
return a * x + b * y;
}
};
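// scalar CSR SpMV, one thread per row: output = beta * output + alpha * (A * input)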
__global__ void spmv_csr_kernel(int n, float alpha, const float* values,
const int* row_cdf, const int* col_index,
const float* input, float beta, float* output) {
const int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < n) {
float dot = 0;
const int start = row_cdf[row];
const int end = row_cdf[row + 1];
for (int j = start; j < end; ++j) dot += values[j] * input[col_index[j]];
output[row] = beta * output[row] + alpha * dot;
}
}
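// fused CSR SpMV: output[row] = (M * evolution)[row] - dt * (K * wave)[row] in a single pass over the row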
__global__ void rhs_kernel(int n, const float* mass_values,
const float* stiffness_values, const int* row_cdf,
const int* col_index, const float* wave_values,
const float* evolution_values, float dt,
float* output) {
const int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < n) {
float mass_dot = 0;
float stiffness_dot = 0;
const int start = row_cdf[row];
const int end = row_cdf[row + 1];
for (int j = start; j < end; ++j) {
mass_dot += mass_values[j] * evolution_values[col_index[j]];
stiffness_dot += stiffness_values[j] * wave_values[col_index[j]];
}
output[row] = mass_dot - dt * stiffness_dot;
}
}
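// warp-per-row CSR SpMV: 32 lanes share one row and combine partial sums via a shared-memory reduction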
__global__ void spmv_csr_vector_kernel(int n, float alpha, const float* values,
const int* row_cdf, const int* col_index,
const float* input, float beta,
float* output) {
__shared__ float vals[1024];
const int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
const int warp_id = thread_id / 32;
const int lane = thread_id & (32 - 1);
const int row = warp_id;
if (row < n) {
const int start = row_cdf[row];
const int end = row_cdf[row + 1];
vals[threadIdx.x] = 0;
for (int j = start + lane; j < end; j += 32)
vals[threadIdx.x] += values[j] * input[col_index[j]];
__syncthreads();
if (lane < 16) vals[threadIdx.x] += vals[threadIdx.x + 16];
__syncthreads();
if (lane < 8) vals[threadIdx.x] += vals[threadIdx.x + 8];
__syncthreads();
if (lane < 4) vals[threadIdx.x] += vals[threadIdx.x + 4];
__syncthreads();
if (lane < 2) vals[threadIdx.x] += vals[threadIdx.x + 2];
__syncthreads();
if (lane < 1) vals[threadIdx.x] += vals[threadIdx.x + 1];
__syncthreads();
if (lane == 0) output[row] = beta * output[row] + alpha * vals[threadIdx.x];
}
}
} // namespace
Wave_solver::Wave_solver(int n, float* mass, float* stiffness, int* row,
int* col, float* wave_data, float* evolution_data)
: dimension(n), nnz(row[n]) {
// std::cout << "dimension = " << dimension << "\nnnz = " << nnz << std::endl;
// thrust::copy(mass, mass + nnz, std::ostream_iterator<float>(std::cout,
// ","));
hipMalloc((void**)&mass_values, nnz * sizeof(float));
hipMalloc((void**)&stiffness_values, nnz * sizeof(float));
hipMalloc((void**)&wave, dimension * sizeof(float));
hipMalloc((void**)&evolution, dimension * sizeof(float));
hipMalloc((void**)&row_cdf, (dimension + 1) * sizeof(int));
hipMalloc((void**)&col_index, nnz * sizeof(int));
hipMemcpy(mass_values, mass, nnz * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(stiffness_values, stiffness, nnz * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(wave, wave_data, dimension * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(evolution, evolution_data, dimension * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(row_cdf, row, (dimension + 1) * sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(col_index, col, nnz * sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**)&tmp_p, dimension * sizeof(float));
hipMalloc((void**)&tmp_r, dimension * sizeof(float));
hipMalloc((void**)&tmp_y, dimension * sizeof(float));
hipDeviceProp_t property;
// int count;
// hipGetDeviceCount(&count);
// for (int i = 0; i < count; ++i){
hipGetDeviceProperties(&property, 0);
// }
threads_per_block = property.maxThreadsPerBlock;
blocks = (dimension + threads_per_block - 1) / threads_per_block;
std::cout << "CUDA Device Name = " << property.name << std::endl
<< "total global memory = " << property.totalGlobalMem << std::endl
<< "shared memory per block = " << property.sharedMemPerBlock
<< std::endl
<< "total const memory = " << property.totalConstMem << std::endl
<< "warp size = " << property.warpSize << std::endl
<< "maximum threads per block = " << property.maxThreadsPerBlock
<< std::endl;
std::cout << "maximum threads dimension = (";
std::copy(property.maxThreadsDim, property.maxThreadsDim + 3,
std::ostream_iterator<int>(std::cout, ","));
std::cout << ")" << std::endl;
std::cout << "maximum block dimension = (";
std::copy(property.maxGridSize, property.maxGridSize + 3,
std::ostream_iterator<int>(std::cout, ","));
std::cout << ")" << std::endl << std::endl;
std::cout << "used threads_per_block = " << threads_per_block << std::endl
<< "used blocks = " << blocks << std::endl;
}
Wave_solver::~Wave_solver() {
hipFree(mass_values);
hipFree(stiffness_values);
hipFree(wave);
hipFree(evolution);
hipFree(row_cdf);
hipFree(col_index);
hipFree(tmp_p);
hipFree(tmp_r);
hipFree(tmp_y);
}
void Wave_solver::operator()(float c, float dt) {
thrust::device_ptr<float> dev_evolution =
thrust::device_pointer_cast(evolution);
thrust::device_ptr<float> dev_wave(wave);
thrust::device_ptr<float> dev_tmp_y(tmp_y);
thrust::device_ptr<float> dev_tmp_p(tmp_p);
thrust::device_ptr<float> dev_tmp_r(tmp_r);
// Eigen::VectorXf rhs =
// (1.0f - gamma * dt) * mass_matrix * y - dt * c * c * stiffness_matrix *
// x;
//hipLaunchKernelGGL(( spmv_csr_kernel), dim3(blocks), dim3(threads_per_block), 0, 0,
// dimension, 1.0f, mass_values, row_cdf, col_index, evolution, 0.0f,
// tmp_y);
//hipLaunchKernelGGL(( spmv_csr_kernel), dim3(blocks), dim3(threads_per_block), 0, 0, dimension, -dt * c * c,
// stiffness_values, row_cdf,
// col_index, wave, 1.0f,
// tmp_y);
hipLaunchKernelGGL(( rhs_kernel), dim3(blocks), dim3(threads_per_block), 0, 0,
dimension, mass_values, stiffness_values, row_cdf, col_index, wave,
evolution, c * c * dt, tmp_r);
// Eigen::VectorXf r = A * x - b;
hipLaunchKernelGGL(( spmv_csr_kernel), dim3(blocks), dim3(threads_per_block), 0, 0, dimension, 1.0f, mass_values,
row_cdf, col_index, evolution,
-1.0f, tmp_r);
// thrust::copy(dev_tmp_y, dev_tmp_y + dimension, dev_tmp_r);
// Eigen::VectorXf p = -r;
thrust::transform(dev_tmp_r, dev_tmp_r + dimension, dev_tmp_p,
thrust::negate<float>());
// float res = r.squaredNorm();
float res =
thrust::inner_product(dev_tmp_r, dev_tmp_r + dimension, dev_tmp_r, 0.0f);
// std::cout << "res = " << res << std::endl;
int it = 0;
// for (auto i = 0; i < dimension; ++i) {
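// conjugate gradient solve of the mass-matrix system M * evolution = rhs,
// stopping when the squared residual drops below 1e-6 or after `dimension` iterations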
while ((it < 1 || res > 1e-6f) && it < dimension) {
// y = A * p;
hipLaunchKernelGGL(( spmv_csr_kernel), dim3(blocks), dim3(threads_per_block), 0, 0,
dimension, 1.0f, mass_values, row_cdf, col_index, tmp_p, 0.0f, tmp_y);
// thrust::copy(dev_tmp_y, dev_tmp_y + dimension,
// std::ostream_iterator<float>(std::cout, " "));
// const float alpha = res / p.dot(y);
const float tmp = thrust::inner_product(dev_tmp_p, dev_tmp_p + dimension,
dev_tmp_y, 0.0f);
// std::cout << "tmp = " << tmp << std::endl;
const float alpha = res / tmp;
// std::cout << "alpha = " << alpha << std::endl;
// x += alpha * p;
thrust::transform(dev_tmp_p, dev_tmp_p + dimension, dev_evolution,
dev_evolution, saxpy_functor(alpha));
// r += alpha * y;
thrust::transform(dev_tmp_y, dev_tmp_y + dimension, dev_tmp_r, dev_tmp_r,
saxpy_functor(alpha));
// const float new_res = r.squaredNorm();
const float new_res = thrust::inner_product(
dev_tmp_r, dev_tmp_r + dimension, dev_tmp_r, 0.0f);
// std::cout << "res = " << res << "\tnew_res = " << new_res << std::endl;
const float beta = new_res / res;
res = new_res;
// p = beta * p - r;
thrust::transform(dev_tmp_p, dev_tmp_p + dimension, dev_tmp_r, dev_tmp_p,
saxpby_functor(beta, -1.0f));
++it;
}
// std::cout << "res = " << res << "\tit = " << it << std::endl;
// x = x + dt * y;
thrust::transform(dev_evolution, dev_evolution + dimension, dev_wave,
dev_wave, saxpy_functor(dt));
}
void Wave_solver::copy_wave(float* output) {
thrust::device_ptr<float> dev_wave(wave);
thrust::copy(dev_wave, dev_wave + dimension, output);
}
} // namespace Fem
} // namespace Femog
|
9228609a6ac68a4cee4dc8e7d8a366bafe5b1560.cu
|
#include "gpu_wave_solver.h"
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/transform.h>
namespace Femog {
namespace Fem {
namespace {
struct saxpy_functor : public thrust::binary_function<float, float, float> {
const float a;
saxpy_functor(float _a) : a(_a) {}
__host__ __device__ float operator()(const float& x, const float& y) const {
return a * x + y;
}
};
struct saxpby_functor : public thrust::binary_function<float, float, float> {
const float a;
const float b;
saxpby_functor(float _a, float _b) : a(_a), b(_b) {}
__host__ __device__ float operator()(const float& x, const float& y) const {
return a * x + b * y;
}
};
__global__ void spmv_csr_kernel(int n, float alpha, const float* values,
const int* row_cdf, const int* col_index,
const float* input, float beta, float* output) {
const int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < n) {
float dot = 0;
const int start = row_cdf[row];
const int end = row_cdf[row + 1];
for (int j = start; j < end; ++j) dot += values[j] * input[col_index[j]];
output[row] = beta * output[row] + alpha * dot;
}
}
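// Host-side reference of the CSR product computed by spmv_csr_kernel above (an
// illustrative sketch added for clarity, not part of the original solver). In
// CSR form, row i owns the nonzeros values[row_cdf[i] .. row_cdf[i+1]-1] with
// column indices col_index[...]; the kernel assigns one thread to each
// iteration of the outer loop below.
inline void spmv_csr_reference(int n, float alpha, const float* values,
                               const int* row_cdf, const int* col_index,
                               const float* input, float beta, float* output) {
  for (int row = 0; row < n; ++row) {
    float dot = 0;
    for (int j = row_cdf[row]; j < row_cdf[row + 1]; ++j)
      dot += values[j] * input[col_index[j]];
    output[row] = beta * output[row] + alpha * dot;
  }
}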
__global__ void rhs_kernel(int n, const float* mass_values,
const float* stiffness_values, const int* row_cdf,
const int* col_index, const float* wave_values,
const float* evolution_values, float dt,
float* output) {
const int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < n) {
float mass_dot = 0;
float stiffness_dot = 0;
const int start = row_cdf[row];
const int end = row_cdf[row + 1];
for (int j = start; j < end; ++j) {
mass_dot += mass_values[j] * evolution_values[col_index[j]];
stiffness_dot += stiffness_values[j] * wave_values[col_index[j]];
}
output[row] = mass_dot - dt * stiffness_dot;
}
}
__global__ void spmv_csr_vector_kernel(int n, float alpha, const float* values,
const int* row_cdf, const int* col_index,
const float* input, float beta,
float* output) {
__shared__ float vals[1024];
const int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
const int warp_id = thread_id / 32;
const int lane = thread_id & (32 - 1);
const int row = warp_id;
if (row < n) {
const int start = row_cdf[row];
const int end = row_cdf[row + 1];
vals[threadIdx.x] = 0;
for (int j = start + lane; j < end; j += 32)
vals[threadIdx.x] += values[j] * input[col_index[j]];
__syncthreads();
if (lane < 16) vals[threadIdx.x] += vals[threadIdx.x + 16];
__syncthreads();
if (lane < 8) vals[threadIdx.x] += vals[threadIdx.x + 8];
__syncthreads();
if (lane < 4) vals[threadIdx.x] += vals[threadIdx.x + 4];
__syncthreads();
if (lane < 2) vals[threadIdx.x] += vals[threadIdx.x + 2];
__syncthreads();
if (lane < 1) vals[threadIdx.x] += vals[threadIdx.x + 1];
__syncthreads();
if (lane == 0) output[row] = beta * output[row] + alpha * vals[threadIdx.x];
}
}
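// Descriptive note (added commentary, not original source): spmv_csr_vector_kernel
// is a warp-per-row CSR SpMV. The 32 lanes of a warp stride across one row's
// nonzeros, accumulate partial sums in the shared array vals, and fold them with
// a tree reduction before lane 0 writes the row result. It is not launched
// anywhere in this file; Wave_solver::operator() below uses the one-thread-per-row
// spmv_csr_kernel instead.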
} // namespace
Wave_solver::Wave_solver(int n, float* mass, float* stiffness, int* row,
int* col, float* wave_data, float* evolution_data)
: dimension(n), nnz(row[n]) {
// std::cout << "dimension = " << dimension << "\nnnz = " << nnz << std::endl;
// thrust::copy(mass, mass + nnz, std::ostream_iterator<float>(std::cout,
// ","));
cudaMalloc((void**)&mass_values, nnz * sizeof(float));
cudaMalloc((void**)&stiffness_values, nnz * sizeof(float));
cudaMalloc((void**)&wave, dimension * sizeof(float));
cudaMalloc((void**)&evolution, dimension * sizeof(float));
cudaMalloc((void**)&row_cdf, (dimension + 1) * sizeof(int));
cudaMalloc((void**)&col_index, nnz * sizeof(int));
cudaMemcpy(mass_values, mass, nnz * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(stiffness_values, stiffness, nnz * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(wave, wave_data, dimension * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(evolution, evolution_data, dimension * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(row_cdf, row, (dimension + 1) * sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(col_index, col, nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&tmp_p, dimension * sizeof(float));
cudaMalloc((void**)&tmp_r, dimension * sizeof(float));
cudaMalloc((void**)&tmp_y, dimension * sizeof(float));
cudaDeviceProp property;
// int count;
// cudaGetDeviceCount(&count);
// for (int i = 0; i < count; ++i){
cudaGetDeviceProperties(&property, 0);
// }
threads_per_block = property.maxThreadsPerBlock;
blocks = (dimension + threads_per_block - 1) / threads_per_block;
std::cout << "CUDA Device Name = " << property.name << std::endl
<< "total global memory = " << property.totalGlobalMem << std::endl
<< "shared memory per block = " << property.sharedMemPerBlock
<< std::endl
<< "total const memory = " << property.totalConstMem << std::endl
<< "warp size = " << property.warpSize << std::endl
<< "maximum threads per block = " << property.maxThreadsPerBlock
<< std::endl;
std::cout << "maximum threads dimension = (";
std::copy(property.maxThreadsDim, property.maxThreadsDim + 3,
std::ostream_iterator<int>(std::cout, ","));
std::cout << ")" << std::endl;
std::cout << "maximum block dimension = (";
std::copy(property.maxGridSize, property.maxGridSize + 3,
std::ostream_iterator<int>(std::cout, ","));
std::cout << ")" << std::endl << std::endl;
std::cout << "used threads_per_block = " << threads_per_block << std::endl
<< "used blocks = " << blocks << std::endl;
}
Wave_solver::~Wave_solver() {
cudaFree(mass_values);
cudaFree(stiffness_values);
cudaFree(wave);
cudaFree(evolution);
cudaFree(row_cdf);
cudaFree(col_index);
cudaFree(tmp_p);
cudaFree(tmp_r);
cudaFree(tmp_y);
}
void Wave_solver::operator()(float c, float dt) {
thrust::device_ptr<float> dev_evolution =
thrust::device_pointer_cast(evolution);
thrust::device_ptr<float> dev_wave(wave);
thrust::device_ptr<float> dev_tmp_y(tmp_y);
thrust::device_ptr<float> dev_tmp_p(tmp_p);
thrust::device_ptr<float> dev_tmp_r(tmp_r);
// Eigen::VectorXf rhs =
// (1.0f - gamma * dt) * mass_matrix * y - dt * c * c * stiffness_matrix *
// x;
// spmv_csr_kernel<<<blocks, threads_per_block>>>(
// dimension, 1.0f, mass_values, row_cdf, col_index, evolution, 0.0f,
// tmp_y);
// spmv_csr_kernel<<<blocks, threads_per_block>>>(dimension, -dt * c * c,
// stiffness_values, row_cdf,
// col_index, wave, 1.0f,
// tmp_y);
rhs_kernel<<<blocks, threads_per_block>>>(
dimension, mass_values, stiffness_values, row_cdf, col_index, wave,
evolution, c * c * dt, tmp_r);
// Eigen::VectorXf r = A * x - b;
spmv_csr_kernel<<<blocks, threads_per_block>>>(dimension, 1.0f, mass_values,
row_cdf, col_index, evolution,
-1.0f, tmp_r);
// thrust::copy(dev_tmp_y, dev_tmp_y + dimension, dev_tmp_r);
// Eigen::VectorXf p = -r;
thrust::transform(dev_tmp_r, dev_tmp_r + dimension, dev_tmp_p,
thrust::negate<float>());
// float res = r.squaredNorm();
float res =
thrust::inner_product(dev_tmp_r, dev_tmp_r + dimension, dev_tmp_r, 0.0f);
// std::cout << "res = " << res << std::endl;
int it = 0;
// for (auto i = 0; i < dimension; ++i) {
while ((it < 1 || res > 1e-6f) && it < dimension) {
// y = A * p;
spmv_csr_kernel<<<blocks, threads_per_block>>>(
dimension, 1.0f, mass_values, row_cdf, col_index, tmp_p, 0.0f, tmp_y);
// thrust::copy(dev_tmp_y, dev_tmp_y + dimension,
// std::ostream_iterator<float>(std::cout, " "));
// const float alpha = res / p.dot(y);
const float tmp = thrust::inner_product(dev_tmp_p, dev_tmp_p + dimension,
dev_tmp_y, 0.0f);
// std::cout << "tmp = " << tmp << std::endl;
const float alpha = res / tmp;
// std::cout << "alpha = " << alpha << std::endl;
// x += alpha * p;
thrust::transform(dev_tmp_p, dev_tmp_p + dimension, dev_evolution,
dev_evolution, saxpy_functor(alpha));
// r += alpha * y;
thrust::transform(dev_tmp_y, dev_tmp_y + dimension, dev_tmp_r, dev_tmp_r,
saxpy_functor(alpha));
// const float new_res = r.squaredNorm();
const float new_res = thrust::inner_product(
dev_tmp_r, dev_tmp_r + dimension, dev_tmp_r, 0.0f);
// std::cout << "res = " << res << "\tnew_res = " << new_res << std::endl;
const float beta = new_res / res;
res = new_res;
// p = beta * p - r;
thrust::transform(dev_tmp_p, dev_tmp_p + dimension, dev_tmp_r, dev_tmp_p,
saxpby_functor(beta, -1.0f));
++it;
}
// std::cout << "res = " << res << "\tit = " << it << std::endl;
// x = x + dt * y;
thrust::transform(dev_evolution, dev_evolution + dimension, dev_wave,
dev_wave, saxpy_functor(dt));
}
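// Minimal host-side conjugate-gradient reference that mirrors the update order
// of operator() above (an illustrative sketch added for clarity; the dense
// row-major matrix A and the raw-pointer buffers are assumptions made for the
// example, whereas the solver itself works on CSR data resident on the GPU).
static void cg_reference_host(int n, const float* A, const float* b, float* x) {
  float* r = new float[n];
  float* p = new float[n];
  float* y = new float[n];
  // r = A * x - b;  p = -r
  for (int i = 0; i < n; ++i) {
    float dot = 0;
    for (int j = 0; j < n; ++j) dot += A[i * n + j] * x[j];
    r[i] = dot - b[i];
    p[i] = -r[i];
  }
  float res = 0;
  for (int i = 0; i < n; ++i) res += r[i] * r[i];
  int it = 0;
  while ((it < 1 || res > 1e-6f) && it < n) {
    // y = A * p
    for (int i = 0; i < n; ++i) {
      float dot = 0;
      for (int j = 0; j < n; ++j) dot += A[i * n + j] * p[j];
      y[i] = dot;
    }
    float p_dot_y = 0;
    for (int i = 0; i < n; ++i) p_dot_y += p[i] * y[i];
    const float alpha = res / p_dot_y;
    for (int i = 0; i < n; ++i) x[i] += alpha * p[i];       // x += alpha * p
    for (int i = 0; i < n; ++i) r[i] += alpha * y[i];       // r += alpha * y
    float new_res = 0;
    for (int i = 0; i < n; ++i) new_res += r[i] * r[i];
    const float beta = new_res / res;
    res = new_res;
    for (int i = 0; i < n; ++i) p[i] = beta * p[i] - r[i];  // p = beta * p - r
    ++it;
  }
  delete[] r;
  delete[] p;
  delete[] y;
}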
void Wave_solver::copy_wave(float* output) {
thrust::device_ptr<float> dev_wave(wave);
thrust::copy(dev_wave, dev_wave + dimension, output);
}
} // namespace Fem
} // namespace Femog
|
61ee5bdcf91a1d7aefcce0648223b1f284ca9c1a.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
This file contains routines for Parallel vector operations.
*/
#define PETSC_SKIP_SPINLOCK
#define PETSC_SKIP_CXX_COMPLEX_FIX
#include <petscconf.h>
#include <../src/vec/vec/impls/mpi/pvecimpl.h> /*I "petscvec.h" I*/
#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>
PetscErrorCode VecDestroy_MPICUDA(Vec v)
{
PetscErrorCode ierr;
hipError_t err;
PetscFunctionBegin;
if (v->spptr) {
if (((Vec_CUDA*)v->spptr)->GPUarray_allocated) {
err = hipFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err);
((Vec_CUDA*)v->spptr)->GPUarray_allocated = NULL;
}
if (((Vec_CUDA*)v->spptr)->stream) {
err = hipStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err);
}
ierr = PetscFree(v->spptr);CHKERRQ(ierr);
}
ierr = VecDestroy_MPI(v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecNorm_MPICUDA(Vec xin,NormType type,PetscReal *z)
{
PetscReal sum,work = 0.0;
PetscErrorCode ierr;
PetscFunctionBegin;
if (type == NORM_2 || type == NORM_FROBENIUS) {
ierr = VecNorm_SeqCUDA(xin,NORM_2,&work);CHKERRQ(ierr);
work *= work;
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = PetscSqrtReal(sum);
} else if (type == NORM_1) {
/* Find the local part */
ierr = VecNorm_SeqCUDA(xin,NORM_1,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
} else if (type == NORM_INFINITY) {
/* Find the local max */
ierr = VecNorm_SeqCUDA(xin,NORM_INFINITY,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
} else if (type == NORM_1_AND_2) {
PetscReal temp[2];
ierr = VecNorm_SeqCUDA(xin,NORM_1,temp);CHKERRQ(ierr);
ierr = VecNorm_SeqCUDA(xin,NORM_2,temp+1);CHKERRQ(ierr);
temp[1] = temp[1]*temp[1];
ierr = MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
z[1] = PetscSqrtReal(z[1]);
}
PetscFunctionReturn(0);
}
PetscErrorCode VecDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecTDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecTDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecMDot_MPICUDA(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z)
{
PetscScalar awork[128],*work = awork;
PetscErrorCode ierr;
PetscFunctionBegin;
if (nv > 128) {
ierr = PetscMalloc1(nv,&work);CHKERRQ(ierr);
}
ierr = VecMDot_SeqCUDA(xin,nv,y,work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
if (nv > 128) {
ierr = PetscFree(work);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*MC
VECMPICUDA - VECMPICUDA = "mpicuda" - The basic parallel vector, modified to use CUDA
Options Database Keys:
. -vec_type mpicuda - sets the vector type to VECMPICUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI()
M*/
PetscErrorCode VecDuplicate_MPICUDA(Vec win,Vec *v)
{
PetscErrorCode ierr;
Vec_MPI *vw,*w = (Vec_MPI*)win->data;
PetscScalar *array;
PetscFunctionBegin;
ierr = VecCreate(PetscObjectComm((PetscObject)win),v);CHKERRQ(ierr);
ierr = PetscLayoutReference(win->map,&(*v)->map);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*v,PETSC_TRUE,w->nghost,0);CHKERRQ(ierr);
vw = (Vec_MPI*)(*v)->data;
ierr = PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
/* save local representation of the parallel vector (and scatter) if it exists */
if (w->localrep) {
ierr = VecGetArray(*v,&array);CHKERRQ(ierr);
ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);CHKERRQ(ierr);
ierr = PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
ierr = VecRestoreArray(*v,&array);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);CHKERRQ(ierr);
vw->localupdate = w->localupdate;
if (vw->localupdate) {
ierr = PetscObjectReference((PetscObject)vw->localupdate);CHKERRQ(ierr);
}
}
/* New vector should inherit stashing property of parent */
(*v)->stash.donotstash = win->stash.donotstash;
(*v)->stash.ignorenegidx = win->stash.ignorenegidx;
/* change type_name appropriately */
ierr = VecCUDAAllocateCheck(*v);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)(*v),VECMPICUDA);CHKERRQ(ierr);
ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);CHKERRQ(ierr);
ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);CHKERRQ(ierr);
(*v)->map->bs = PetscAbs(win->map->bs);
(*v)->bstash.bs = win->bstash.bs;
PetscFunctionReturn(0);
}
PetscErrorCode VecDotNorm2_MPICUDA(Vec s,Vec t,PetscScalar *dp,PetscScalar *nm)
{
PetscErrorCode ierr;
PetscScalar work[2],sum[2];
PetscFunctionBegin;
ierr = VecDotNorm2_SeqCUDA(s,t,work,work+1);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));CHKERRQ(ierr);
*dp = sum[0];
*nm = sum[1];
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA(Vec vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(vv->map);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(vv,PETSC_FALSE,0,((Vec_CUDA*)vv->spptr)->GPUarray_allocated);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr);
vv->offloadmask = PETSC_OFFLOAD_BOTH;
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_CUDA(Vec v)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);CHKERRQ(ierr);
if (size == 1) {
ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr);
} else {
ierr = VecSetType(v,VECMPICUDA);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*@C
VecCreateMPICUDAWithArray - Creates a parallel, array-style vector,
where the user provides the GPU array space to store the vector values.
Collective
Input Parameters:
+ comm - the MPI communicator to use
. bs - block size, same meaning as VecSetBlockSize()
. n - local vector length, cannot be PETSC_DECIDE
. N - global vector length (or PETSC_DECIDE to have calculated)
- array - the user provided GPU array to store the vector values
Output Parameter:
. vv - the vector
Notes:
Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
same type as an existing vector.
If the user-provided array is NULL, then VecCUDAPlaceArray() can be used
at a later stage to SET the array for storing the vector values.
PETSc does NOT free the array when the vector is destroyed via VecDestroy().
The user should not free the array until the vector is destroyed.
Level: intermediate
.seealso: VecCreateSeqCUDAWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()
@*/
PetscErrorCode VecCreateMPICUDAWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
ierr = PetscSplitOwnership(comm,&n,&N);CHKERRQ(ierr);
ierr = VecCreate(comm,vv);CHKERRQ(ierr);
ierr = VecSetSizes(*vv,n,N);CHKERRQ(ierr);
ierr = VecSetBlockSize(*vv,bs);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*vv,PETSC_FALSE,0,array);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecBindToCPU_MPICUDA(Vec V,PetscBool pin)
{
PetscErrorCode ierr;
PetscFunctionBegin;
V->boundtocpu = pin;
if (pin) {
ierr = VecCUDACopyFromGPU(V);CHKERRQ(ierr);
V->offloadmask = PETSC_OFFLOAD_CPU; /* since the CPU code will likely change values in the vector */
V->ops->dotnorm2 = NULL;
V->ops->waxpy = VecWAXPY_Seq;
V->ops->dot = VecDot_MPI;
V->ops->mdot = VecMDot_MPI;
V->ops->tdot = VecTDot_MPI;
V->ops->norm = VecNorm_MPI;
V->ops->scale = VecScale_Seq;
V->ops->copy = VecCopy_Seq;
V->ops->set = VecSet_Seq;
V->ops->swap = VecSwap_Seq;
V->ops->axpy = VecAXPY_Seq;
V->ops->axpby = VecAXPBY_Seq;
V->ops->maxpy = VecMAXPY_Seq;
V->ops->aypx = VecAYPX_Seq;
V->ops->axpbypcz = VecAXPBYPCZ_Seq;
V->ops->pointwisemult = VecPointwiseMult_Seq;
V->ops->setrandom = VecSetRandom_Seq;
V->ops->placearray = VecPlaceArray_Seq;
V->ops->replacearray = VecReplaceArray_Seq;
V->ops->resetarray = VecResetArray_Seq;
V->ops->dot_local = VecDot_Seq;
V->ops->tdot_local = VecTDot_Seq;
V->ops->norm_local = VecNorm_Seq;
V->ops->mdot_local = VecMDot_Seq;
V->ops->pointwisedivide = VecPointwiseDivide_Seq;
V->ops->getlocalvector = NULL;
V->ops->restorelocalvector = NULL;
V->ops->getlocalvectorread = NULL;
V->ops->restorelocalvectorread = NULL;
V->ops->getarraywrite = NULL;
} else {
V->ops->dotnorm2 = VecDotNorm2_MPICUDA;
V->ops->waxpy = VecWAXPY_SeqCUDA;
V->ops->duplicate = VecDuplicate_MPICUDA;
V->ops->dot = VecDot_MPICUDA;
V->ops->mdot = VecMDot_MPICUDA;
V->ops->tdot = VecTDot_MPICUDA;
V->ops->norm = VecNorm_MPICUDA;
V->ops->scale = VecScale_SeqCUDA;
V->ops->copy = VecCopy_SeqCUDA;
V->ops->set = VecSet_SeqCUDA;
V->ops->swap = VecSwap_SeqCUDA;
V->ops->axpy = VecAXPY_SeqCUDA;
V->ops->axpby = VecAXPBY_SeqCUDA;
V->ops->maxpy = VecMAXPY_SeqCUDA;
V->ops->aypx = VecAYPX_SeqCUDA;
V->ops->axpbypcz = VecAXPBYPCZ_SeqCUDA;
V->ops->pointwisemult = VecPointwiseMult_SeqCUDA;
V->ops->setrandom = VecSetRandom_SeqCUDA;
V->ops->placearray = VecPlaceArray_SeqCUDA;
V->ops->replacearray = VecReplaceArray_SeqCUDA;
V->ops->resetarray = VecResetArray_SeqCUDA;
V->ops->dot_local = VecDot_SeqCUDA;
V->ops->tdot_local = VecTDot_SeqCUDA;
V->ops->norm_local = VecNorm_SeqCUDA;
V->ops->mdot_local = VecMDot_SeqCUDA;
V->ops->destroy = VecDestroy_MPICUDA;
V->ops->pointwisedivide = VecPointwiseDivide_SeqCUDA;
V->ops->getlocalvector = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvector = VecRestoreLocalVector_SeqCUDA;
V->ops->getlocalvectorread = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvectorread = VecRestoreLocalVector_SeqCUDA;
V->ops->getarraywrite = VecGetArrayWrite_SeqCUDA;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA_Private(Vec vv,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
{
PetscErrorCode ierr;
Vec_CUDA *veccuda;
PetscFunctionBegin;
ierr = VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)vv,VECMPICUDA);CHKERRQ(ierr);
ierr = VecBindToCPU_MPICUDA(vv,PETSC_FALSE);CHKERRQ(ierr);
vv->ops->bindtocpu = VecBindToCPU_MPICUDA;
/* Later, functions check for the Vec_CUDA structure existence, so do not create it without array */
if (alloc && !array) {
ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr);
vv->offloadmask = PETSC_OFFLOAD_BOTH;
}
if (array) {
if (!vv->spptr) {
/* Cannot use PetscNew() here because spptr is void* */
ierr = PetscMalloc(sizeof(Vec_CUDA),&vv->spptr);CHKERRQ(ierr);
veccuda = (Vec_CUDA*)vv->spptr;
veccuda->stream = 0; /* using default stream */
veccuda->GPUarray_allocated = 0;
veccuda->hostDataRegisteredAsPageLocked = PETSC_FALSE;
vv->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
}
veccuda = (Vec_CUDA*)vv->spptr;
veccuda->GPUarray = (PetscScalar*)array;
}
PetscFunctionReturn(0);
}
|
61ee5bdcf91a1d7aefcce0648223b1f284ca9c1a.cu
|
/*
This file contains routines for Parallel vector operations.
*/
#define PETSC_SKIP_SPINLOCK
#define PETSC_SKIP_CXX_COMPLEX_FIX
#include <petscconf.h>
#include <../src/vec/vec/impls/mpi/pvecimpl.h> /*I "petscvec.h" I*/
#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>
PetscErrorCode VecDestroy_MPICUDA(Vec v)
{
PetscErrorCode ierr;
cudaError_t err;
PetscFunctionBegin;
if (v->spptr) {
if (((Vec_CUDA*)v->spptr)->GPUarray_allocated) {
err = cudaFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err);
((Vec_CUDA*)v->spptr)->GPUarray_allocated = NULL;
}
if (((Vec_CUDA*)v->spptr)->stream) {
err = cudaStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err);
}
ierr = PetscFree(v->spptr);CHKERRQ(ierr);
}
ierr = VecDestroy_MPI(v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecNorm_MPICUDA(Vec xin,NormType type,PetscReal *z)
{
PetscReal sum,work = 0.0;
PetscErrorCode ierr;
PetscFunctionBegin;
if (type == NORM_2 || type == NORM_FROBENIUS) {
ierr = VecNorm_SeqCUDA(xin,NORM_2,&work);CHKERRQ(ierr);
work *= work;
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = PetscSqrtReal(sum);
} else if (type == NORM_1) {
/* Find the local part */
ierr = VecNorm_SeqCUDA(xin,NORM_1,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
} else if (type == NORM_INFINITY) {
/* Find the local max */
ierr = VecNorm_SeqCUDA(xin,NORM_INFINITY,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
} else if (type == NORM_1_AND_2) {
PetscReal temp[2];
ierr = VecNorm_SeqCUDA(xin,NORM_1,temp);CHKERRQ(ierr);
ierr = VecNorm_SeqCUDA(xin,NORM_2,temp+1);CHKERRQ(ierr);
temp[1] = temp[1]*temp[1];
ierr = MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
z[1] = PetscSqrtReal(z[1]);
}
PetscFunctionReturn(0);
}
PetscErrorCode VecDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecTDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecTDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecMDot_MPICUDA(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z)
{
PetscScalar awork[128],*work = awork;
PetscErrorCode ierr;
PetscFunctionBegin;
if (nv > 128) {
ierr = PetscMalloc1(nv,&work);CHKERRQ(ierr);
}
ierr = VecMDot_SeqCUDA(xin,nv,y,work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
if (nv > 128) {
ierr = PetscFree(work);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*MC
VECMPICUDA - VECMPICUDA = "mpicuda" - The basic parallel vector, modified to use CUDA
Options Database Keys:
. -vec_type mpicuda - sets the vector type to VECMPICUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI()
M*/
PetscErrorCode VecDuplicate_MPICUDA(Vec win,Vec *v)
{
PetscErrorCode ierr;
Vec_MPI *vw,*w = (Vec_MPI*)win->data;
PetscScalar *array;
PetscFunctionBegin;
ierr = VecCreate(PetscObjectComm((PetscObject)win),v);CHKERRQ(ierr);
ierr = PetscLayoutReference(win->map,&(*v)->map);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*v,PETSC_TRUE,w->nghost,0);CHKERRQ(ierr);
vw = (Vec_MPI*)(*v)->data;
ierr = PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
/* save local representation of the parallel vector (and scatter) if it exists */
if (w->localrep) {
ierr = VecGetArray(*v,&array);CHKERRQ(ierr);
ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);CHKERRQ(ierr);
ierr = PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
ierr = VecRestoreArray(*v,&array);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);CHKERRQ(ierr);
vw->localupdate = w->localupdate;
if (vw->localupdate) {
ierr = PetscObjectReference((PetscObject)vw->localupdate);CHKERRQ(ierr);
}
}
/* New vector should inherit stashing property of parent */
(*v)->stash.donotstash = win->stash.donotstash;
(*v)->stash.ignorenegidx = win->stash.ignorenegidx;
/* change type_name appropriately */
ierr = VecCUDAAllocateCheck(*v);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)(*v),VECMPICUDA);CHKERRQ(ierr);
ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);CHKERRQ(ierr);
ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);CHKERRQ(ierr);
(*v)->map->bs = PetscAbs(win->map->bs);
(*v)->bstash.bs = win->bstash.bs;
PetscFunctionReturn(0);
}
PetscErrorCode VecDotNorm2_MPICUDA(Vec s,Vec t,PetscScalar *dp,PetscScalar *nm)
{
PetscErrorCode ierr;
PetscScalar work[2],sum[2];
PetscFunctionBegin;
ierr = VecDotNorm2_SeqCUDA(s,t,work,work+1);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));CHKERRQ(ierr);
*dp = sum[0];
*nm = sum[1];
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA(Vec vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(vv->map);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(vv,PETSC_FALSE,0,((Vec_CUDA*)vv->spptr)->GPUarray_allocated);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr);
vv->offloadmask = PETSC_OFFLOAD_BOTH;
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_CUDA(Vec v)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);CHKERRQ(ierr);
if (size == 1) {
ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr);
} else {
ierr = VecSetType(v,VECMPICUDA);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*@C
VecCreateMPICUDAWithArray - Creates a parallel, array-style vector,
where the user provides the GPU array space to store the vector values.
Collective
Input Parameters:
+ comm - the MPI communicator to use
. bs - block size, same meaning as VecSetBlockSize()
. n - local vector length, cannot be PETSC_DECIDE
. N - global vector length (or PETSC_DECIDE to have calculated)
- array - the user provided GPU array to store the vector values
Output Parameter:
. vv - the vector
Notes:
Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
same type as an existing vector.
If the user-provided array is NULL, then VecCUDAPlaceArray() can be used
at a later stage to SET the array for storing the vector values.
PETSc does NOT free the array when the vector is destroyed via VecDestroy().
The user should not free the array until the vector is destroyed.
Level: intermediate
.seealso: VecCreateSeqCUDAWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()
@*/
PetscErrorCode VecCreateMPICUDAWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
ierr = PetscSplitOwnership(comm,&n,&N);CHKERRQ(ierr);
ierr = VecCreate(comm,vv);CHKERRQ(ierr);
ierr = VecSetSizes(*vv,n,N);CHKERRQ(ierr);
ierr = VecSetBlockSize(*vv,bs);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*vv,PETSC_FALSE,0,array);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
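/* Illustrative usage sketch (added commentary, not part of PETSc): create a
   VECMPICUDA vector on top of a caller-owned device buffer, as described in the
   notes above. The local length, communicator, and the VecSet() call are
   assumptions made for the example; PETSc never frees the user array, so the
   caller releases it only after the vector has been destroyed. */
static PetscErrorCode ExampleVecFromDeviceArray(MPI_Comm comm,PetscInt nlocal)
{
  PetscErrorCode ierr;
  PetscScalar    *gpuarray;
  cudaError_t    err;
  Vec            v;

  PetscFunctionBegin;
  err  = cudaMalloc((void**)&gpuarray,nlocal*sizeof(PetscScalar));CHKERRCUDA(err);
  ierr = VecCreateMPICUDAWithArray(comm,1,nlocal,PETSC_DECIDE,gpuarray,&v);CHKERRQ(ierr);
  ierr = VecSet(v,1.0);CHKERRQ(ierr);        /* operations act directly on the user GPU array */
  ierr = VecDestroy(&v);CHKERRQ(ierr);
  err  = cudaFree(gpuarray);CHKERRCUDA(err); /* the user array outlives the vector */
  PetscFunctionReturn(0);
}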
PetscErrorCode VecBindToCPU_MPICUDA(Vec V,PetscBool pin)
{
PetscErrorCode ierr;
PetscFunctionBegin;
V->boundtocpu = pin;
if (pin) {
ierr = VecCUDACopyFromGPU(V);CHKERRQ(ierr);
V->offloadmask = PETSC_OFFLOAD_CPU; /* since the CPU code will likely change values in the vector */
V->ops->dotnorm2 = NULL;
V->ops->waxpy = VecWAXPY_Seq;
V->ops->dot = VecDot_MPI;
V->ops->mdot = VecMDot_MPI;
V->ops->tdot = VecTDot_MPI;
V->ops->norm = VecNorm_MPI;
V->ops->scale = VecScale_Seq;
V->ops->copy = VecCopy_Seq;
V->ops->set = VecSet_Seq;
V->ops->swap = VecSwap_Seq;
V->ops->axpy = VecAXPY_Seq;
V->ops->axpby = VecAXPBY_Seq;
V->ops->maxpy = VecMAXPY_Seq;
V->ops->aypx = VecAYPX_Seq;
V->ops->axpbypcz = VecAXPBYPCZ_Seq;
V->ops->pointwisemult = VecPointwiseMult_Seq;
V->ops->setrandom = VecSetRandom_Seq;
V->ops->placearray = VecPlaceArray_Seq;
V->ops->replacearray = VecReplaceArray_Seq;
V->ops->resetarray = VecResetArray_Seq;
V->ops->dot_local = VecDot_Seq;
V->ops->tdot_local = VecTDot_Seq;
V->ops->norm_local = VecNorm_Seq;
V->ops->mdot_local = VecMDot_Seq;
V->ops->pointwisedivide = VecPointwiseDivide_Seq;
V->ops->getlocalvector = NULL;
V->ops->restorelocalvector = NULL;
V->ops->getlocalvectorread = NULL;
V->ops->restorelocalvectorread = NULL;
V->ops->getarraywrite = NULL;
} else {
V->ops->dotnorm2 = VecDotNorm2_MPICUDA;
V->ops->waxpy = VecWAXPY_SeqCUDA;
V->ops->duplicate = VecDuplicate_MPICUDA;
V->ops->dot = VecDot_MPICUDA;
V->ops->mdot = VecMDot_MPICUDA;
V->ops->tdot = VecTDot_MPICUDA;
V->ops->norm = VecNorm_MPICUDA;
V->ops->scale = VecScale_SeqCUDA;
V->ops->copy = VecCopy_SeqCUDA;
V->ops->set = VecSet_SeqCUDA;
V->ops->swap = VecSwap_SeqCUDA;
V->ops->axpy = VecAXPY_SeqCUDA;
V->ops->axpby = VecAXPBY_SeqCUDA;
V->ops->maxpy = VecMAXPY_SeqCUDA;
V->ops->aypx = VecAYPX_SeqCUDA;
V->ops->axpbypcz = VecAXPBYPCZ_SeqCUDA;
V->ops->pointwisemult = VecPointwiseMult_SeqCUDA;
V->ops->setrandom = VecSetRandom_SeqCUDA;
V->ops->placearray = VecPlaceArray_SeqCUDA;
V->ops->replacearray = VecReplaceArray_SeqCUDA;
V->ops->resetarray = VecResetArray_SeqCUDA;
V->ops->dot_local = VecDot_SeqCUDA;
V->ops->tdot_local = VecTDot_SeqCUDA;
V->ops->norm_local = VecNorm_SeqCUDA;
V->ops->mdot_local = VecMDot_SeqCUDA;
V->ops->destroy = VecDestroy_MPICUDA;
V->ops->pointwisedivide = VecPointwiseDivide_SeqCUDA;
V->ops->getlocalvector = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvector = VecRestoreLocalVector_SeqCUDA;
V->ops->getlocalvectorread = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvectorread = VecRestoreLocalVector_SeqCUDA;
V->ops->getarraywrite = VecGetArrayWrite_SeqCUDA;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA_Private(Vec vv,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
{
PetscErrorCode ierr;
Vec_CUDA *veccuda;
PetscFunctionBegin;
ierr = VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)vv,VECMPICUDA);CHKERRQ(ierr);
ierr = VecBindToCPU_MPICUDA(vv,PETSC_FALSE);CHKERRQ(ierr);
vv->ops->bindtocpu = VecBindToCPU_MPICUDA;
/* Later, functions check for the Vec_CUDA structure existence, so do not create it without array */
if (alloc && !array) {
ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr);
vv->offloadmask = PETSC_OFFLOAD_BOTH;
}
if (array) {
if (!vv->spptr) {
/* Cannot use PetscNew() here because spptr is void* */
ierr = PetscMalloc(sizeof(Vec_CUDA),&vv->spptr);CHKERRQ(ierr);
veccuda = (Vec_CUDA*)vv->spptr;
veccuda->stream = 0; /* using default stream */
veccuda->GPUarray_allocated = 0;
veccuda->hostDataRegisteredAsPageLocked = PETSC_FALSE;
vv->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
}
veccuda = (Vec_CUDA*)vv->spptr;
veccuda->GPUarray = (PetscScalar*)array;
}
PetscFunctionReturn(0);
}
|
a929b2d9101a1e064e2f11fc37d8af112d54ed17.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/rotate/rotate.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./rotate.cuh"
#include "megdnn/dtype.h"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
static const int BX = 8;
static const int BY = 8;
namespace {
#define rep(i, n) for (size_t i = 0; i < (n); ++i)
template <typename T, bool clockwise, size_t IC>
__global__ void rotate_kern(const T* src, T* dst, size_t N, size_t IH,
size_t IW, size_t istride0, size_t istride1,
size_t istride2, size_t OH, size_t OW,
size_t ostride0, size_t ostride1, size_t ostride2) {
int iw = blockIdx.x * blockDim.x + threadIdx.x;
int ih = blockIdx.y * blockDim.y + threadIdx.y;
if (iw < IW && ih < IH) {
int ow = clockwise ? IH - ih - 1 : ih;
int oh = clockwise ? iw : IW - iw - 1;
#pragma unroll
rep(c, IC) {
dst[blockIdx.z * ostride0 + oh * ostride1 + ow * ostride2 + c] =
src[blockIdx.z * istride0 + ih * istride1 + iw * istride2 + c];
}
}
}
#undef rep
} // anonymous namespace
namespace rotate {
template <typename T, bool clockwise>
void rotate(const T* src, T* dst, size_t N, size_t IH, size_t IW, size_t CH,
size_t istride0, size_t istride1, size_t istride2, size_t OH,
size_t OW, size_t ostride0, size_t ostride1, size_t ostride2,
hipStream_t stream) {
dim3 threads(BX, BY);
dim3 blocks(DIVUP(IW, BX), DIVUP(IH, BY), N);
megdnn_assert(CH == 1 || CH == 3);
if (CH == 1)
hipLaunchKernelGGL(( rotate_kern<T, clockwise, 1>), dim3(blocks), dim3(threads), 0, stream,
src, dst, N, IH, IW, istride0, istride1, istride2, OH, OW, ostride0,
ostride1, ostride2);
else
hipLaunchKernelGGL(( rotate_kern<T, clockwise, 3>), dim3(blocks), dim3(threads), 0, stream,
src, dst, N, IH, IW, istride0, istride1, istride2, OH, OW, ostride0,
ostride1, ostride2);
after_kernel_launch();
}
#define INST(T, clockwise) \
template void rotate<T, clockwise>( \
const T* src, T* dst, size_t N, size_t IH, size_t IW, size_t CH, \
size_t istride0, size_t istride1, size_t istride2, size_t OH, \
size_t OW, size_t ostride0, size_t ostride1, size_t ostride2, \
hipStream_t stream);
#define cb(DType) \
INST(typename DTypeTrait<DType>::ctype, true) \
INST(typename DTypeTrait<DType>::ctype, false)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
#undef INST
} // namespace rotate
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
a929b2d9101a1e064e2f11fc37d8af112d54ed17.cu
|
/**
* \file dnn/src/cuda/rotate/rotate.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./rotate.cuh"
#include "megdnn/dtype.h"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
static const int BX = 8;
static const int BY = 8;
namespace {
#define rep(i, n) for (size_t i = 0; i < (n); ++i)
template <typename T, bool clockwise, size_t IC>
__global__ void rotate_kern(const T* src, T* dst, size_t N, size_t IH,
size_t IW, size_t istride0, size_t istride1,
size_t istride2, size_t OH, size_t OW,
size_t ostride0, size_t ostride1, size_t ostride2) {
int iw = blockIdx.x * blockDim.x + threadIdx.x;
int ih = blockIdx.y * blockDim.y + threadIdx.y;
if (iw < IW && ih < IH) {
int ow = clockwise ? IH - ih - 1 : ih;
int oh = clockwise ? iw : IW - iw - 1;
#pragma unroll
rep(c, IC) {
dst[blockIdx.z * ostride0 + oh * ostride1 + ow * ostride2 + c] =
src[blockIdx.z * istride0 + ih * istride1 + iw * istride2 + c];
}
}
}
#undef rep
} // anonymous namespace
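// Host-side sketch of the index mapping used by rotate_kern above (added
// commentary, not part of the original file): a clockwise 90-degree rotation
// sends source pixel (ih, iw) of an IH x IW image to destination pixel
// (oh, ow) = (iw, IH - ih - 1) of the IW x IH output; the counter-clockwise
// case sends it to (IW - iw - 1, ih). The contiguous single-channel layout
// assumed below is for illustration only.
template <typename T, bool clockwise>
void rotate_reference_host(const T* src, T* dst, size_t IH, size_t IW) {
    for (size_t ih = 0; ih < IH; ++ih) {
        for (size_t iw = 0; iw < IW; ++iw) {
            const size_t ow = clockwise ? IH - ih - 1 : ih;
            const size_t oh = clockwise ? iw : IW - iw - 1;
            dst[oh * IH + ow] = src[ih * IW + iw];  // output has IW rows of IH pixels
        }
    }
}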
namespace rotate {
template <typename T, bool clockwise>
void rotate(const T* src, T* dst, size_t N, size_t IH, size_t IW, size_t CH,
size_t istride0, size_t istride1, size_t istride2, size_t OH,
size_t OW, size_t ostride0, size_t ostride1, size_t ostride2,
cudaStream_t stream) {
dim3 threads(BX, BY);
dim3 blocks(DIVUP(IW, BX), DIVUP(IH, BY), N);
megdnn_assert(CH == 1 || CH == 3);
if (CH == 1)
rotate_kern<T, clockwise, 1><<<blocks, threads, 0, stream>>>(
src, dst, N, IH, IW, istride0, istride1, istride2, OH, OW, ostride0,
ostride1, ostride2);
else
rotate_kern<T, clockwise, 3><<<blocks, threads, 0, stream>>>(
src, dst, N, IH, IW, istride0, istride1, istride2, OH, OW, ostride0,
ostride1, ostride2);
after_kernel_launch();
}
#define INST(T, clockwise) \
template void rotate<T, clockwise>( \
const T* src, T* dst, size_t N, size_t IH, size_t IW, size_t CH, \
size_t istride0, size_t istride1, size_t istride2, size_t OH, \
size_t OW, size_t ostride0, size_t ostride1, size_t ostride2, \
cudaStream_t stream);
#define cb(DType) \
INST(typename DTypeTrait<DType>::ctype, true) \
INST(typename DTypeTrait<DType>::ctype, false)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
#undef INST
} // namespace rotate
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
a4ac94adc8988dedbec6fcf611ff8c4262335537.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <sys/time.h> // get time of day
#include <sys/times.h> // get time of day
#include <sys/mman.h> // mmap
#include <unistd.h> // getpid
#include <hip/hip_runtime.h>
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define NUM_THREADS_PER_BLOCK_DIMENSION 16
#define PRINT_TIME 1
#define SM_ARR_LEN 2048
#define TOL 1e-1
#define OMEGA 1.8
#define GET_SECONDS_TICS 100
#define IMUL(a, b) __mul24(a, b)
void initializeArray1D(float *arr, int len, int seed); // Initialize 2D array as concatenated sets of 1D arrays
void mmm_kij(float* a0, float* b0, float* c0);
double get_seconds() { /* routine to read time */
struct tms rusage;
times(&rusage); /* UNIX utility: time in clock ticks */
return (double)(rusage.tms_utime)/(double)(GET_SECONDS_TICS);
}
__global__ void kernel_MMM (int arrLen, float* A, float* B, float* C) {
__shared__ float tempA[NUM_THREADS_PER_BLOCK_DIMENSION][NUM_THREADS_PER_BLOCK_DIMENSION];
__shared__ float tempB[NUM_THREADS_PER_BLOCK_DIMENSION][NUM_THREADS_PER_BLOCK_DIMENSION];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int Col = bx*blockDim.x + tx;
int Row = by*blockDim.y + ty;
float Pvalue = 0.0;
for (int m =0 ; m < arrLen/NUM_THREADS_PER_BLOCK_DIMENSION; m++){
tempA[ty][tx] = A[Row*arrLen + (m*NUM_THREADS_PER_BLOCK_DIMENSION + tx)];
tempB[ty][tx] = B[Col + (m*NUM_THREADS_PER_BLOCK_DIMENSION + ty)*arrLen];
__syncthreads();
for (int k = 0; k < NUM_THREADS_PER_BLOCK_DIMENSION; k++)
Pvalue += tempA[ty][k] * tempB[k][tx];
__syncthreads();
}
C[Row*arrLen+Col] = Pvalue;
}
int main(int argc, char **argv){
int arrLen = 0;
// GPU Timing variables
hipEvent_t start, stop;
float elapsed_gpu;
double sec;
// Arrays on GPU global memory
float *d_arrayA;
float *d_arrayB;
float *d_arrayC;
// Arrays on the host memory
float *h_arrayA;
float *h_arrayB;
float *h_arrayC_CPU;
float *h_arrayC_GPU;
int i, errCount = 0, zeroCount = 0;
if (argc > 1) {
arrLen = atoi(argv[1]);
}
else {
arrLen = SM_ARR_LEN;
}
printf("Length of the array = %d\n", arrLen);
// Select GPU
CUDA_SAFE_CALL(hipSetDevice(0));
// Set block dimensions
dim3 threadsPerBlock(NUM_THREADS_PER_BLOCK_DIMENSION, NUM_THREADS_PER_BLOCK_DIMENSION);
dim3 NUM_BLOCKS(arrLen/NUM_THREADS_PER_BLOCK_DIMENSION,arrLen/NUM_THREADS_PER_BLOCK_DIMENSION);
// Allocate GPU memory
size_t allocSize = arrLen * arrLen * sizeof(float);
CUDA_SAFE_CALL(hipMalloc((void **)&d_arrayA, allocSize));
CUDA_SAFE_CALL(hipMalloc((void **)&d_arrayB, allocSize));
CUDA_SAFE_CALL(hipMalloc((void **)&d_arrayC, allocSize));
// Allocate arrays on host memory
h_arrayA = (float *) malloc(allocSize);
h_arrayB = (float *) malloc(allocSize);
h_arrayC_CPU = (float *) calloc(arrLen * arrLen, sizeof(float)); // zero-initialized: the CPU reference below accumulates with +=
h_arrayC_GPU = (float *) malloc(allocSize);
// Initialize the host arrays
printf("\nInitializing the arrays ...");
// Arrays are initialized with a known seed for reproducibility
initializeArray1D(h_arrayA, arrLen, 2453);
initializeArray1D(h_arrayB, arrLen, 2453);
printf("\t... done\n\n");
#if PRINT_TIME
// Create the cuda events
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
#endif
// Transfer the arrays to the GPU memory
CUDA_SAFE_CALL(hipMemcpy(d_arrayA, h_arrayA, allocSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_arrayB, h_arrayB, allocSize, hipMemcpyHostToDevice));
// Launch the kernel
hipLaunchKernelGGL(( kernel_MMM) , dim3(NUM_BLOCKS), dim3(threadsPerBlock) , 0, 0, arrLen, d_arrayA, d_arrayB, d_arrayC);
// Check for errors during launch
CUDA_SAFE_CALL(hipPeekAtLastError());
// Transfer the results back to the host
CUDA_SAFE_CALL(hipMemcpy(h_arrayC_GPU, d_arrayC, allocSize, hipMemcpyDeviceToHost));
#if PRINT_TIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_gpu, start, stop);
printf("\nGPU time: %f (msec)\n", elapsed_gpu);
hipEventDestroy(start);
hipEventDestroy(stop);
#endif
// Compute the results on the host
printf("Starting CPU Computation\n");
sec = get_seconds();
long int j, k;
float r;
int length = arrLen;
for (k = 0; k < length; k++)
for (i = 0; i < length; i++) {
r = h_arrayA[i*length+k];
for (j = 0; j < length; j++)
h_arrayC_CPU[i*length+j] += r*h_arrayB[k*length+j];
}
sec = (get_seconds() - sec);
printf("\n CPUTime = %f (msec)\n", sec*1000.0);
// Compare the results
printf("Comparing Results\n");
for(i = 0; i < arrLen*arrLen; i++) {
if (abs((h_arrayC_CPU[i] - h_arrayC_GPU[i])/h_arrayC_GPU[i]) > TOL) {
errCount++;
}
if (h_arrayC_GPU[i] == 0) {
zeroCount++;
}
}
if (errCount > 0) {
printf("\n@ERROR: TEST FAILED: %d results did not matched\n", errCount);
}
else if (zeroCount > 0){
printf("\n@ERROR: TEST FAILED: %d results (from GPU) are zero\n", zeroCount);
}
else {
printf("\nTEST PASSED: All results matched\n");
}
// Free-up device and host memory
CUDA_SAFE_CALL(hipFree(d_arrayA));
CUDA_SAFE_CALL(hipFree(d_arrayB));
CUDA_SAFE_CALL(hipFree(d_arrayC));
free(h_arrayC_CPU);
free(h_arrayC_GPU);
free(h_arrayA);
free(h_arrayB);
return 0;
}
void initializeArray1D(float *arr, int len, int seed) {
int i;
float randNum;
srand(seed);
for (i = 0; i < len*len; i++) {
randNum = (float) rand();
arr[i] = randNum;
}
}
|
a4ac94adc8988dedbec6fcf611ff8c4262335537.cu
|
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <sys/time.h> // get time of day
#include <sys/times.h> // get time of day
#include <sys/mman.h> // mmap
#include <unistd.h> // getpid
#include <cuda.h>
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define NUM_THREADS_PER_BLOCK_DIMENSION 16
#define PRINT_TIME 1
#define SM_ARR_LEN 2048
#define TOL 1e-1
#define OMEGA 1.8
#define GET_SECONDS_TICS 100
#define IMUL(a, b) __mul24(a, b)
void initializeArray1D(float *arr, int len, int seed); // Initialize 2D array as concatenated sets of 1D arrays
void mmm_kij(float* a0, float* b0, float* c0);
double get_seconds() { /* routine to read time */
struct tms rusage;
times(&rusage); /* UNIX utility: time in clock ticks */
return (double)(rusage.tms_utime)/(double)(GET_SECONDS_TICS);
}
__global__ void kernel_MMM (int arrLen, float* A, float* B, float* C) {
__shared__ float tempA[NUM_THREADS_PER_BLOCK_DIMENSION][NUM_THREADS_PER_BLOCK_DIMENSION];
__shared__ float tempB[NUM_THREADS_PER_BLOCK_DIMENSION][NUM_THREADS_PER_BLOCK_DIMENSION];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int Col = bx*blockDim.x + tx;
int Row = by*blockDim.y + ty;
float Pvalue = 0.0;
for (int m =0 ; m < arrLen/NUM_THREADS_PER_BLOCK_DIMENSION; m++){
tempA[ty][tx] = A[Row*arrLen + (m*NUM_THREADS_PER_BLOCK_DIMENSION + tx)];
tempB[ty][tx] = B[Col + (m*NUM_THREADS_PER_BLOCK_DIMENSION + ty)*arrLen];
__syncthreads();
for (int k = 0; k < NUM_THREADS_PER_BLOCK_DIMENSION; k++)
Pvalue += tempA[ty][k] * tempB[k][tx];
__syncthreads();
}
C[Row*arrLen+Col] = Pvalue;
}
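/* Descriptive note (added commentary, not original source): each 16x16 thread
   block of kernel_MMM computes one 16x16 tile of C. The m-loop slides a
   16-column strip of A and a 16-row strip of B through shared memory, so every
   global element is read once per block instead of once per thread. The loop
   bound arrLen/NUM_THREADS_PER_BLOCK_DIMENSION assumes arrLen is a multiple of
   16 (true for the default SM_ARR_LEN of 2048); other sizes would need bounds
   checks when filling tempA and tempB. */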
int main(int argc, char **argv){
int arrLen = 0;
// GPU Timing variables
cudaEvent_t start, stop;
float elapsed_gpu;
double sec;
// Arrays on GPU global memory
float *d_arrayA;
float *d_arrayB;
float *d_arrayC;
// Arrays on the host memory
float *h_arrayA;
float *h_arrayB;
float *h_arrayC_CPU;
float *h_arrayC_GPU;
int i, errCount = 0, zeroCount = 0;
if (argc > 1) {
arrLen = atoi(argv[1]);
}
else {
arrLen = SM_ARR_LEN;
}
printf("Length of the array = %d\n", arrLen);
// Select GPU
CUDA_SAFE_CALL(cudaSetDevice(0));
// Set block dimensions
dim3 threadsPerBlock(NUM_THREADS_PER_BLOCK_DIMENSION, NUM_THREADS_PER_BLOCK_DIMENSION);
dim3 NUM_BLOCKS(arrLen/NUM_THREADS_PER_BLOCK_DIMENSION,arrLen/NUM_THREADS_PER_BLOCK_DIMENSION);
// Allocate GPU memory
size_t allocSize = arrLen * arrLen * sizeof(float);
CUDA_SAFE_CALL(cudaMalloc((void **)&d_arrayA, allocSize));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_arrayB, allocSize));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_arrayC, allocSize));
// Allocate arrays on host memory
h_arrayA = (float *) malloc(allocSize);
h_arrayB = (float *) malloc(allocSize);
h_arrayC_CPU = (float *) calloc(arrLen * arrLen, sizeof(float)); // zero-initialized: the CPU reference below accumulates with +=
h_arrayC_GPU = (float *) malloc(allocSize);
// Initialize the host arrays
printf("\nInitializing the arrays ...");
// Arrays are initialized with a known seed for reproducibility
initializeArray1D(h_arrayA, arrLen, 2453);
initializeArray1D(h_arrayB, arrLen, 2453);
printf("\t... done\n\n");
#if PRINT_TIME
// Create the cuda events
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
// Transfer the arrays to the GPU memory
CUDA_SAFE_CALL(cudaMemcpy(d_arrayA, h_arrayA, allocSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_arrayB, h_arrayB, allocSize, cudaMemcpyHostToDevice));
// Launch the kernel
kernel_MMM <<<NUM_BLOCKS, threadsPerBlock >>>(arrLen, d_arrayA, d_arrayB, d_arrayC);
// Check for errors during launch
CUDA_SAFE_CALL(cudaPeekAtLastError());
// Transfer the results back to the host
CUDA_SAFE_CALL(cudaMemcpy(h_arrayC_GPU, d_arrayC, allocSize, cudaMemcpyDeviceToHost));
#if PRINT_TIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_gpu, start, stop);
printf("\nGPU time: %f (msec)\n", elapsed_gpu);
cudaEventDestroy(start);
cudaEventDestroy(stop);
#endif
// Compute the results on the host
printf("Starting CPU Computation\n");
sec = get_seconds();
long int j, k;
float r;
int length = arrLen;
for (k = 0; k < length; k++)
for (i = 0; i < length; i++) {
r = h_arrayA[i*length+k];
for (j = 0; j < length; j++)
h_arrayC_CPU[i*length+j] += r*h_arrayB[k*length+j];
}
sec = (get_seconds() - sec);
printf("\n CPUTime = %f (msec)\n", sec*1000.0);
// Compare the results
printf("Comparing Results\n");
for(i = 0; i < arrLen*arrLen; i++) {
if (abs((h_arrayC_CPU[i] - h_arrayC_GPU[i])/h_arrayC_GPU[i]) > TOL) {
errCount++;
}
if (h_arrayC_GPU[i] == 0) {
zeroCount++;
}
}
if (errCount > 0) {
printf("\n@ERROR: TEST FAILED: %d results did not matched\n", errCount);
}
else if (zeroCount > 0){
printf("\n@ERROR: TEST FAILED: %d results (from GPU) are zero\n", zeroCount);
}
else {
printf("\nTEST PASSED: All results matched\n");
}
// Free-up device and host memory
CUDA_SAFE_CALL(cudaFree(d_arrayA));
CUDA_SAFE_CALL(cudaFree(d_arrayB));
CUDA_SAFE_CALL(cudaFree(d_arrayC));
free(h_arrayC_CPU);
free(h_arrayC_GPU);
free(h_arrayA);
free(h_arrayB);
return 0;
}
void initializeArray1D(float *arr, int len, int seed) {
int i;
float randNum;
srand(seed);
for (i = 0; i < len*len; i++) {
randNum = (float) rand();
arr[i] = randNum;
}
}
|
b1b27a97f25c6c7d471fe20eca12daca6ba268ba.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <chrono>
#include <iostream>
#include "lzlocal.h"
#include "bitfile.h"
#include "matcher_base.h"
#define checkError(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
__global__ void FindMatchBatchKernel(char *buffer, int bufferSize, int *matches_length, int *matches_offset, int bufferSizeAdjusted, int currentMatchCount, bool isLast)
{
int idX = blockIdx.x * blockDim.x + threadIdx.x;
int i = WINDOW_SIZE + idX;
int beginSearch = idX;
if (i >= bufferSizeAdjusted)
{
return;
}
int length = 0;
int offset = 0;
int windowHead = (currentMatchCount + idX) % WINDOW_SIZE;
int currentOffset = 0;
// printf("%i",i);
#define optimized
int j = 0;
#ifdef optimized
char current[MAX_CODED];
for (int j = 0; j < MAX_CODED && i + j < bufferSizeAdjusted; j++)
{
current[j] = buffer[i + j];
}
int qq = WINDOW_SIZE;
int numSubMat = qq / BLOCK_SIZE;
__shared__ char bufferCache[WINDOW_SIZE + BLOCK_SIZE];
const int beginCache = blockIdx.x * blockDim.x;
for (int k = 0; k < numSubMat; k++)
{
if ((beginCache) + BLOCK_SIZE * k + threadIdx.x < bufferSize)
bufferCache[BLOCK_SIZE * k + threadIdx.x] = buffer[(beginCache) + BLOCK_SIZE * k + threadIdx.x]; //buffer[beginSearch + numSubMat * k + threadIdx.x];
}
if (beginCache + WINDOW_SIZE + threadIdx.x < bufferSize)
bufferCache[WINDOW_SIZE + threadIdx.x] = buffer[beginCache + WINDOW_SIZE + threadIdx.x]; //buffer[beginSearch + numSubMat * k + threadIdx.x];
__syncthreads();
const int beginInBufferCache = idX - blockIdx.x * blockDim.x;
#else
char *current = buffer + i;
#endif
// const int showThis = 4096 * 2;
// if(idX == showThis){
// printf("AAA %i %i\n", blockIdx.x*blockDim.x - WINDOW_SIZE,idX - blockIdx.x*blockDim.x);
// for(int k = 0; k < WINDOW_SIZE ;k++){
// printf("%c",bufferCache[beginInBufferCache+ k]);
// }
// for(int k = 0; k < 10;k++)
// printf("\n");
// for(int k = 0; k < WINDOW_SIZE;k++){
// printf("%c",buffer[beginSearch + k]);
// }
// for(int k = 0; k < 10;k++)
// printf("\n");
// }
while (1)
{
#ifdef optimized
if (current[0] == bufferCache[beginInBufferCache + Wrap((currentOffset), WINDOW_SIZE)])
#else
if (current[0] == buffer[beginSearch + Wrap((currentOffset), WINDOW_SIZE)])
#endif
{
/* we matched one. how many more match? */
j = 1;
while (
#ifdef optimized
current[j] == bufferCache[beginInBufferCache + Wrap((currentOffset + j), WINDOW_SIZE)]
#else
current[j] == buffer[beginSearch + Wrap((currentOffset + j), WINDOW_SIZE)]
#endif
&& (!isLast ||
(beginSearch + Wrap((currentOffset + j), WINDOW_SIZE) < bufferSizeAdjusted && i + j < bufferSizeAdjusted)))
{
if (j >= MAX_CODED)
{
break;
}
j++;
}
if (j > length)
{
length = j;
offset = Wrap((currentOffset + windowHead), WINDOW_SIZE);
}
}
if (j >= MAX_CODED)
{
length = MAX_CODED;
break;
}
currentOffset++;
if (currentOffset == WINDOW_SIZE)
{
break;
}
}
matches_offset[idX] = offset;
matches_length[idX] = length;
}
int MatcherCuda::Init()
{
MatcherBase::Init();
return 0;
}
int MatcherCuda::FindMatchBatch(char *buffer, int bufferSize, int *matches_length, int *matches_offset, int *matchSize, bool isLast, int currentMatchCount)
{
int bufferSizeAdjusted = bufferSize - MAX_CODED;
if (isLast)
{
bufferSizeAdjusted += MAX_CODED;
}
int matchCount = bufferSizeAdjusted - WINDOW_SIZE;
*matchSize = matchCount;
int sizeToLaunch = matchCount;
int blocks = sizeToLaunch / BLOCK_SIZE + (sizeToLaunch % BLOCK_SIZE > 0 ? 1 : 0);
int threads = BLOCK_SIZE;
char *d_buffer;
int *d_matches_length;
int *d_matches_offset;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float miliseconds;
hipEventRecord(start, 0);
//std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
checkError(hipMalloc((void **)&d_buffer, sizeof(char) * bufferSize));
checkError(hipMalloc((void **)&d_matches_length, sizeof(int) * matchCount));
checkError(hipMalloc((void **)&d_matches_offset, sizeof(int) * matchCount));
checkError(hipMemcpy(d_buffer, buffer, sizeof(char) * bufferSize, hipMemcpyHostToDevice));
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&miliseconds, start, stop);
timeSpentOnMemoryHostToDevice += miliseconds;
//std::chrono::steady_clock::time_point end= std::chrono::steady_clock::now();
//timeSpentOnMemoryHostToDevice += std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
//begin = std::chrono::steady_clock::now();
hipEventRecord(start, 0);
hipLaunchKernelGGL(( FindMatchBatchKernel), dim3(blocks), dim3(threads), 0, 0, d_buffer, bufferSize, d_matches_length, d_matches_offset, bufferSizeAdjusted, currentMatchCount, isLast);
checkError(hipPeekAtLastError());
//end= std::chrono::steady_clock::now();
//timeSpentOnKernel += std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&miliseconds, start, stop);
timeSpentOnKernel += miliseconds;
// hipDeviceSynchronize();
//begin = std::chrono::steady_clock::now();
hipEventRecord(start, 0);
checkError(hipMemcpy(matches_offset, d_matches_offset, sizeof(int) * matchCount, hipMemcpyDeviceToHost));
checkError(hipMemcpy(matches_length, d_matches_length, sizeof(int) * matchCount, hipMemcpyDeviceToHost));
hipFree(d_buffer);
hipFree(d_matches_length);
hipFree(d_matches_offset);
//end= std::chrono::steady_clock::now();
//timeSpentOnMemoryDeviceToHost += std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&miliseconds, start, stop);
timeSpentOnMemoryDeviceToHost += miliseconds;
return 0;
}
|
b1b27a97f25c6c7d471fe20eca12daca6ba268ba.cu
|
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <chrono>
#include <iostream>
#include "lzlocal.h"
#include "bitfile.h"
#include "matcher_base.h"
#define checkError(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
__global__ void FindMatchBatchKernel(char *buffer, int bufferSize, int *matches_length, int *matches_offset, int bufferSizeAdjusted, int currentMatchCount, bool isLast)
{
int idX = blockIdx.x * blockDim.x + threadIdx.x;
int i = WINDOW_SIZE + idX;
int beginSearch = idX;
if (i >= bufferSizeAdjusted)
{
return;
}
int length = 0;
int offset = 0;
int windowHead = (currentMatchCount + idX) % WINDOW_SIZE;
int currentOffset = 0;
// printf("%i",i);
#define optimized
int j = 0;
#ifdef optimized
char current[MAX_CODED];
for (int j = 0; j < MAX_CODED && i + j < bufferSizeAdjusted; j++)
{
current[j] = buffer[i + j];
}
int qq = WINDOW_SIZE;
int numSubMat = qq / BLOCK_SIZE;
__shared__ char bufferCache[WINDOW_SIZE + BLOCK_SIZE];
const int beginCache = blockIdx.x * blockDim.x;
for (int k = 0; k < numSubMat; k++)
{
if ((beginCache) + BLOCK_SIZE * k + threadIdx.x < bufferSize)
bufferCache[BLOCK_SIZE * k + threadIdx.x] = buffer[(beginCache) + BLOCK_SIZE * k + threadIdx.x]; //buffer[beginSearch + numSubMat * k + threadIdx.x];
}
if (beginCache + WINDOW_SIZE + threadIdx.x < bufferSize)
bufferCache[WINDOW_SIZE + threadIdx.x] = buffer[beginCache + WINDOW_SIZE + threadIdx.x]; //buffer[beginSearch + numSubMat * k + threadIdx.x];
__syncthreads();
const int beginInBufferCache = idX - blockIdx.x * blockDim.x;
#else
char *current = buffer + i;
#endif
// const int showThis = 4096 * 2;
// if(idX == showThis){
// printf("AAA %i %i\n", blockIdx.x*blockDim.x - WINDOW_SIZE,idX - blockIdx.x*blockDim.x);
// for(int k = 0; k < WINDOW_SIZE ;k++){
// printf("%c",bufferCache[beginInBufferCache+ k]);
// }
// for(int k = 0; k < 10;k++)
// printf("\n");
// for(int k = 0; k < WINDOW_SIZE;k++){
// printf("%c",buffer[beginSearch + k]);
// }
// for(int k = 0; k < 10;k++)
// printf("\n");
// }
while (1)
{
#ifdef optimized
if (current[0] == bufferCache[beginInBufferCache + Wrap((currentOffset), WINDOW_SIZE)])
#else
if (current[0] == buffer[beginSearch + Wrap((currentOffset), WINDOW_SIZE)])
#endif
{
/* we matched one. how many more match? */
j = 1;
while (
#ifdef optimized
current[j] == bufferCache[beginInBufferCache + Wrap((currentOffset + j), WINDOW_SIZE)]
#else
current[j] == buffer[beginSearch + Wrap((currentOffset + j), WINDOW_SIZE)]
#endif
&& (!isLast ||
(beginSearch + Wrap((currentOffset + j), WINDOW_SIZE) < bufferSizeAdjusted && i + j < bufferSizeAdjusted)))
{
if (j >= MAX_CODED)
{
break;
}
j++;
}
if (j > length)
{
length = j;
offset = Wrap((currentOffset + windowHead), WINDOW_SIZE);
}
}
if (j >= MAX_CODED)
{
length = MAX_CODED;
break;
}
currentOffset++;
if (currentOffset == WINDOW_SIZE)
{
break;
}
}
matches_offset[idX] = offset;
matches_length[idX] = length;
}
int MatcherCuda::Init()
{
MatcherBase::Init();
return 0;
}
int MatcherCuda::FindMatchBatch(char *buffer, int bufferSize, int *matches_length, int *matches_offset, int *matchSize, bool isLast, int currentMatchCount)
{
int bufferSizeAdjusted = bufferSize - MAX_CODED;
if (isLast)
{
bufferSizeAdjusted += MAX_CODED;
}
int matchCount = bufferSizeAdjusted - WINDOW_SIZE;
*matchSize = matchCount;
int sizeToLaunch = matchCount;
int blocks = sizeToLaunch / BLOCK_SIZE + (sizeToLaunch % BLOCK_SIZE > 0 ? 1 : 0);
int threads = BLOCK_SIZE;
char *d_buffer;
int *d_matches_length;
int *d_matches_offset;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float miliseconds;
cudaEventRecord(start, 0);
//std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
checkError(cudaMalloc((void **)&d_buffer, sizeof(char) * bufferSize));
checkError(cudaMalloc((void **)&d_matches_length, sizeof(int) * matchCount));
checkError(cudaMalloc((void **)&d_matches_offset, sizeof(int) * matchCount));
checkError(cudaMemcpy(d_buffer, buffer, sizeof(char) * bufferSize, cudaMemcpyHostToDevice));
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&miliseconds, start, stop);
timeSpentOnMemoryHostToDevice += miliseconds;
//std::chrono::steady_clock::time_point end= std::chrono::steady_clock::now();
//timeSpentOnMemoryHostToDevice += std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
//begin = std::chrono::steady_clock::now();
cudaEventRecord(start, 0);
FindMatchBatchKernel<<<blocks, threads>>>(d_buffer, bufferSize, d_matches_length, d_matches_offset, bufferSizeAdjusted, currentMatchCount, isLast);
checkError(cudaPeekAtLastError());
//end= std::chrono::steady_clock::now();
//timeSpentOnKernel += std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&miliseconds, start, stop);
timeSpentOnKernel += miliseconds;
// cudaDeviceSynchronize();
//begin = std::chrono::steady_clock::now();
cudaEventRecord(start, 0);
checkError(cudaMemcpy(matches_offset, d_matches_offset, sizeof(int) * matchCount, cudaMemcpyDeviceToHost));
checkError(cudaMemcpy(matches_length, d_matches_length, sizeof(int) * matchCount, cudaMemcpyDeviceToHost));
cudaFree(d_buffer);
cudaFree(d_matches_length);
cudaFree(d_matches_offset);
//end= std::chrono::steady_clock::now();
//timeSpentOnMemoryDeviceToHost += std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&miliseconds, start, stop);
timeSpentOnMemoryDeviceToHost += miliseconds;
return 0;
}
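// --- Illustrative usage sketch (not part of the original matcher) ---
// A rough driver showing how FindMatchBatch might be called for a single, final
// chunk of input. The driver's name, the way the buffer is supplied, and the use
// of new[] for the result arrays are assumptions for this sketch; WINDOW_SIZE,
// MAX_CODED and BLOCK_SIZE come from lzlocal.h as above.
int run_one_chunk_example(MatcherCuda &matcher, char *buffer, int bufferSize)
{
    matcher.Init();
    int matchCount = 0;
    // With isLast == true, FindMatchBatch reports bufferSize - WINDOW_SIZE matches,
    // which is also an upper bound for the non-final case.
    int maxMatches = bufferSize - WINDOW_SIZE;
    int *lengths = new int[maxMatches];
    int *offsets = new int[maxMatches];
    matcher.FindMatchBatch(buffer, bufferSize, lengths, offsets, &matchCount,
                           /*isLast=*/true, /*currentMatchCount=*/0);
    if (matchCount > 0)
        printf("first window position: offset=%d length=%d\n", offsets[0], lengths[0]);
    delete[] lengths;
    delete[] offsets;
    return matchCount;
}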
|
f8341a1df6747af16d68f277f5fe9b815208ff88.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "definitions.cuh"
#include <time.h>
//Number of elements on which to perform CFD
unsigned int Ni = 512; // Y elements
unsigned int Nj = 512; // X elements
unsigned int nIterations = 10000; // No Of Iterations
unsigned int kernelVersion =2; // Decides which GPU kernel version to call (Set it to 1 or 2)
int main(int argc, char** argv)
{
//Variables for Timing
float cpuTime, gpuTime;
	// CPU and GPU pointers (the d_XX prefix marks a pointer to GPU memory; this is just a naming convention)
float *t = NULL, *t_prev = NULL;
float *d_t = NULL,*d_t_prev= NULL;
parseCommandLineArguments(argc, (char **)argv);
printf("\n Ni= %d, Nj=%d nIteration=%d",Ni,Nj,nIterations);
unsigned int size = Ni * Nj * sizeof(float);
if(!initializeCPU(&t, &t_prev) )
{
printf("\n Error in allocating memory on CPU!!!");
unInitializeCPU(&t, &t_prev);
getchar();
return 0;
}
if (!initializeGPU(&d_t, &d_t_prev))
{
printf("\n Error in allocating memory on GPU!!!");
unInitializeCPU(&t, &t_prev);
unInitializeGPU(&d_t, &d_t_prev);
return 0;
}
//Perform CFD on CPU
performCPUCFD(t,t_prev, &cpuTime);
// To temporarily store CPU data. This is just for comparing with GPU output
float *tempBuffer = (float*) calloc(Ni*Nj, sizeof(float));
memcpy(tempBuffer, t_prev, size);
//Perform CFD on GPU
if(!performGPUCFD(d_t,d_t_prev, t, t_prev, &gpuTime))
{
printf("\n GPU Kernel failed !!!");
unInitializeCPU(&t, &t_prev);
unInitializeGPU(&d_t, &d_t_prev);
if(tempBuffer !=NULL)
free(tempBuffer);
return 0;
}
printf("\n Is host equal to device = %d", checkHostEqualsDevice(tempBuffer,t));
printf("\n Speedup = %fx", (float)(cpuTime/gpuTime));
unInitializeCPU(&t, &t_prev);
unInitializeGPU(&d_t, &d_t_prev);
if(tempBuffer !=NULL)
free(tempBuffer);
printf("\n Finished Processing!!!");
getchar();
}
void parseCommandLineArguments(int argc, char**argv)
{
if (argc >= 1)
{
for (int i=1; i < argc; i++)
{
int bFirstArgIsParam = false;
int string_start = 0;
while (argv[i][string_start] == '-')
string_start++;
char *string_argv = &argv[i][string_start];
if (!STRNCASECMP(string_argv, "Ni=", 3))
{
bFirstArgIsParam = true;
Ni = atoi(&string_argv[3]);
continue;
}
if (!STRNCASECMP(string_argv, "Nj=", 3))
{
bFirstArgIsParam = true;
Nj = atoi(&string_argv[3]);
continue;
}
if (!STRNCASECMP(string_argv, "iterations=", 11))
{
bFirstArgIsParam = true;
nIterations = atoi(&string_argv[11]);
continue;
}
if (!STRNCASECMP(string_argv, "kernel=", 7))
{
bFirstArgIsParam = true;
kernelVersion = atoi(&string_argv[7]);
continue;
}
if (!bFirstArgIsParam)
{
printf("Invalid arguments\n");
for (int n=0; n < argc; n++)
{
printf("argv[%d] = %s\n", n, argv[n]);
}
printf("\n");
exit(0);
}
}
}
if(( Ni % THREADS_PER_BLOCK_Y != 0) || (Nj % THREADS_PER_BLOCK_X != 0))
{
fprintf(stderr, "Please specify Ni & Nj as multiple of 16 !!!!");
getchar();
exit(0);
}
}
int initializeCPU(float **t, float **t_prev)
{
*t = (float*) calloc(Ni*Nj, sizeof(float));
*t_prev = (float*) calloc(Ni*Nj, sizeof(float));
if((*t)==NULL || (*t_prev) == NULL)
return 0;
else
return 1;
}
void unInitializeCPU(float **t, float **t_prev)
{
if((*t) !=NULL)
free(*t);
if((*t_prev) != NULL)
free(*t_prev);
}
int initializeGPU(float **d_t, float **d_t_prev)
{
unsigned int size = Ni * Nj * sizeof(float);
// Choose which GPU to run on, change this on a multi-GPU system.
hipError_t cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
getchar();
return 0;
}
// Allocate GPU buffers.
cudaStatus = hipMalloc((void**)&(*d_t), size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
getchar();
return 0;
}
// Allocate GPU buffers .
cudaStatus = hipMalloc((void**)&(*d_t_prev), size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
getchar();
return 0;
}
// Memset GPU buffers
cudaStatus = hipMemset((*d_t),0, size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemset failed!");
getchar();
return 0;
}
// Memset GPU buffers
cudaStatus = hipMemset((*d_t_prev),0, size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemset failed!");
getchar();
return 0;
}
return 1;
}
void unInitializeGPU(float **d_t, float **d_t_prev)
{
	hipError_t cudaStatus = hipSuccess; // initialize so the checks below are safe even when a pointer is NULL
if((*d_t)!=NULL)
cudaStatus = hipFree((*d_t));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipFree failed!");
return;
}
if((*d_t_prev)!=NULL)
cudaStatus = hipFree((*d_t_prev));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipFree failed!");
return;
}
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
getchar();
return;
}
}
void performCPUCFD(float *t, float *t_prev, float *cpuTime)
{
float h,x,y;
h = 1.0f/(Ni-1);
for(unsigned int i=0;i<Ni;i++)
{
x = i*h;
t_prev[i*Nj+0] = x*x;
t_prev[i*Nj+(Nj-1)] = x*x + 1.0f;
}
for(unsigned int j=0;j < Nj; j++)
{
y = j*h;
t_prev[0*Nj+j] = y*y;
t_prev[((Ni-1) * Nj) + j] = 1.0f + y*y;
}
float elapsedTimeInMs = 0.0f;
clock_t start = clock();
for(unsigned int k=0;k<nIterations;k++)
{
for(unsigned int j=1;j<(Nj-1);j++)
{
for(unsigned int i=1;i<(Ni-1);i++)
{
t[i*Nj+j] = 0.25f * (t_prev[(i-1)*Nj+j] + t_prev[(i+1)*Nj+j] + t_prev[i*Nj+(j-1)] +
t_prev[i*Nj+(j+1)] - 4*h*h);
}
}
float* pingPong = t_prev;
t_prev = t;
t = pingPong;
}
clock_t end = clock();
elapsedTimeInMs = (float)((end - start) * 1000 / CLOCKS_PER_SEC);
printf("\n CPU Time:: %f ms", elapsedTimeInMs);
*cpuTime = elapsedTimeInMs;
}
int performGPUCFD(float *d_t, float *d_t_prev, float *t, float *t_prev, float*gpuTime)
{
float h,x,y;
	const char *str = NULL; // Will point at the CUDA error string on failure
	//Decide how many threads per block and how many blocks per grid
dim3 dimBlock(THREADS_PER_BLOCK_X,THREADS_PER_BLOCK_Y);
dim3 dimGrid(Nj/dimBlock.x,Ni/dimBlock.y);
h = 1.0f/(Ni-1);
memset(t_prev, 0, sizeof(float) * Ni * Nj);
for(unsigned int i=0;i<Ni;i++)
{
x = i*h;
t_prev[i*Nj+0] = x*x;
t_prev[i*Nj+(Nj-1)] = x*x + 1.0f;
}
for(unsigned int j=0;j < Nj; j++)
{
y = j*h;
t_prev[0*Nj+j] = y*y;
t_prev[((Ni-1) * Nj) + j] = 1.0f + y*y;
}
//Copy data to device
hipMemcpy(d_t_prev, t_prev, sizeof(float) * Ni * Nj , hipMemcpyHostToDevice);
//Insert event to calculate time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//This calls Version 1 of kernel which uses Global memory
if(kernelVersion ==1)
{
hipEventRecord(start, 0);
for(unsigned int k=0;k<nIterations;k++)
{
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( calculateCFD_V1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_t_prev,d_t, Ni, Nj, h);
float* pingPong = d_t_prev;
d_t_prev = d_t;
d_t = pingPong;
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
}
//This calls Version 2 of kernel which uses optimization by copying data to shared memory
else if(kernelVersion ==2)
{
hipEventRecord(start, 0);
for(unsigned int k=0;k<nIterations;k++)
{
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( calculateCFD_V2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_t_prev,d_t, Ni, Nj, h);
float* pingPong = d_t_prev;
d_t_prev = d_t;
d_t = pingPong;
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
}
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
printf("\n GPU Time:: %f ms", elapsedTime);
*gpuTime = elapsedTime;
hipError_t cudaStatus = hipMemcpy(t, d_t_prev, sizeof(float) * Ni * Nj , hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
str = hipGetErrorString(cudaStatus);
fprintf(stderr, "CUDA Error!:: %s\n", str);
getchar();
return 0;
}
return 1;
}
int checkHostEqualsDevice(float* o_host, float* o_device)
{
int flag =1;
float tolerance = 0.0001f;
//Compare the results
for(unsigned int j=0;j<Nj;j++)
{
for(unsigned int i=0;i<Ni;i++)
{
if( (o_host[i*Nj+j] - o_device[i*Nj+j]) >= tolerance || (o_host[i*Nj+j] - o_device[i*Nj+j]) <= -tolerance)
{
printf("\n D=[%f]!=H=[%f] since Diff > tol %f for [%d][%d]",o_device[i*Nj+j], o_host[i*Nj+j],tolerance, i, j);
flag =0;
//getchar();
}
}
}
return flag;
}
|
f8341a1df6747af16d68f277f5fe9b815208ff88.cu
|
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "definitions.cuh"
#include <time.h>
//Number of elements on which to perform CFD
unsigned int Ni = 512; // Y elements
unsigned int Nj = 512; // X elements
unsigned int nIterations = 10000; // No Of Iterations
unsigned int kernelVersion =2; // Decides which GPU kernel version to call (Set it to 1 or 2)
int main(int argc, char** argv)
{
//Variables for Timing
float cpuTime, gpuTime;
	// CPU and GPU pointers (the d_XX prefix marks a pointer to GPU memory; this is just a naming convention)
float *t = NULL, *t_prev = NULL;
float *d_t = NULL,*d_t_prev= NULL;
parseCommandLineArguments(argc, (char **)argv);
printf("\n Ni= %d, Nj=%d nIteration=%d",Ni,Nj,nIterations);
unsigned int size = Ni * Nj * sizeof(float);
if(!initializeCPU(&t, &t_prev) )
{
printf("\n Error in allocating memory on CPU!!!");
unInitializeCPU(&t, &t_prev);
getchar();
return 0;
}
if (!initializeGPU(&d_t, &d_t_prev))
{
printf("\n Error in allocating memory on GPU!!!");
unInitializeCPU(&t, &t_prev);
unInitializeGPU(&d_t, &d_t_prev);
return 0;
}
//Perform CFD on CPU
performCPUCFD(t,t_prev, &cpuTime);
// To temporarily store CPU data. This is just for comparing with GPU output
float *tempBuffer = (float*) calloc(Ni*Nj, sizeof(float));
memcpy(tempBuffer, t_prev, size);
//Perform CFD on GPU
if(!performGPUCFD(d_t,d_t_prev, t, t_prev, &gpuTime))
{
printf("\n GPU Kernel failed !!!");
unInitializeCPU(&t, &t_prev);
unInitializeGPU(&d_t, &d_t_prev);
if(tempBuffer !=NULL)
free(tempBuffer);
return 0;
}
printf("\n Is host equal to device = %d", checkHostEqualsDevice(tempBuffer,t));
printf("\n Speedup = %fx", (float)(cpuTime/gpuTime));
unInitializeCPU(&t, &t_prev);
unInitializeGPU(&d_t, &d_t_prev);
if(tempBuffer !=NULL)
free(tempBuffer);
printf("\n Finished Processing!!!");
getchar();
}
void parseCommandLineArguments(int argc, char**argv)
{
if (argc >= 1)
{
for (int i=1; i < argc; i++)
{
int bFirstArgIsParam = false;
int string_start = 0;
while (argv[i][string_start] == '-')
string_start++;
char *string_argv = &argv[i][string_start];
if (!STRNCASECMP(string_argv, "Ni=", 3))
{
bFirstArgIsParam = true;
Ni = atoi(&string_argv[3]);
continue;
}
if (!STRNCASECMP(string_argv, "Nj=", 3))
{
bFirstArgIsParam = true;
Nj = atoi(&string_argv[3]);
continue;
}
if (!STRNCASECMP(string_argv, "iterations=", 11))
{
bFirstArgIsParam = true;
nIterations = atoi(&string_argv[11]);
continue;
}
if (!STRNCASECMP(string_argv, "kernel=", 7))
{
bFirstArgIsParam = true;
kernelVersion = atoi(&string_argv[7]);
continue;
}
if (!bFirstArgIsParam)
{
printf("Invalid arguments\n");
for (int n=0; n < argc; n++)
{
printf("argv[%d] = %s\n", n, argv[n]);
}
printf("\n");
exit(0);
}
}
}
if(( Ni % THREADS_PER_BLOCK_Y != 0) || (Nj % THREADS_PER_BLOCK_X != 0))
{
fprintf(stderr, "Please specify Ni & Nj as multiple of 16 !!!!");
getchar();
exit(0);
}
}
int initializeCPU(float **t, float **t_prev)
{
*t = (float*) calloc(Ni*Nj, sizeof(float));
*t_prev = (float*) calloc(Ni*Nj, sizeof(float));
if((*t)==NULL || (*t_prev) == NULL)
return 0;
else
return 1;
}
void unInitializeCPU(float **t, float **t_prev)
{
if((*t) !=NULL)
free(*t);
if((*t_prev) != NULL)
free(*t_prev);
}
int initializeGPU(float **d_t, float **d_t_prev)
{
unsigned int size = Ni * Nj * sizeof(float);
// Choose which GPU to run on, change this on a multi-GPU system.
cudaError_t cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
getchar();
return 0;
}
// Allocate GPU buffers.
cudaStatus = cudaMalloc((void**)&(*d_t), size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
getchar();
return 0;
}
// Allocate GPU buffers .
cudaStatus = cudaMalloc((void**)&(*d_t_prev), size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
getchar();
return 0;
}
// Memset GPU buffers
cudaStatus = cudaMemset((*d_t),0, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset failed!");
getchar();
return 0;
}
// Memset GPU buffers
cudaStatus = cudaMemset((*d_t_prev),0, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemset failed!");
getchar();
return 0;
}
return 1;
}
void unInitializeGPU(float **d_t, float **d_t_prev)
{
	cudaError_t cudaStatus = cudaSuccess; // initialize so the checks below are safe even when a pointer is NULL
if((*d_t)!=NULL)
cudaStatus = cudaFree((*d_t));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaFree failed!");
return;
}
if((*d_t_prev)!=NULL)
cudaStatus = cudaFree((*d_t_prev));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaFree failed!");
return;
}
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
getchar();
return;
}
}
void performCPUCFD(float *t, float *t_prev, float *cpuTime)
{
float h,x,y;
h = 1.0f/(Ni-1);
for(unsigned int i=0;i<Ni;i++)
{
x = i*h;
t_prev[i*Nj+0] = x*x;
t_prev[i*Nj+(Nj-1)] = x*x + 1.0f;
}
for(unsigned int j=0;j < Nj; j++)
{
y = j*h;
t_prev[0*Nj+j] = y*y;
t_prev[((Ni-1) * Nj) + j] = 1.0f + y*y;
}
float elapsedTimeInMs = 0.0f;
clock_t start = clock();
for(unsigned int k=0;k<nIterations;k++)
{
for(unsigned int j=1;j<(Nj-1);j++)
{
for(unsigned int i=1;i<(Ni-1);i++)
{
t[i*Nj+j] = 0.25f * (t_prev[(i-1)*Nj+j] + t_prev[(i+1)*Nj+j] + t_prev[i*Nj+(j-1)] +
t_prev[i*Nj+(j+1)] - 4*h*h);
}
}
float* pingPong = t_prev;
t_prev = t;
t = pingPong;
}
clock_t end = clock();
elapsedTimeInMs = (float)((end - start) * 1000 / CLOCKS_PER_SEC);
printf("\n CPU Time:: %f ms", elapsedTimeInMs);
*cpuTime = elapsedTimeInMs;
}
int performGPUCFD(float *d_t, float *d_t_prev, float *t, float *t_prev, float*gpuTime)
{
float h,x,y;
	const char *str = NULL; // Will point at the CUDA error string on failure
	//Decide how many threads per block and how many blocks per grid
dim3 dimBlock(THREADS_PER_BLOCK_X,THREADS_PER_BLOCK_Y);
dim3 dimGrid(Nj/dimBlock.x,Ni/dimBlock.y);
h = 1.0f/(Ni-1);
memset(t_prev, 0, sizeof(float) * Ni * Nj);
for(unsigned int i=0;i<Ni;i++)
{
x = i*h;
t_prev[i*Nj+0] = x*x;
t_prev[i*Nj+(Nj-1)] = x*x + 1.0f;
}
for(unsigned int j=0;j < Nj; j++)
{
y = j*h;
t_prev[0*Nj+j] = y*y;
t_prev[((Ni-1) * Nj) + j] = 1.0f + y*y;
}
//Copy data to device
cudaMemcpy(d_t_prev, t_prev, sizeof(float) * Ni * Nj , cudaMemcpyHostToDevice);
//Insert event to calculate time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//This calls Version 1 of kernel which uses Global memory
if(kernelVersion ==1)
{
cudaEventRecord(start, 0);
for(unsigned int k=0;k<nIterations;k++)
{
// Launch a kernel on the GPU with one thread for each element.
calculateCFD_V1<<<dimGrid,dimBlock>>>(d_t_prev,d_t, Ni, Nj, h);
float* pingPong = d_t_prev;
d_t_prev = d_t;
d_t = pingPong;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
}
//This calls Version 2 of kernel which uses optimization by copying data to shared memory
else if(kernelVersion ==2)
{
cudaEventRecord(start, 0);
for(unsigned int k=0;k<nIterations;k++)
{
// Launch a kernel on the GPU with one thread for each element.
calculateCFD_V2<<<dimGrid,dimBlock>>>(d_t_prev,d_t, Ni, Nj, h);
float* pingPong = d_t_prev;
d_t_prev = d_t;
d_t = pingPong;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
}
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("\n GPU Time:: %f ms", elapsedTime);
*gpuTime = elapsedTime;
cudaError_t cudaStatus = cudaMemcpy(t, d_t_prev, sizeof(float) * Ni * Nj , cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
str = cudaGetErrorString(cudaStatus);
fprintf(stderr, "CUDA Error!:: %s\n", str);
getchar();
return 0;
}
return 1;
}
int checkHostEqualsDevice(float* o_host, float* o_device)
{
int flag =1;
float tolerance = 0.0001f;
//Compare the results
for(unsigned int j=0;j<Nj;j++)
{
for(unsigned int i=0;i<Ni;i++)
{
if( (o_host[i*Nj+j] - o_device[i*Nj+j]) >= tolerance || (o_host[i*Nj+j] - o_device[i*Nj+j]) <= -tolerance)
{
printf("\n D=[%f]!=H=[%f] since Diff > tol %f for [%d][%d]",o_device[i*Nj+j], o_host[i*Nj+j],tolerance, i, j);
flag =0;
//getchar();
}
}
}
return flag;
}
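// --- Illustrative sketch (not part of the original project) ---
// calculateCFD_V1 and calculateCFD_V2 are declared in definitions.cuh and are not
// reproduced here. Assuming the signature used at the call sites above and the same
// Jacobi update as the CPU reference loop in performCPUCFD, a plain global-memory
// V1 kernel could look like the sketch below; V2 would additionally stage the stencil
// neighbourhood in shared memory. The host loop ping-pongs d_t and d_t_prev, so the
// kernel only ever writes interior points. For reference, parseCommandLineArguments
// above accepts arguments of the form Ni=..., Nj=..., iterations=... and kernel=...
// (leading dashes are stripped).
__global__ void calculateCFD_V1_sketch(float *t_prev, float *t, unsigned int Ni, unsigned int Nj, float h)
{
	unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; // column index (X)
	unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; // row index (Y)
	// Skip the boundary rows/columns, exactly like the CPU loops.
	if (i > 0 && i < Ni - 1 && j > 0 && j < Nj - 1)
	{
		t[i * Nj + j] = 0.25f * (t_prev[(i - 1) * Nj + j] + t_prev[(i + 1) * Nj + j] +
		                         t_prev[i * Nj + (j - 1)] + t_prev[i * Nj + (j + 1)] - 4 * h * h);
	}
}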
|
05024e0270144dacd272d5e6791e64c44e8cf441.hip
|
// !!! This is a file automatically generated by hipify!!!
/* This is machine problem 2, binning
* The problem is that you have particles in a 3D domain
* which is quantized into blocks or bins. You want to figure
* out which block each particle belongs to.
* Use the atomic functions that you learned about in lecture 3
* to implement the same functionality as the reference version on the cpu.
*
* FOR EXTRA CREDIT:
* Write a version of your binning kernel that uses atomics hierarchically,
* accumulating updates first into shared memory and then merging the results
* from shared memory into the global memory.
* As a hint, think about binning particles first into a coarse grid in a first kernel,
* and then binning the particles from each coarse bin into the
* final bins in a second kernel.
*/
/*
* SUBMISSION INSTRUCTIONS
* =========================
*
* You can submit your entire working directory for this assignment
* from any of the cluster machines by using our submit script. We want to be able
* to just run "make" to compile your code.
* The submit script bundles the entire current directory into
 * a submission. Thus, you use it by CDing to the directory for your assignment,
* and running:
*
* > cd *some directory*
* > /usr/class/cs193g/bin/submit mp2
*
* This will submit the current directory as your assignment. You can submit
* as many times as you want, and we will use your last submission.
*/
#include <stdlib.h>
#include <stdio.h>
#include <ctime>
#include <hip/hip_runtime.h>
#include "mp2-util.h"
// TODO enable this to print debugging information
//const bool print_debug = true;
const bool print_debug = false;
event_pair timer;
// the particle coordinates are already normalized (in the domain [0,1] )
// gridding provides the base 2 log of how finely the domain is subdivided
// in each direction. So gridding.x == 6 means that the x-axis is subdivided
// into 64 parts. (i.e. 2^(gridding.x) = number of bins on x axis)
// Overall there cannot be more than 4B bins, so we can just concatenate the bin
// indices into a single uint.
__host__ __device__ unsigned int bin_index(float3 particle, int3 gridding)
{
unsigned int x_index = (unsigned int)(particle.x * (1 << gridding.x));
unsigned int y_index = (unsigned int)(particle.y * (1 << gridding.y));
unsigned int z_index = (unsigned int)(particle.z * (1 << gridding.z));
unsigned int index = 0;
index |= z_index;
index <<= gridding.y;
index |= y_index;
index <<= gridding.x;
index |= x_index;
return index;
}
void host_binning(float3 *particles, int *bins, int *bin_counters, int *overflow_flag, int3 gridding, int bin_size, int array_length)
{
for(int i=0;i<array_length;i++)
{
unsigned int bin = bin_index(particles[i],gridding);
if(bin_counters[bin] < bin_size)
{
unsigned int offset = bin_counters[bin];
// let's not do the whole precrement / postcrement thing...
bin_counters[bin]++;
bins[bin*bin_size + offset] = i;
}
else {
*overflow_flag = true;
}
}
}
bool cross_check_results(int * h_bins, int * h_bins_checker, int * h_bin_counters, int * h_bin_counters_checker, int * h_particles_binids_checker, int num_particles, int num_bins, int bin_size)
{
int error = 0;
for(int i=0;i<num_bins;i++)
{
if(h_bin_counters[i] != h_bin_counters_checker[i])
{
if(print_debug) fprintf(stderr,"mismatch! bin %d: cuda:%d host:%d particles \n",i,h_bin_counters[i],h_bin_counters_checker[i]);
error = 1;
}
for(int j=0; j<bin_size;j++)
{
 // record that these particles went into bin i in the reference version
if(h_bins_checker[i*bin_size+j] != -1)
{
h_particles_binids_checker[h_bins_checker[i*bin_size+j]] = i;
}
}
for(int j=0; j<bin_size;j++)
{
if(h_bins_checker[i*bin_size+j] != -1)
{
if(h_particles_binids_checker[h_bins[i*bin_size+j]] != i)
{
error = 1;
}
}
}
}
if(error)
{
printf("Output of CUDA version and normal version didn't match! \n");
}
else {
printf("Worked! CUDA and reference output match. \n");
}
return error;
}
template
<typename T>
__global__ void initialize(T *array,T value, unsigned int array_length)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if(gid < array_length)
{
array[gid] = value;
}
}
__global__ void particle_binning(float3 *particles, int *bins, unsigned int *bin_counters, int3 gridding, int num_particles, int bin_size)
{
// Global id and bounds checking
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if(gid >= num_particles) {
return;
}
unsigned int bin = bin_index(particles[gid], gridding);
 unsigned int offset = atomicInc(&bin_counters[bin], bin_size); // Should check for overflow, but the host function does that already
 if(offset < bin_size) // guard the single out-of-range slot atomicInc hands out right before the counter wraps
 {
 bins[bin * bin_size + offset] = gid;
 }
}
void device_binning(float3 * h_particles, int * h_bins, int * h_bin_counters, int3 gridding, int num_particles, int num_bins, unsigned int bin_size)
{
// Device pointers
float3 *d_particles = 0;
int *d_bins = 0;
unsigned int *d_bin_counters = 0;
// Cuda memory allocation
hipMalloc((void**)&d_particles, num_particles * sizeof(float3));
hipMalloc((void**)&d_bins, num_bins * bin_size * sizeof(unsigned int));
hipMalloc((void**)&d_bin_counters, num_bins * sizeof(unsigned int));
if(d_particles == 0 || d_bins == 0 || d_bin_counters == 0) {
printf("error allocating memory");
exit(1);
}
// Cuda memory copy (host to device)
hipMemcpy(d_particles, h_particles, num_particles * sizeof(float3), hipMemcpyHostToDevice);
// Grid dimensions
int block_size = 512;
int num_blocks_counters = (num_bins + block_size - 1) / block_size;
int num_blocks_bins = (num_bins * bin_size + block_size - 1) / block_size;
int num_blocks_particles = (num_particles + block_size - 1) / block_size;
 // Initialize the counters (should be done with hipMemset, but use the templated function since it's available)
hipLaunchKernelGGL(( initialize), dim3(num_blocks_counters), dim3(block_size) , 0, 0, d_bin_counters, (unsigned int)0, num_bins);
hipLaunchKernelGGL(( initialize), dim3(num_blocks_bins), dim3(block_size) , 0, 0, d_bins, -1, num_bins * bin_size);
start_timer(&timer);
// Do the binning
hipLaunchKernelGGL(( particle_binning), dim3(num_blocks_particles), dim3(block_size) , 0, 0, d_particles, d_bins, d_bin_counters, gridding, num_particles, bin_size);
stop_timer(&timer, "gpu binning");
// Cuda memory copy (device to host)
hipMemcpy(h_bins, d_bins, num_bins * bin_size * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_bin_counters, d_bin_counters, num_bins * sizeof(int), hipMemcpyDeviceToHost);
// Cuda deallocation
hipFree(d_particles);
hipFree(d_bins);
hipFree(d_bin_counters);
}
int main(void)
{
 // create arrays of 4M elements
int num_particles = 1<<22;
int log_bpd = 6;
int bins_per_dim = 1 << log_bpd;
unsigned int num_bins = bins_per_dim*bins_per_dim*bins_per_dim;
// extra space to account for load imbalance to prevent frequent aborts due to bin overflow
int bin_size = num_particles/num_bins * 3;
int3 gridding = make_int3(log_bpd,log_bpd,log_bpd);
float3 *h_particles = 0;
int *h_bins = 0;
int *h_bin_counters = 0;
int *h_bins_checker = 0;
float3 *h_particles_checker = 0;
int *h_bin_counters_checker = 0;
int *h_particles_binids_checker = 0;
int h_overflow_flag_checker = 0;
// malloc host array
h_particles = (float3*)malloc(num_particles * sizeof(float3));
h_bins = (int*)malloc(num_bins * bin_size * sizeof(int));
h_bin_counters = (int*)malloc(num_bins * sizeof(int));
h_particles_checker = (float3*)malloc(num_particles * sizeof(float3));
h_bins_checker = (int*)malloc(num_bins * bin_size * sizeof(int));
h_particles_binids_checker = (int*)malloc(num_bins * bin_size * sizeof(int));
h_bin_counters_checker = (int*)malloc(num_bins * sizeof(int));
// if either memory allocation failed, report an error message
if(h_particles == 0 ||
h_bins == 0 || h_bin_counters == 0 ||
h_bins_checker == 0 || h_bin_counters_checker == 0 ||
h_particles_binids_checker == 0)
{
printf("couldn't allocate memory\n");
exit(1);
}
// generate random input
// initialize
srand(13);
for(int i=0;i< num_particles;i++)
{
h_particles[i] = h_particles_checker[i] = make_float3((float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX);
}
for(int i=0;i<num_bins;i++)
{
h_bin_counters[i] = h_bin_counters_checker[i] = 0;
}
for(int i=0;i<num_bins*bin_size;i++)
{
h_bins[i] = h_bins_checker[i] = h_particles_binids_checker[i] = -1;
}
device_binning(h_particles, h_bins, h_bin_counters, gridding, num_particles, num_bins, bin_size);
// generate reference output
start_timer(&timer);
host_binning(h_particles_checker, h_bins_checker, h_bin_counters_checker, &h_overflow_flag_checker, gridding, bin_size, num_particles);
stop_timer(&timer,"cpu binning");
if(h_overflow_flag_checker)
{
printf("one of the bins overflowed!\n");
exit(1);
}
// check CUDA output versus reference output
cross_check_results(h_bins, h_bins_checker, h_bin_counters, h_bin_counters_checker, h_particles_binids_checker, num_particles, num_bins, bin_size);
// deallocate memory
free(h_particles);
free(h_bins);
free(h_bin_counters);
free(h_particles_checker);
free(h_bins_checker);
free(h_particles_binids_checker);
free(h_bin_counters_checker);
return 0;
}
|
05024e0270144dacd272d5e6791e64c44e8cf441.cu
|
/* This is machine problem 2, binning
* The problem is that you have particles in a 3D domain
* which is quantized into blocks or bins. You want to figure
* out which block each particle belongs to.
* Use the atomic functions that you learned about in lecture 3
* to implement the same functionality as the reference version on the cpu.
*
* FOR EXTRA CREDIT:
* Write a version of your binning kernel that uses atomics hierarchically,
* accumulating updates first into shared memory and then merging the results
* from shared memory into the global memory.
* As a hint, think about binning particles first into a coarse grid in a first kernel,
* and then binning the particles from each coarse bin into the
* final bins in a second kernel.
*/
/*
* SUBMISSION INSTRUCTIONS
* =========================
*
* You can submit your entire working directory for this assignment
* from any of the cluster machines by using our submit script. We want to be able
* to just run "make" to compile your code.
* The submit script bundles the entire current directory into
 * a submission. Thus, you use it by CDing to the directory for your assignment,
* and running:
*
* > cd *some directory*
* > /usr/class/cs193g/bin/submit mp2
*
* This will submit the current directory as your assignment. You can submit
* as many times as you want, and we will use your last submission.
*/
#include <stdlib.h>
#include <stdio.h>
#include <ctime>
#include <cuda.h>
#include "mp2-util.h"
// TODO enable this to print debugging information
//const bool print_debug = true;
const bool print_debug = false;
event_pair timer;
// the particle coordinates are already normalized (in the domain [0,1] )
// gridding provides the base 2 log of how finely the domain is subdivided
// in each direction. So gridding.x == 6 means that the x-axis is subdivided
// into 64 parts. (i.e. 2^(gridding.x) = number of bins on x axis)
// Overall there cannot be more than 4B bins, so we can just concatenate the bin
// indices into a single uint.
__host__ __device__ unsigned int bin_index(float3 particle, int3 gridding)
{
unsigned int x_index = (unsigned int)(particle.x * (1 << gridding.x));
unsigned int y_index = (unsigned int)(particle.y * (1 << gridding.y));
unsigned int z_index = (unsigned int)(particle.z * (1 << gridding.z));
unsigned int index = 0;
index |= z_index;
index <<= gridding.y;
index |= y_index;
index <<= gridding.x;
index |= x_index;
return index;
}
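// A quick worked example of the packing above (illustrative only, not part of the
// assignment): with gridding = (6,6,6), a particle at (0.5, 0.25, 0.75) gives
// x_index = 32, y_index = 16, z_index = 48, and bin_index packs them as
// (48 << 12) | (16 << 6) | 32 = 197664.
void print_example_bin_index()
{
  int3 gridding = make_int3(6, 6, 6);
  float3 p = make_float3(0.5f, 0.25f, 0.75f);
  printf("bin_index = %u (expected %u)\n",
         bin_index(p, gridding), (48u << 12) | (16u << 6) | 32u);
}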
void host_binning(float3 *particles, int *bins, int *bin_counters, int *overflow_flag, int3 gridding, int bin_size, int array_length)
{
for(int i=0;i<array_length;i++)
{
unsigned int bin = bin_index(particles[i],gridding);
if(bin_counters[bin] < bin_size)
{
unsigned int offset = bin_counters[bin];
// let's not do the whole precrement / postcrement thing...
bin_counters[bin]++;
bins[bin*bin_size + offset] = i;
}
else {
*overflow_flag = true;
}
}
}
bool cross_check_results(int * h_bins, int * h_bins_checker, int * h_bin_counters, int * h_bin_counters_checker, int * h_particles_binids_checker, int num_particles, int num_bins, int bin_size)
{
int error = 0;
for(int i=0;i<num_bins;i++)
{
if(h_bin_counters[i] != h_bin_counters_checker[i])
{
if(print_debug) fprintf(stderr,"mismatch! bin %d: cuda:%d host:%d particles \n",i,h_bin_counters[i],h_bin_counters_checker[i]);
error = 1;
}
for(int j=0; j<bin_size;j++)
{
 // record that these particles went into bin i in the reference version
if(h_bins_checker[i*bin_size+j] != -1)
{
h_particles_binids_checker[h_bins_checker[i*bin_size+j]] = i;
}
}
for(int j=0; j<bin_size;j++)
{
if(h_bins_checker[i*bin_size+j] != -1)
{
if(h_particles_binids_checker[h_bins[i*bin_size+j]] != i)
{
error = 1;
}
}
}
}
if(error)
{
printf("Output of CUDA version and normal version didn't match! \n");
}
else {
printf("Worked! CUDA and reference output match. \n");
}
return error;
}
template
<typename T>
__global__ void initialize(T *array,T value, unsigned int array_length)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if(gid < array_length)
{
array[gid] = value;
}
}
__global__ void particle_binning(float3 *particles, int *bins, unsigned int *bin_counters, int3 gridding, int num_particles, int bin_size)
{
// Global id and bounds checking
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if(gid >= num_particles) {
return;
}
unsigned int bin = bin_index(particles[gid], gridding);
 unsigned int offset = atomicInc(&bin_counters[bin], bin_size); // Should check for overflow, but the host function does that already
 if(offset < bin_size) // guard the single out-of-range slot atomicInc hands out right before the counter wraps
 {
 bins[bin * bin_size + offset] = gid;
 }
}
void device_binning(float3 * h_particles, int * h_bins, int * h_bin_counters, int3 gridding, int num_particles, int num_bins, unsigned int bin_size)
{
// Device pointers
float3 *d_particles = 0;
int *d_bins = 0;
unsigned int *d_bin_counters = 0;
// Cuda memory allocation
cudaMalloc((void**)&d_particles, num_particles * sizeof(float3));
cudaMalloc((void**)&d_bins, num_bins * bin_size * sizeof(unsigned int));
cudaMalloc((void**)&d_bin_counters, num_bins * sizeof(unsigned int));
if(d_particles == 0 || d_bins == 0 || d_bin_counters == 0) {
printf("error allocating memory");
exit(1);
}
// Cuda memory copy (host to device)
cudaMemcpy(d_particles, h_particles, num_particles * sizeof(float3), cudaMemcpyHostToDevice);
// Grid dimensions
int block_size = 512;
int num_blocks_counters = (num_bins + block_size - 1) / block_size;
int num_blocks_bins = (num_bins * bin_size + block_size - 1) / block_size;
int num_blocks_particles = (num_particles + block_size - 1) / block_size;
 // Initialize the counters (should be done with cudaMemset, but use the templated function since it's available)
initialize<<< num_blocks_counters, block_size >>>(d_bin_counters, (unsigned int)0, num_bins);
initialize<<< num_blocks_bins, block_size >>>(d_bins, -1, num_bins * bin_size);
start_timer(&timer);
// Do the binning
particle_binning<<< num_blocks_particles, block_size >>>(d_particles, d_bins, d_bin_counters, gridding, num_particles, bin_size);
stop_timer(&timer, "gpu binning");
// Cuda memory copy (device to host)
cudaMemcpy(h_bins, d_bins, num_bins * bin_size * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_bin_counters, d_bin_counters, num_bins * sizeof(int), cudaMemcpyDeviceToHost);
// Cuda deallocation
cudaFree(d_particles);
cudaFree(d_bins);
cudaFree(d_bin_counters);
}
int main(void)
{
 // create arrays of 4M elements
int num_particles = 1<<22;
int log_bpd = 6;
int bins_per_dim = 1 << log_bpd;
unsigned int num_bins = bins_per_dim*bins_per_dim*bins_per_dim;
// extra space to account for load imbalance to prevent frequent aborts due to bin overflow
int bin_size = num_particles/num_bins * 3;
int3 gridding = make_int3(log_bpd,log_bpd,log_bpd);
float3 *h_particles = 0;
int *h_bins = 0;
int *h_bin_counters = 0;
int *h_bins_checker = 0;
float3 *h_particles_checker = 0;
int *h_bin_counters_checker = 0;
int *h_particles_binids_checker = 0;
int h_overflow_flag_checker = 0;
// malloc host array
h_particles = (float3*)malloc(num_particles * sizeof(float3));
h_bins = (int*)malloc(num_bins * bin_size * sizeof(int));
h_bin_counters = (int*)malloc(num_bins * sizeof(int));
h_particles_checker = (float3*)malloc(num_particles * sizeof(float3));
h_bins_checker = (int*)malloc(num_bins * bin_size * sizeof(int));
h_particles_binids_checker = (int*)malloc(num_bins * bin_size * sizeof(int));
h_bin_counters_checker = (int*)malloc(num_bins * sizeof(int));
// if either memory allocation failed, report an error message
if(h_particles == 0 ||
h_bins == 0 || h_bin_counters == 0 ||
h_bins_checker == 0 || h_bin_counters_checker == 0 ||
h_particles_binids_checker == 0)
{
printf("couldn't allocate memory\n");
exit(1);
}
// generate random input
// initialize
srand(13);
for(int i=0;i< num_particles;i++)
{
h_particles[i] = h_particles_checker[i] = make_float3((float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX);
}
for(int i=0;i<num_bins;i++)
{
h_bin_counters[i] = h_bin_counters_checker[i] = 0;
}
for(int i=0;i<num_bins*bin_size;i++)
{
h_bins[i] = h_bins_checker[i] = h_particles_binids_checker[i] = -1;
}
device_binning(h_particles, h_bins, h_bin_counters, gridding, num_particles, num_bins, bin_size);
// generate reference output
start_timer(&timer);
host_binning(h_particles_checker, h_bins_checker, h_bin_counters_checker, &h_overflow_flag_checker, gridding, bin_size, num_particles);
stop_timer(&timer,"cpu binning");
if(h_overflow_flag_checker)
{
printf("one of the bins overflowed!\n");
exit(1);
}
// check CUDA output versus reference output
cross_check_results(h_bins, h_bins_checker, h_bin_counters, h_bin_counters_checker, h_particles_binids_checker, num_particles, num_bins, bin_size);
// deallocate memory
free(h_particles);
free(h_bins);
free(h_bin_counters);
free(h_particles_checker);
free(h_bins_checker);
free(h_particles_binids_checker);
free(h_bin_counters_checker);
return 0;
}
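// --- Sketch of the hierarchical (extra-credit) idea, not a reference solution ---
// The header comment suggests using shared-memory atomics first and touching global
// memory only once per block. The kernel below is a minimal sketch of the counting
// half of that scheme for a coarse 8x8x8 grid (512 bins, small enough for a per-block
// shared histogram); COARSE_BINS and the kernel name are assumptions. A full solution
// would use these counts to scatter particle indices into coarse bins and then run the
// fine binning per coarse bin in a second pass.
#define COARSE_BINS 512 // 2^(3+3+3), i.e. coarse gridding = (3,3,3)
__global__ void coarse_bin_count(float3 *particles, unsigned int *coarse_counters, int3 coarse_gridding, int num_particles)
{
  __shared__ unsigned int s_counts[COARSE_BINS];
  // cooperatively zero the per-block histogram
  for(int b = threadIdx.x; b < COARSE_BINS; b += blockDim.x)
  {
    s_counts[b] = 0;
  }
  __syncthreads();
  int gid = blockIdx.x * blockDim.x + threadIdx.x;
  if(gid < num_particles)
  {
    // reuse the bin_index() helper defined above with the coarse gridding
    unsigned int bin = bin_index(particles[gid], coarse_gridding);
    atomicAdd(&s_counts[bin], 1u); // cheap shared-memory atomic
  }
  __syncthreads();
  // merge the per-block histogram into global memory with far fewer global atomics
  for(int b = threadIdx.x; b < COARSE_BINS; b += blockDim.x)
  {
    if(s_counts[b] != 0)
    {
      atomicAdd(&coarse_counters[b], s_counts[b]);
    }
  }
}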
|
b86951cb10942b165e8746491526c23a3e4c38a8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "inc/helper_image.h"
#include <cstdlib>
#include <cstdio>
#include <stdio.h>
#include <cassert>
#include <iostream>
#include <iomanip>
#include <chrono>
using namespace std;
const int AMOUNT_OF_THREADS_X = 32;
const int AMOUNT_OF_THREADS_Y = 16;
struct pixel
{
unsigned char r;
unsigned char g;
unsigned char b;
};
struct int_pixel
{
int r;
int g;
int b;
};
#define CUDA_DEBUG
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result, const char* err)
{
#if defined(DEBUG) || defined(CUDA_DEBUG)
if (result != hipSuccess)
{
cerr << "CUDA Runtime Error: " << hipGetErrorString(result) << " at: " << err << endl;
assert(result == hipSuccess);
}
#endif
return result;
}
__global__ void ApplyPrewittFilter(unsigned char* input_data, unsigned char* output_data, const int width, const int height, const int padded_width, const int padded_height, const int in_pitch, const int out_pitch)
{
const int x = blockIdx.x * AMOUNT_OF_THREADS_X + threadIdx.x;
const int y = blockIdx.y * AMOUNT_OF_THREADS_Y + threadIdx.y;
const int int_widht = in_pitch / sizeof(int);
const int output_int_width = out_pitch / sizeof(int);
uchar4* reintterpreted_input = reinterpret_cast<uchar4*>(input_data);
uchar4* reintterpreted_output = reinterpret_cast<uchar4*>(output_data);
__shared__ uchar4 shared_memory[AMOUNT_OF_THREADS_Y + 2][AMOUNT_OF_THREADS_X + 2];
if (x <= int_widht && y <= padded_height)
{
shared_memory[threadIdx.y][threadIdx.x] = reintterpreted_input[y * int_widht + x];
if (y + AMOUNT_OF_THREADS_Y < padded_height && threadIdx.y < 2)
shared_memory[AMOUNT_OF_THREADS_Y + threadIdx.y][threadIdx.x] = reintterpreted_input[(AMOUNT_OF_THREADS_Y + y) * int_widht + x];
if (!(threadIdx.x % 31))
{
shared_memory[threadIdx.y][threadIdx.x + 1] = reintterpreted_input[y * int_widht + x + 1];
shared_memory[threadIdx.y][threadIdx.x + 2] = reintterpreted_input[y * int_widht + x + 2];
}
if ((!(threadIdx.y % 14) || !(threadIdx.y % 15)) && threadIdx.x >= 30)
{
shared_memory[threadIdx.y + 2][threadIdx.x + 2] = reintterpreted_input[(y + 2) * int_widht + x + 2];
}
}
__syncthreads();
if (x <= int_widht && y <= padded_height) {
uchar4 out_uchar4 = { 0 };
uchar4 first_int = shared_memory[threadIdx.y][threadIdx.x];
uchar4 second_int = shared_memory[threadIdx.y][threadIdx.x + 1];
uchar4 third_int = shared_memory[threadIdx.y][threadIdx.x + 2];
uchar4 fourth_int = shared_memory[threadIdx.y + 1][threadIdx.x];
uchar4 fifth_int = shared_memory[threadIdx.y + 1][threadIdx.x + 1];
uchar4 sixth_int = shared_memory[threadIdx.y + 1][threadIdx.x + 2];
uchar4 seventh_int = shared_memory[threadIdx.y + 2][threadIdx.x];
uchar4 eighth_int = shared_memory[threadIdx.y + 2][threadIdx.x + 1];
uchar4 nineth_int = shared_memory[threadIdx.y + 2][threadIdx.x + 2];
int tmp1, tmp2;
/////////////////////
tmp1 = (seventh_int.x + seventh_int.w + eighth_int.z) - (first_int.x + first_int.w + second_int.z);
tmp2 = (second_int.z + fifth_int.z + eighth_int.z) - (first_int.x + fourth_int.x + seventh_int.x);
if (tmp1 > 255) tmp1 = 255;
if (tmp1 < 0) tmp1 = 0;
if (tmp2 > 255) tmp2 = 255;
if (tmp2 < 0) tmp2 = 0;
out_uchar4.x = (tmp1 >= tmp2) ? (unsigned char)tmp1 : (unsigned char)tmp2;
////////////////////
tmp1 = (seventh_int.y + eighth_int.x + eighth_int.w) - (first_int.y + second_int.x + second_int.w);
tmp2 = (second_int.w + fifth_int.w + eighth_int.w) - (first_int.y + fourth_int.y + seventh_int.y);
if (tmp1 > 255) tmp1 = 255;
if (tmp1 < 0) tmp1 = 0;
if (tmp2 > 255) tmp2 = 255;
if (tmp2 < 0) tmp2 = 0;
out_uchar4.y = (tmp1 > tmp2) ? (unsigned char)tmp1 : (unsigned char)tmp2;
/////////////////////////
tmp1 = (seventh_int.z + eighth_int.y + nineth_int.x) - (first_int.z + second_int.y + third_int.x);
tmp2 = (third_int.x + sixth_int.x + nineth_int.x) - (first_int.z + fourth_int.z + seventh_int.z);
if (tmp1 > 255) tmp1 = 255;
if (tmp1 < 0) tmp1 = 0;
if (tmp2 > 255) tmp2 = 255;
if (tmp2 < 0) tmp2 = 0;
out_uchar4.z = (tmp1 > tmp2) ? (unsigned char)tmp1 : (unsigned char)tmp2;
//////////////////////////
tmp1 = (seventh_int.w + eighth_int.z + nineth_int.y) - (first_int.w + second_int.z + third_int.y);
tmp2 = (third_int.y + sixth_int.y + nineth_int.y) - (first_int.w + fourth_int.w + seventh_int.w);
if (tmp1 > 255) tmp1 = 255;
if (tmp1 < 0) tmp1 = 0;
if (tmp2 > 255) tmp2 = 255;
if (tmp2 < 0) tmp2 = 0;
out_uchar4.w = (tmp1 > tmp2) ? (unsigned char)tmp1 : (unsigned char)tmp2;
if (x < output_int_width && y < height)
reintterpreted_output[y * output_int_width + x] = out_uchar4;
}
}
pixel* PadDataByOnePixel(pixel* input_data, int width, int height)
{
const int new_width = width + 2;
const int new_height = height + 2;
pixel* output_data = new pixel[new_width * new_height];
// copy initial part
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
output_data[(y + 1) * new_width + x + 1] = input_data[y * width + x];
}
}
output_data[0] = input_data[0];
output_data[new_width - 1] = input_data[width - 1];
output_data[new_width * (new_height - 1)] = input_data[width * (height - 1)];
output_data[new_width * new_height - 1] = input_data[width * height - 1];
for (int x = 0; x < width; x++)
{
output_data[x + 1] = input_data[x];
output_data[(new_height - 1) * new_width + x + 1] = input_data[width * (height - 1) + x];
}
for (int y = 0; y < height; y++)
{
output_data[(y + 1) * new_width] = input_data[y * width];
output_data[(y + 1) * new_width + new_width - 1] = input_data[y * width + width - 1];
}
return output_data;
}
// Use the PadDataByOnePixel transformation on the input before using this function
void PrewittFilter(pixel* input_matrix, pixel* output_matrix, const int width, const int height, const int padded_width, const int padded_height)
{
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
pixel perfect = { 0 };
int_pixel tmp1 = { 0 };
int_pixel tmp2 = { 0 };
tmp1.r = (
(input_matrix[(y + 2) * padded_width + x].r + input_matrix[(y + 2) * padded_width + x + 1].r + input_matrix[(y + 2) * padded_width + x + 2].r)
-
(input_matrix[y * padded_width + x].r + input_matrix[y * padded_width + x + 1].r + input_matrix[y * padded_width + x + 2].r)
);
tmp1.g = (
(input_matrix[(y + 2) * padded_width + x].g + input_matrix[(y + 2) * padded_width + x + 1].g + input_matrix[(y + 2) * padded_width + x + 2].g)
-
(input_matrix[y * padded_width + x].g + input_matrix[y * padded_width + x + 1].g + input_matrix[y * padded_width + x + 2].g)
);
tmp1.b = (
(input_matrix[(y + 2) * padded_width + x].b + input_matrix[(y + 2) * padded_width + x + 1].b + input_matrix[(y + 2) * padded_width + x + 2].b)
-
(input_matrix[y * padded_width + x].b + input_matrix[y * padded_width + x + 1].b + input_matrix[y * padded_width + x + 2].b)
);
tmp2.r = (
(input_matrix[y * padded_width + x + 2].r + input_matrix[(y + 1) * padded_width + x + 2].r + input_matrix[(y + 2) * padded_width + x + 2].r)
-
(input_matrix[y * padded_width + x].r + input_matrix[(y + 1) * padded_width + x].r + input_matrix[(y + 2) * padded_width + x].r)
);
tmp2.g = (
(input_matrix[y * padded_width + x + 2].g + input_matrix[(y + 1) * padded_width + x + 2].g + input_matrix[(y + 2) * padded_width + x + 2].g)
-
(input_matrix[y * padded_width + x].g + input_matrix[(y + 1) * padded_width + x].g + input_matrix[(y + 2) * padded_width + x].g)
);
tmp2.b = (
(input_matrix[y * padded_width + x + 2].b + input_matrix[(y + 1) * padded_width + x + 2].b + input_matrix[(y + 2) * padded_width + x + 2].b)
-
(input_matrix[y * padded_width + x].b + input_matrix[(y + 1) * padded_width + x].b + input_matrix[(y + 2) * padded_width + x].b)
);
if (tmp1.r > 255) tmp1.r = 255; if (tmp1.r < 0) tmp1.r = 0;
if (tmp1.g > 255) tmp1.g = 255; if (tmp1.g < 0) tmp1.g = 0;
if (tmp1.b > 255) tmp1.b = 255; if (tmp1.b < 0) tmp1.b = 0;
if (tmp2.r > 255) tmp2.r = 255; if (tmp2.r < 0) tmp2.r = 0;
if (tmp2.g > 255) tmp2.g = 255; if (tmp2.g < 0) tmp2.g = 0;
if (tmp2.b > 255) tmp2.b = 255; if (tmp2.b < 0) tmp2.b = 0;
perfect.r = (tmp1.r > tmp2.r) ? (unsigned char)tmp1.r : (unsigned char)tmp2.r;
perfect.g = (tmp1.g > tmp2.g) ? (unsigned char)tmp1.g : (unsigned char)tmp2.g;
perfect.b = (tmp1.b > tmp2.b) ? (unsigned char)tmp1.b : (unsigned char)tmp2.b;
output_matrix[y * width + x] = perfect;
}
}
}
void ApplyPrewittFilter(pixel* input_matrix, pixel* output_matrix, const int width, const int height, pixel* gpu_output_data)
{
pixel* padded_input_matrix = PadDataByOnePixel(input_matrix, width, height);
const int padded_width = width + 2;
const int padded_height = height + 2;
PrewittFilter(padded_input_matrix, output_matrix, width, height, padded_width, padded_height);
}
void cuda_filter(size_t width, size_t height, const size_t width_in_bytes, const size_t padded_width_in_bytes, pixel* input_data, pixel* gpu_output_data) // gpu_output_data receives the filtered image on the host
{
// ********************************************************************************************************
size_t input_pitch = 0;
	pixel* padded_input = PadDataByOnePixel(input_data, width, height);
	const size_t padded_height = height + 2; // PadDataByOnePixel adds one pixel of border on every side
	unsigned char* pitched_input_data = nullptr;
checkCuda(hipMallocPitch(reinterpret_cast<void**>(&pitched_input_data), &input_pitch, padded_width_in_bytes, padded_height), "CudaMallocPitch");
checkCuda(hipMemcpy2D(pitched_input_data, input_pitch, reinterpret_cast<unsigned char**>(padded_input), padded_width_in_bytes, padded_width_in_bytes, padded_height, hipMemcpyHostToDevice), "CudaMemcpy2D");
size_t output_pitch = 0;
unsigned char* pitched_output_data = nullptr;
checkCuda(hipMallocPitch(reinterpret_cast<void**>(&pitched_output_data), &output_pitch, width_in_bytes, height), "CudaMallocPitch");
float gpu_time_count = 0;
hipEvent_t startEvent, stopEvent;
checkCuda(hipEventCreate(&startEvent), "CudaEventCreate");
checkCuda(hipEventCreate(&stopEvent), "CudaEventCreate");
//
cout << "Filtering via GPU" << " pitch: " << input_pitch << " " << output_pitch << endl;
int aligned_width = (input_pitch + AMOUNT_OF_THREADS_X - 1) / AMOUNT_OF_THREADS_X;
int aligned_height = (height + AMOUNT_OF_THREADS_Y - 1) / AMOUNT_OF_THREADS_Y;
dim3 dimGrid(aligned_width, aligned_height, 1);
dim3 dimBlock(AMOUNT_OF_THREADS_X, AMOUNT_OF_THREADS_Y, 1);
checkCuda(hipEventRecord(startEvent, 0), "CudaEventRecord");
ApplyPrewittFilter << <dimGrid, dimBlock >> > (pitched_input_data, pitched_output_data, width_in_bytes, height, padded_width_in_bytes, padded_height, input_pitch, output_pitch);
checkCuda(hipEventRecord(stopEvent, 0), "CudaEventRecord");
checkCuda(hipEventSynchronize(stopEvent), "CudaEventSynchronize");
checkCuda(hipEventElapsedTime(&gpu_time_count, startEvent, stopEvent), "CudaEventElapsedTime");
cout << "GPU time: " << gpu_time_count << endl;
	checkCuda(hipMemcpy2D(reinterpret_cast<unsigned char*>(gpu_output_data), width_in_bytes, pitched_output_data, output_pitch, width_in_bytes, height, hipMemcpyDeviceToHost), "CudaMemcpy2D");
checkCuda(hipEventDestroy(startEvent), "CudaEventDestroy");
checkCuda(hipEventDestroy(stopEvent), "CudaEventDestroy");
checkCuda(hipFree(pitched_input_data), "CudaFree");
checkCuda(hipFree(pitched_output_data), "CudaFree");
}
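// --- Illustrative note on pitched memory (not part of the original filter) ---
// hipMallocPitch may pad every row, so device code has to step through the allocation
// by the returned pitch in bytes rather than by the logical row width; that is why
// ApplyPrewittFilter divides in_pitch / out_pitch by sizeof(int) before indexing its
// uchar4 view. The stand-alone kernel below shows the usual byte-wise addressing rule;
// its name and the byte-inversion body are made up for the example.
__global__ void touch_pitched_sketch(unsigned char *data, size_t pitch, int width_in_bytes, int height)
{
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	int y = blockIdx.y * blockDim.y + threadIdx.y;
	if (x < width_in_bytes && y < height)
	{
		unsigned char *row = data + (size_t)y * pitch; // advance by pitch, not by width_in_bytes
		row[x] = 255 - row[x];                         // e.g. invert each byte of the row
	}
}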
|
b86951cb10942b165e8746491526c23a3e4c38a8.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "inc/helper_image.h"
#include <cstdlib>
#include <cstdio>
#include <stdio.h>
#include <cassert>
#include <iostream>
#include <iomanip>
#include <chrono>
using namespace std;
const int AMOUNT_OF_THREADS_X = 32;
const int AMOUNT_OF_THREADS_Y = 16;
struct pixel
{
unsigned char r;
unsigned char g;
unsigned char b;
};
struct int_pixel
{
int r;
int g;
int b;
};
#define CUDA_DEBUG
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result, const char* err)
{
#if defined(DEBUG) || defined(CUDA_DEBUG)
if (result != cudaSuccess)
{
cerr << "CUDA Runtime Error: " << cudaGetErrorString(result) << " at: " << err << endl;
assert(result == cudaSuccess);
}
#endif
return result;
}
__global__ void ApplyPrewittFilter(unsigned char* input_data, unsigned char* output_data, const int width, const int height, const int padded_width, const int padded_height, const int in_pitch, const int out_pitch)
{
const int x = blockIdx.x * AMOUNT_OF_THREADS_X + threadIdx.x;
const int y = blockIdx.y * AMOUNT_OF_THREADS_Y + threadIdx.y;
const int int_widht = in_pitch / sizeof(int);
const int output_int_width = out_pitch / sizeof(int);
uchar4* reintterpreted_input = reinterpret_cast<uchar4*>(input_data);
uchar4* reintterpreted_output = reinterpret_cast<uchar4*>(output_data);
__shared__ uchar4 shared_memory[AMOUNT_OF_THREADS_Y + 2][AMOUNT_OF_THREADS_X + 2];
if (x <= int_widht && y <= padded_height)
{
shared_memory[threadIdx.y][threadIdx.x] = reintterpreted_input[y * int_widht + x];
if (y + AMOUNT_OF_THREADS_Y < padded_height && threadIdx.y < 2)
shared_memory[AMOUNT_OF_THREADS_Y + threadIdx.y][threadIdx.x] = reintterpreted_input[(AMOUNT_OF_THREADS_Y + y) * int_widht + x];
if (!(threadIdx.x % 31))
{
shared_memory[threadIdx.y][threadIdx.x + 1] = reintterpreted_input[y * int_widht + x + 1];
shared_memory[threadIdx.y][threadIdx.x + 2] = reintterpreted_input[y * int_widht + x + 2];
}
if ((!(threadIdx.y % 14) || !(threadIdx.y % 15)) && threadIdx.x >= 30)
{
shared_memory[threadIdx.y + 2][threadIdx.x + 2] = reintterpreted_input[(y + 2) * int_widht + x + 2];
}
}
__syncthreads();
if (x <= int_widht && y <= padded_height) {
uchar4 out_uchar4 = { 0 };
uchar4 first_int = shared_memory[threadIdx.y][threadIdx.x];
uchar4 second_int = shared_memory[threadIdx.y][threadIdx.x + 1];
uchar4 third_int = shared_memory[threadIdx.y][threadIdx.x + 2];
uchar4 fourth_int = shared_memory[threadIdx.y + 1][threadIdx.x];
uchar4 fifth_int = shared_memory[threadIdx.y + 1][threadIdx.x + 1];
uchar4 sixth_int = shared_memory[threadIdx.y + 1][threadIdx.x + 2];
uchar4 seventh_int = shared_memory[threadIdx.y + 2][threadIdx.x];
uchar4 eighth_int = shared_memory[threadIdx.y + 2][threadIdx.x + 1];
uchar4 nineth_int = shared_memory[threadIdx.y + 2][threadIdx.x + 2];
int tmp1, tmp2;
/////////////////////
tmp1 = (seventh_int.x + seventh_int.w + eighth_int.z) - (first_int.x + first_int.w + second_int.z);
tmp2 = (second_int.z + fifth_int.z + eighth_int.z) - (first_int.x + fourth_int.x + seventh_int.x);
if (tmp1 > 255) tmp1 = 255;
if (tmp1 < 0) tmp1 = 0;
if (tmp2 > 255) tmp2 = 255;
if (tmp2 < 0) tmp2 = 0;
out_uchar4.x = (tmp1 >= tmp2) ? (unsigned char)tmp1 : (unsigned char)tmp2;
////////////////////
tmp1 = (seventh_int.y + eighth_int.x + eighth_int.w) - (first_int.y + second_int.x + second_int.w);
tmp2 = (second_int.w + fifth_int.w + eighth_int.w) - (first_int.y + fourth_int.y + seventh_int.y);
if (tmp1 > 255) tmp1 = 255;
if (tmp1 < 0) tmp1 = 0;
if (tmp2 > 255) tmp2 = 255;
if (tmp2 < 0) tmp2 = 0;
out_uchar4.y = (tmp1 > tmp2) ? (unsigned char)tmp1 : (unsigned char)tmp2;
/////////////////////////
tmp1 = (seventh_int.z + eighth_int.y + nineth_int.x) - (first_int.z + second_int.y + third_int.x);
tmp2 = (third_int.x + sixth_int.x + nineth_int.x) - (first_int.z + fourth_int.z + seventh_int.z);
if (tmp1 > 255) tmp1 = 255;
if (tmp1 < 0) tmp1 = 0;
if (tmp2 > 255) tmp2 = 255;
if (tmp2 < 0) tmp2 = 0;
out_uchar4.z = (tmp1 > tmp2) ? (unsigned char)tmp1 : (unsigned char)tmp2;
//////////////////////////
tmp1 = (seventh_int.w + eighth_int.z + nineth_int.y) - (first_int.w + second_int.z + third_int.y);
tmp2 = (third_int.y + sixth_int.y + nineth_int.y) - (first_int.w + fourth_int.w + seventh_int.w);
if (tmp1 > 255) tmp1 = 255;
if (tmp1 < 0) tmp1 = 0;
if (tmp2 > 255) tmp2 = 255;
if (tmp2 < 0) tmp2 = 0;
out_uchar4.w = (tmp1 > tmp2) ? (unsigned char)tmp1 : (unsigned char)tmp2;
if (x < output_int_width && y < height)
reintterpreted_output[y * output_int_width + x] = out_uchar4;
}
}
pixel* PadDataByOnePixel(pixel* input_data, int width, int height)
{
const int new_width = width + 2;
const int new_height = height + 2;
pixel* output_data = new pixel[new_width * new_height];
// copy initial part
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
output_data[(y + 1) * new_width + x + 1] = input_data[y * width + x];
}
}
output_data[0] = input_data[0];
output_data[new_width - 1] = input_data[width - 1];
output_data[new_width * (new_height - 1)] = input_data[width * (height - 1)];
output_data[new_width * new_height - 1] = input_data[width * height - 1];
for (int x = 0; x < width; x++)
{
output_data[x + 1] = input_data[x];
output_data[(new_height - 1) * new_width + x + 1] = input_data[width * (height - 1) + x];
}
for (int y = 0; y < height; y++)
{
output_data[(y + 1) * new_width] = input_data[y * width];
output_data[(y + 1) * new_width + new_width - 1] = input_data[y * width + width - 1];
}
return output_data;
}
// Use the PadDataByOnePixel transformation on the input before using this function
void PrewittFilter(pixel* input_matrix, pixel* output_matrix, const int width, const int height, const int padded_width, const int padded_height)
{
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
pixel perfect = { 0 };
int_pixel tmp1 = { 0 };
int_pixel tmp2 = { 0 };
tmp1.r = (
(input_matrix[(y + 2) * padded_width + x].r + input_matrix[(y + 2) * padded_width + x + 1].r + input_matrix[(y + 2) * padded_width + x + 2].r)
-
(input_matrix[y * padded_width + x].r + input_matrix[y * padded_width + x + 1].r + input_matrix[y * padded_width + x + 2].r)
);
tmp1.g = (
(input_matrix[(y + 2) * padded_width + x].g + input_matrix[(y + 2) * padded_width + x + 1].g + input_matrix[(y + 2) * padded_width + x + 2].g)
-
(input_matrix[y * padded_width + x].g + input_matrix[y * padded_width + x + 1].g + input_matrix[y * padded_width + x + 2].g)
);
tmp1.b = (
(input_matrix[(y + 2) * padded_width + x].b + input_matrix[(y + 2) * padded_width + x + 1].b + input_matrix[(y + 2) * padded_width + x + 2].b)
-
(input_matrix[y * padded_width + x].b + input_matrix[y * padded_width + x + 1].b + input_matrix[y * padded_width + x + 2].b)
);
tmp2.r = (
(input_matrix[y * padded_width + x + 2].r + input_matrix[(y + 1) * padded_width + x + 2].r + input_matrix[(y + 2) * padded_width + x + 2].r)
-
(input_matrix[y * padded_width + x].r + input_matrix[(y + 1) * padded_width + x].r + input_matrix[(y + 2) * padded_width + x].r)
);
tmp2.g = (
(input_matrix[y * padded_width + x + 2].g + input_matrix[(y + 1) * padded_width + x + 2].g + input_matrix[(y + 2) * padded_width + x + 2].g)
-
(input_matrix[y * padded_width + x].g + input_matrix[(y + 1) * padded_width + x].g + input_matrix[(y + 2) * padded_width + x].g)
);
tmp2.b = (
(input_matrix[y * padded_width + x + 2].b + input_matrix[(y + 1) * padded_width + x + 2].b + input_matrix[(y + 2) * padded_width + x + 2].b)
-
(input_matrix[y * padded_width + x].b + input_matrix[(y + 1) * padded_width + x].b + input_matrix[(y + 2) * padded_width + x].b)
);
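// clamp both responses to [0, 255] and keep the stronger one per channel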
if (tmp1.r > 255) tmp1.r = 255; if (tmp1.r < 0) tmp1.r = 0;
if (tmp1.g > 255) tmp1.g = 255; if (tmp1.g < 0) tmp1.g = 0;
if (tmp1.b > 255) tmp1.b = 255; if (tmp1.b < 0) tmp1.b = 0;
if (tmp2.r > 255) tmp2.r = 255; if (tmp2.r < 0) tmp2.r = 0;
if (tmp2.g > 255) tmp2.g = 255; if (tmp2.g < 0) tmp2.g = 0;
if (tmp2.b > 255) tmp2.b = 255; if (tmp2.b < 0) tmp2.b = 0;
perfect.r = (tmp1.r > tmp2.r) ? (unsigned char)tmp1.r : (unsigned char)tmp2.r;
perfect.g = (tmp1.g > tmp2.g) ? (unsigned char)tmp1.g : (unsigned char)tmp2.g;
perfect.b = (tmp1.b > tmp2.b) ? (unsigned char)tmp1.b : (unsigned char)tmp2.b;
output_matrix[y * width + x] = perfect;
}
}
}
void ApplyPrewittFilter(pixel* input_matrix, pixel* output_matrix, const int width, const int height, pixel* gpu_output_data)
{
pixel* padded_input_matrix = PadDataByOnePixel(input_matrix, width, height);
const int padded_width = width + 2;
const int padded_height = height + 2;
PrewittFilter(padded_input_matrix, output_matrix, width, height, padded_width, padded_height);
delete[] padded_input_matrix;
}
void cuda_filter(size_t width, size_t height, const size_t width_in_bytes, const size_t padded_width_in_bytes, pixel* input_data, pixel* gpu_output_data)
{
// ********************************************************************************************************
size_t input_pitch = 0;
pixel* padded_input = PadDataByOnePixel(input_data, width, height);
const size_t padded_height = height + 2;
unsigned char* pitched_input_data = nullptr;
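// Allocate pitched device buffers and copy the padded host image into them row by row (pitches are returned in bytes)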
checkCuda(cudaMallocPitch(reinterpret_cast<void**>(&pitched_input_data), &input_pitch, padded_width_in_bytes, padded_height), "CudaMallocPitch");
checkCuda(cudaMemcpy2D(pitched_input_data, input_pitch, reinterpret_cast<unsigned char*>(padded_input), padded_width_in_bytes, padded_width_in_bytes, padded_height, cudaMemcpyHostToDevice), "CudaMemcpy2D");
size_t output_pitch = 0;
unsigned char* pitched_output_data = nullptr;
checkCuda(cudaMallocPitch(reinterpret_cast<void**>(&pitched_output_data), &output_pitch, width_in_bytes, height), "CudaMallocPitch");
float gpu_time_count = 0;
cudaEvent_t startEvent, stopEvent;
checkCuda(cudaEventCreate(&startEvent), "CudaEventCreate");
checkCuda(cudaEventCreate(&stopEvent), "CudaEventCreate");
//
cout << "Filtering via GPU" << " pitch: " << input_pitch << " " << output_pitch << endl;
int aligned_width = (input_pitch + AMOUNT_OF_THREADS_X - 1) / AMOUNT_OF_THREADS_X;
int aligned_height = (height + AMOUNT_OF_THREADS_Y - 1) / AMOUNT_OF_THREADS_Y;
dim3 dimGrid(aligned_width, aligned_height, 1);
dim3 dimBlock(AMOUNT_OF_THREADS_X, AMOUNT_OF_THREADS_Y, 1);
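// The grid is sized from the pitch (in bytes), so more threads are launched than there are
// uchar4 columns; the kernel guards on output_int_width and height.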
checkCuda(cudaEventRecord(startEvent, 0), "CudaEventRecord");
ApplyPrewittFilter << <dimGrid, dimBlock >> > (pitched_input_data, pitched_output_data, width_in_bytes, height, padded_width_in_bytes, padded_height, input_pitch, output_pitch);
checkCuda(cudaEventRecord(stopEvent, 0), "CudaEventRecord");
checkCuda(cudaEventSynchronize(stopEvent), "CudaEventSynchronize");
checkCuda(cudaEventElapsedTime(&gpu_time_count, startEvent, stopEvent), "CudaEventElapsedTime");
cout << "GPU time: " << gpu_time_count << endl;
checkCuda(cudaMemcpy2D(reinterpret_cast<unsigned char*>(gpu_output_data), width_in_bytes, pitched_output_data, output_pitch, width_in_bytes, height, cudaMemcpyDeviceToHost), "CudaMemcpy2D");
checkCuda(cudaEventDestroy(startEvent), "CudaEventDestroy");
checkCuda(cudaEventDestroy(stopEvent), "CudaEventDestroy");
checkCuda(cudaFree(pitched_input_data), "CudaFree");
checkCuda(cudaFree(pitched_output_data), "CudaFree");
delete[] padded_input;
}
|
36836cc7cb3e0aa97b30258dda88ec00ab5178fe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This program executes a typical convolutional layer in regular CNNs
#include <iostream>
#include "cnnConvLayer.h"
using namespace std;
// This is the CPU version, please don't modify it
void convLayerCPU()
{
// declarations for bunch of indexing parameters
int fn, sli, fmy, fmx, y, x;
int sum, ifmy, ifmx, ofmy, ofmx;
int filtIdx, inNeuIdx, outNeuIdx, outIdx;
int filtVol = FMDEPTH * FILTSIZE * FILTSIZE;
int filtArea = FILTSIZE * FILTSIZE;
int fmArea = FMSIZE *FMSIZE;
int outArea = FMSIZE/2 * FMSIZE/2;
// Convolution
for(fn = 0; fn < FILTNUM; fn++){
for(fmy = 0; fmy < FMSIZE; fmy += STRIDE){
for(fmx = 0; fmx < FMSIZE; fmx += STRIDE){
for(sli = 0; sli < FMDEPTH; sli++){
sum = 0;
for(y = 0; y < FILTSIZE; y++){
for(x = 0; x < FILTSIZE; x++){
ifmy = fmy - FILTSIZE / 2 + y;
ifmx = fmx - FILTSIZE / 2 + x;
filtIdx = fn*filtVol + sli*filtArea + y*FILTSIZE + x;
inNeuIdx = sli*fmArea + ifmy*FMSIZE + ifmx;
if(ifmy > 0 && ifmy < FMSIZE && ifmx > 0 && ifmx < FMSIZE)
sum += filt[filtIdx] * inNeu[inNeuIdx];
}
}
}
// Activation - ReLU
outNeuIdx = fn*fmArea + fmy*FMSIZE + fmx;
if(sum <= 0)
outNeu[outNeuIdx] = 0;
else
outNeu[outNeuIdx] = sum;
}
}
}
// Max Pooling with Window Size 2x2
int max, tmpVal;
for(sli = 0; sli < FILTNUM; sli++){
for(fmy = 0; fmy < FMSIZE/2 ; fmy += 1){
for(fmx = 0; fmx < FMSIZE/2 ; fmx += 1){
outNeuIdx = sli*fmArea + fmy*2*FMSIZE + fmx*2;
max = outNeu[outNeuIdx];
for(y = 0; y < 2; y++){
for(x = 0; x < 2; x++){
ofmy = fmy*2 + y;
ofmx = fmx*2 + x;
outNeuIdx = sli*fmArea + ofmy*FMSIZE + ofmx;
tmpVal = outNeu[outNeuIdx];
if(tmpVal > max)
max = tmpVal;
}
}
outIdx = sli*outArea + fmy*FMSIZE/2 + fmx;
outCPU[outIdx] = max;
}
}
}
}
/*** Implement your CUDA Kernel here ***/
__global__
void convLayerGPU()
{
}
/*** Implement your CUDA Kernel here ***/
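// The kernel above is intentionally left empty by the assignment skeleton; it is expected to
// reproduce the convolution + ReLU + 2x2 max-pooling pipeline of convLayerCPU on the GPU and
// fill the buffer that checker() compares against outCPU.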
int main()
{
int convLayerCPUExecTime, convLayerGPUExecTime;
init();
timespec time_begin, time_end;
clock_gettime(CLOCK_REALTIME, &time_begin);
convLayerCPU();
clock_gettime(CLOCK_REALTIME, &time_end);
convLayerCPUExecTime = timespec_diff_us(time_begin, time_end);
cout << "CPU time for executing a typical convolutional layer = "
<< convLayerCPUExecTime / 1000 << "ms" << endl;
clock_gettime(CLOCK_REALTIME, &time_begin);
/*** Launch your CUDA Kernel here ***/
hipLaunchKernelGGL(( convLayerGPU), dim3(1),dim3(1), 0, 0, ); // Launch the kernel
hipDeviceSynchronize(); // Do synchronization before clock_gettime()
/*** Launch your CUDA Kernel here ***/
clock_gettime(CLOCK_REALTIME, &time_end);
convLayerGPUExecTime = timespec_diff_us(time_begin, time_end);
cout << "GPU time for executing a typical convolutional layer = "
<< convLayerGPUExecTime / 1000 << "ms" << endl;
if(checker()){
cout << "Congratulations! You pass the check." << endl;
cout << "Speedup: " << (float)convLayerCPUExecTime / convLayerGPUExecTime << endl;
}
else
cout << "Sorry! Your result is wrong." << endl;
ending();
return 0;
}
|
36836cc7cb3e0aa97b30258dda88ec00ab5178fe.cu
|
// This program executes a typical convolutional layer in regular CNNs
#include <iostream>
#include "cnnConvLayer.h"
using namespace std;
// This is the CPU version, please don't modify it
void convLayerCPU()
{
// declarations for bunch of indexing parameters
int fn, sli, fmy, fmx, y, x;
int sum, ifmy, ifmx, ofmy, ofmx;
int filtIdx, inNeuIdx, outNeuIdx, outIdx;
int filtVol = FMDEPTH * FILTSIZE * FILTSIZE;
int filtArea = FILTSIZE * FILTSIZE;
int fmArea = FMSIZE *FMSIZE;
int outArea = FMSIZE/2 * FMSIZE/2;
// Convolution
for(fn = 0; fn < FILTNUM; fn++){
for(fmy = 0; fmy < FMSIZE; fmy += STRIDE){
for(fmx = 0; fmx < FMSIZE; fmx += STRIDE){
for(sli = 0; sli < FMDEPTH; sli++){
sum = 0;
for(y = 0; y < FILTSIZE; y++){
for(x = 0; x < FILTSIZE; x++){
ifmy = fmy - FILTSIZE / 2 + y;
ifmx = fmx - FILTSIZE / 2 + x;
filtIdx = fn*filtVol + sli*filtArea + y*FILTSIZE + x;
inNeuIdx = sli*fmArea + ifmy*FMSIZE + ifmx;
if(ifmy > 0 && ifmy < FMSIZE && ifmx > 0 && ifmx < FMSIZE)
sum += filt[filtIdx] * inNeu[inNeuIdx];
}
}
}
// Activation - ReLU
outNeuIdx = fn*fmArea + fmy*FMSIZE + fmx;
if(sum <= 0)
outNeu[outNeuIdx] = 0;
else
outNeu[outNeuIdx] = sum;
}
}
}
// Max Pooling with Window Size 2x2
int max, tmpVal;
for(sli = 0; sli < FILTNUM; sli++){
for(fmy = 0; fmy < FMSIZE/2 ; fmy += 1){
for(fmx = 0; fmx < FMSIZE/2 ; fmx += 1){
outNeuIdx = sli*fmArea + fmy*2*FMSIZE + fmx*2;
max = outNeu[outNeuIdx];
for(y = 0; y < 2; y++){
for(x = 0; x < 2; x++){
ofmy = fmy*2 + y;
ofmx = fmx*2 + x;
outNeuIdx = sli*fmArea + ofmy*FMSIZE + ofmx;
tmpVal = outNeu[outNeuIdx];
if(tmpVal > max)
max = tmpVal;
}
}
outIdx = sli*outArea + fmy*FMSIZE/2 + fmx;
outCPU[outIdx] = max;
}
}
}
}
/*** Implement your CUDA Kernel here ***/
__global__
void convLayerGPU()
{
}
/*** Implement your CUDA Kernel here ***/
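// The kernel below is NOT part of the original assignment skeleton; it is a minimal sketch of
// one possible mapping for the convolution + ReLU stage only (no max pooling), assuming
// STRIDE == 1 and that the filter and input-neuron arrays have already been copied to device
// buffers. The pointer names, the int element type and the launch geometry (one block per
// filter, one thread per output position, which requires FMSIZE*FMSIZE <= 1024 threads) are
// illustrative assumptions, not part of cnnConvLayer.h.
__global__ void convLayerGPUSketch(const int* d_filt, const int* d_inNeu, int* d_outNeu)
{
	const int fn  = blockIdx.x;   // output filter index
	const int fmx = threadIdx.x;  // output column
	const int fmy = threadIdx.y;  // output row
	const int filtVol  = FMDEPTH * FILTSIZE * FILTSIZE;
	const int filtArea = FILTSIZE * FILTSIZE;
	const int fmArea   = FMSIZE * FMSIZE;
	if (fmx >= FMSIZE || fmy >= FMSIZE)
		return;
	int sum = 0;
	// accumulate over every input slice and every filter tap
	for (int sli = 0; sli < FMDEPTH; sli++)
		for (int y = 0; y < FILTSIZE; y++)
			for (int x = 0; x < FILTSIZE; x++) {
				const int ifmy = fmy - FILTSIZE / 2 + y;
				const int ifmx = fmx - FILTSIZE / 2 + x;
				if (ifmy >= 0 && ifmy < FMSIZE && ifmx >= 0 && ifmx < FMSIZE)
					sum += d_filt[fn*filtVol + sli*filtArea + y*FILTSIZE + x]
					     * d_inNeu[sli*fmArea + ifmy*FMSIZE + ifmx];
			}
	// ReLU
	d_outNeu[fn*fmArea + fmy*FMSIZE + fmx] = (sum > 0) ? sum : 0;
}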
int main()
{
int convLayerCPUExecTime, convLayerGPUExecTime;
init();
timespec time_begin, time_end;
clock_gettime(CLOCK_REALTIME, &time_begin);
convLayerCPU();
clock_gettime(CLOCK_REALTIME, &time_end);
convLayerCPUExecTime = timespec_diff_us(time_begin, time_end);
cout << "CPU time for executing a typical convolutional layer = "
<< convLayerCPUExecTime / 1000 << "ms" << endl;
clock_gettime(CLOCK_REALTIME, &time_begin);
/*** Launch your CUDA Kernel here ***/
convLayerGPU<<<1,1>>>(); // Launch the kernel
cudaDeviceSynchronize(); // Do synchronization before clock_gettime()
/*** Launch your CUDA Kernel here ***/
clock_gettime(CLOCK_REALTIME, &time_end);
convLayerGPUExecTime = timespec_diff_us(time_begin, time_end);
cout << "GPU time for executing a typical convolutional layer = "
<< convLayerGPUExecTime / 1000 << "ms" << endl;
if(checker()){
cout << "Congratulations! You pass the check." << endl;
cout << "Speedup: " << (float)convLayerCPUExecTime / convLayerGPUExecTime << endl;
}
else
cout << "Sorry! Your result is wrong." << endl;
ending();
return 0;
}
|
470169c8dba95ab8992654d52eeba4d16571a9d1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "closestGPU.h"
#include "closestCPU.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
using namespace std;
int main(){
// Number of points
const int count = 100000;
// Create an array of points and floats (HOST)
int* index = new int[count];
float3* Newpoints = new float3[count];
for(int i=0; i<count; i++){
Newpoints[i].x = (float)((rand()%10000)-5000);
Newpoints[i].y = (float)((rand()%10000)-5000);
Newpoints[i].z = (float)((rand()%10000)-5000);
}
// Create an array of points and floats (DEVICE)
int* d_index;
float3* d_Newpoints;
// Allocate memory to GPU
if(hipMalloc(&d_index, sizeof(int)*count) != hipSuccess){
cout << "Error in memory allocation of array index.";
return 0;
}
if(hipMalloc(&d_Newpoints, sizeof(float3)*count) != hipSuccess){
cout << "Error in memory allocation of array Newpoints.";
hipFree(d_index);
return 0;
}
// Copy to device memory
hipMemcpy(d_index,index, sizeof(int)*count, hipMemcpyHostToDevice);
hipMemcpy(d_Newpoints, Newpoints, sizeof(float3)*count, hipMemcpyHostToDevice);
// Upper bound of fastest time
long fastest = 1000000;
for(int j=0; j<20; j++){
// Start time
long start = clock();
// Run the algorithm
// findClosestCPU(Newpoints, index, count); // CPU
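// One thread per point: (count/32)+1 blocks of 32 threads. The blocking hipMemcpy below
// means the measured interval also covers the kernel execution.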
hipLaunchKernelGGL(( findClosestGPU) , dim3((count/32)+1),dim3(32), 0, 0, d_Newpoints, d_index,count);
hipMemcpy(index, d_index, sizeof(int)*count, hipMemcpyDeviceToHost);
// End time
long finish = clock();
cout <<"Run "<<j<<" took "<<(finish-start)<<" millis "<<endl;
if((finish-start) < fastest){
fastest = (finish-start);
}
}
// Print fastest time
cout<<"======================="<<endl;
cout << "Fastest time: "<<fastest<<" millis "<<endl;
// Print results
cout<<"+++++++++++++++++++++++"<<endl;
cout<< "Printing 5 sample results..."<<endl;
for(int i=0; i<5; i++){
cout<<i<<" --> ("<< Newpoints[i].x<< ","<<Newpoints[i].y<<","<<Newpoints[i].z<<")"<<endl;
}
cout<<"+++++++++++++++++++++++"<<endl;
cout<< "Printing 5 sample results..."<<endl;
for(int i=0; i<5; i++){
cout<<i<<" --> "<< index[i]<< endl;
}
// Deallocate device and host memory
hipFree(d_Newpoints);
hipFree(d_index);
delete[] index;
delete[] Newpoints;
return 0;
}
|
470169c8dba95ab8992654d52eeba4d16571a9d1.cu
|
#include <iostream>
#include "closestGPU.h"
#include "closestCPU.h"
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;
int main(){
// Number of points
const int count = 100000;
// Create an array of points and floats (HOST)
int* index = new int[count];
float3* Newpoints = new float3[count];
for(int i=0; i<count; i++){
Newpoints[i].x = (float)((rand()%10000)-5000);
Newpoints[i].y = (float)((rand()%10000)-5000);
Newpoints[i].z = (float)((rand()%10000)-5000);
}
// Create an array of points and floats (DEVICE)
int* d_index;
float3* d_Newpoints;
// Allocate memory to GPU
if(cudaMalloc(&d_index, sizeof(int)*count) != cudaSuccess){
cout << "Error in memory allocation of array index.";
return 0;
}
if(cudaMalloc(&d_Newpoints, sizeof(float3)*count) != cudaSuccess){
cout << "Error in memory allocation of array Newpoints.";
cudaFree(d_index);
return 0;
}
// Copy to device memory
cudaMemcpy(d_index,index, sizeof(int)*count, cudaMemcpyHostToDevice);
cudaMemcpy(d_Newpoints, Newpoints, sizeof(float3)*count, cudaMemcpyHostToDevice);
// Upper bound of fastest time
long fastest = 1000000;
for(int j=0; j<20; j++){
// Start time
long start = clock();
// Run the algorithm
// findClosestCPU(Newpoints, index, count); // CPU
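// One thread per point: (count/32)+1 blocks of 32 threads. The blocking cudaMemcpy below
// means the measured interval also covers the kernel execution.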
findClosestGPU <<<(count/32)+1,32>>> (d_Newpoints, d_index,count);
cudaMemcpy(index, d_index, sizeof(int)*count, cudaMemcpyDeviceToHost);
// End time
long finish = clock();
cout <<"Run "<<j<<" took "<<(finish-start)<<" millis "<<endl;
if((finish-start) < fastest){
fastest = (finish-start);
}
}
// Print fastest time
cout<<"======================="<<endl;
cout << "Fastest time: "<<fastest<<" millis "<<endl;
// Print results
cout<<"+++++++++++++++++++++++"<<endl;
cout<< "Printing 5 sample results..."<<endl;
for(int i=0; i<5; i++){
cout<<i<<" --> ("<< Newpoints[i].x<< ","<<Newpoints[i].y<<","<<Newpoints[i].z<<")"<<endl;
}
cout<<"+++++++++++++++++++++++"<<endl;
cout<< "Printing 5 sample results..."<<endl;
for(int i=0; i<5; i++){
cout<<i<<" --> "<< index[i]<< endl;
}
// Deallocate device and host memory
cudaFree(d_Newpoints);
cudaFree(d_index);
delete[] index;
delete[] Newpoints;
return 0;
}
|
3b54ae95e4c3f0638fa404a52d1baf021c3a1a4e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
#include <sys/time.h>
#include <time.h>
texture<float,2,hipReadModeElementType> texRef_f0A;
texture<float,2,hipReadModeElementType> texRef_f1A;
texture<float,2,hipReadModeElementType> texRef_f2A;
texture<float,2,hipReadModeElementType> texRef_f3A;
texture<float,2,hipReadModeElementType> texRef_f4A;
texture<float,2,hipReadModeElementType> texRef_f5A;
texture<float,2,hipReadModeElementType> texRef_f6A;
texture<float,2,hipReadModeElementType> texRef_f7A;
texture<float,2,hipReadModeElementType> texRef_f8A;
texture<float,2,hipReadModeElementType> texRef_f9A;
texture<float,2,hipReadModeElementType> texRef_f10A;
texture<float,2,hipReadModeElementType> texRef_f11A;
texture<float,2,hipReadModeElementType> texRef_f12A;
texture<float,2,hipReadModeElementType> texRef_f13A;
texture<float,2,hipReadModeElementType> texRef_f14A;
texture<float,2,hipReadModeElementType> texRef_f15A;
texture<float,2,hipReadModeElementType> texRef_f16A;
texture<float,2,hipReadModeElementType> texRef_f17A;
texture<float,2,hipReadModeElementType> texRef_f18A;
texture<float,2,hipReadModeElementType> texRef_f0B;
texture<float,2,hipReadModeElementType> texRef_f1B;
texture<float,2,hipReadModeElementType> texRef_f2B;
texture<float,2,hipReadModeElementType> texRef_f3B;
texture<float,2,hipReadModeElementType> texRef_f4B;
texture<float,2,hipReadModeElementType> texRef_f5B;
texture<float,2,hipReadModeElementType> texRef_f6B;
texture<float,2,hipReadModeElementType> texRef_f7B;
texture<float,2,hipReadModeElementType> texRef_f8B;
texture<float,2,hipReadModeElementType> texRef_f9B;
texture<float,2,hipReadModeElementType> texRef_f10B;
texture<float,2,hipReadModeElementType> texRef_f11B;
texture<float,2,hipReadModeElementType> texRef_f12B;
texture<float,2,hipReadModeElementType> texRef_f13B;
texture<float,2,hipReadModeElementType> texRef_f14B;
texture<float,2,hipReadModeElementType> texRef_f15B;
texture<float,2,hipReadModeElementType> texRef_f16B;
texture<float,2,hipReadModeElementType> texRef_f17B;
texture<float,2,hipReadModeElementType> texRef_f18B;
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
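// BGK (single-relaxation-time) collision for the D3Q19 lattice: computes density and velocity
// from the 19 distributions, then relaxes each one toward its equilibrium at rate omega,
// written with fma() to reduce instruction count.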
inline __device__ void bgk_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
// float usqr = u*u+v*v+w*w;
float usqr = fma(u,u,fma(v,v,w*w));
f0 -= omega*fma(-0.3333333333f,(fma(-1.5f,usqr,rho)),f0);//(f0 -0.3333333333f*(fma(-1.5f,usqr,rho)));//rho-1.5f*usqr));
f1 -= omega*fma(-0.0555555556f,fma(3.0f, u ,rho)+fma(4.5f,u*u,-1.5f*usqr),f1);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f2 -= omega*fma(-0.0555555556f,fma(3.0f, v ,rho)+fma(4.5f,v*v,-1.5f*usqr),f2);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f3 -= omega*fma(-0.0555555556f,fma(3.0f, u ,rho)+fma(4.5f,u*u,-1.5f*usqr),f3);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f4 -= omega*fma(-0.0555555556f,fma(3.0f, v ,rho)+fma(4.5f,v*v,-1.5f*usqr),f4);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f5 -= omega*fma(-0.0555555556f,fma(3.0f,( u+v),rho)+fma(4.5f,( u+v)*( u+v),-1.5f*usqr),f5 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f6 -= omega*fma(-0.0555555556f,fma(3.0f,(-u+v),rho)+fma(4.5f,(-u+v)*(-u+v),-1.5f*usqr),f6 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f7 -= omega*fma(-0.0555555556f,fma(3.0f,(-u-v),rho)+fma(4.5f,(-u-v)*(-u-v),-1.5f*usqr),f7 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f8 -= omega*fma(-0.0555555556f,fma(3.0f,( u-v),rho)+fma(4.5f,( u-v)*( u-v),-1.5f*usqr),f8 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f9 -= omega*fma(-0.0555555556f,fma(3.0f,( w),rho)+fma(4.5f,( w)*( w),-1.5f*usqr),f9 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f10-= omega*fma(-0.0277777778f,fma(3.0f,( u+w),rho)+fma(4.5f,( u+w)*( u+w),-1.5f*usqr),f10);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f11-= omega*fma(-0.0277777778f,fma(3.0f,( v+w),rho)+fma(4.5f,( v+w)*( v+w),-1.5f*usqr),f11);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f12-= omega*fma(-0.0277777778f,fma(3.0f,(-u+w),rho)+fma(4.5f,(-u+w)*(-u+w),-1.5f*usqr),f12);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f13-= omega*fma(-0.0277777778f,fma(3.0f,(-v+w),rho)+fma(4.5f,(-v+w)*(-v+w),-1.5f*usqr),f13);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f14-= omega*fma(-0.0555555556f,fma(3.0f,( -w),rho)+fma(4.5f,( -w)*( -w),-1.5f*usqr),f14);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f15-= omega*fma(-0.0277777778f,fma(3.0f,( u-w),rho)+fma(4.5f,( u-w)*( u-w),-1.5f*usqr),f15);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f16-= omega*fma(-0.0277777778f,fma(3.0f,( v-w),rho)+fma(4.5f,( v-w)*( v-w),-1.5f*usqr),f16);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f17-= omega*fma(-0.0277777778f,fma(3.0f,(-u-w),rho)+fma(4.5f,(-u-w)*(-u-w),-1.5f*usqr),f17);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f18-= omega*fma(-0.0277777778f,fma(3.0f,(-v-w),rho)+fma(4.5f,(-v-w)*(-v-w),-1.5f*usqr),f18);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f0 = f0 -omega*(f0 -0.3333333333f*(rho-1.5f*usqr));
// f1 = f1 -omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 = f2 -omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 = f3 -omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 = f4 -omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 = f5 -omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 = f6 -omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 = f7 -omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 = f8 -omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 = f9 -omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10= f10-omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11= f11-omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12= f12-omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13= f13-omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14= f14-omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15= f15-omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16= f16-omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17= f17-omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18= f18-omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
}
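// MRT (multiple-relaxation-time) collision: projects the distributions onto moment space
// (m1..m18), relaxes the non-conserved moments toward their equilibria, and maps the
// corrections back onto f0..f18.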
__device__ void mrt_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ 1.f*f8+ -4.f*f9+ f10+ 1.f*f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
m1 -= -11.f*rho+19.f*(u*u+v*v+w*w);
m2 -= -7.53968254f*(u*u+v*v+w*w);
m4 -= -0.66666667f*u;//qx_eq
m6 -= -0.66666667f*v;//qx_eq
m8 -= -0.66666667f*w;//qx_eq
m9 -= (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq
m11-= (v*v-w*w);//pww_eq
m13-= u*v;//pxy_eq
m14-= v*w;//pyz_eq
m15-= u*w;//pxz_eq
f0 -= - 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -= -0.0045948204f*(m1)+ -0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10);
f2 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m6) + -0.027777778f*(m9)*omega + 0.027777778f*(m10);
f3 -= -0.0045948204f*(m1)+ -0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10);
f4 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m6) + -0.027777778f*(m9)*omega + 0.027777778f*(m10);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f9 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10);
f10 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f11 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
f12 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f13 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
f14 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10);
f15 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f16 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
f17 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f18 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
f2 -= 0.083333333f*(m11)*omega + -0.083333333f*(m12);
f4 -= 0.083333333f*(m11)*omega + -0.083333333f*(m12);
f5 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ ( 0.25f*(m13) )*omega;
f6 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ (-0.25f*(m13) )*omega;
f7 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ ( 0.25f*(m13) )*omega;
f8 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ (-0.25f*(m13) )*omega;
f9 -= -0.083333333f*(m11)*omega + 0.083333333f*(m12);
f10 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + 0.25f*(m15))*omega ;
f11 -= +( 0.25f*(m14) )*omega ;
f12 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + -0.25f*(m15))*omega ;
f13 -= +( -0.25f*(m14) )*omega ;
f14 -= -0.083333333f*(m11)*omega + 0.083333333f*(m12);
f15 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + -0.25f*(m15))*omega ;
f16 -= +( -0.25f*(m14) )*omega ;
f17 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + 0.25f*(m15))*omega ;
f18 -= +( 0.25f*(m14) )*omega ;
f5 -= 0.125f*(m16)+ -0.125f*(m17);
f6 -= -0.125f*(m16)+ -0.125f*(m17);
f7 -= -0.125f*(m16)+ 0.125f*(m17);
f8 -= 0.125f*(m16)+ 0.125f*(m17);
f10 -= -0.125f*(m16) + 0.125f*(m18);
f11 -= + 0.125f*(m17)+ -0.125f*(m18);
f12 -= 0.125f*(m16) + 0.125f*(m18);
f13 -= + -0.125f*(m17)+ -0.125f*(m18);
f15 -= -0.125f*(m16) + -0.125f*(m18);
f16 -= + 0.125f*(m17)+ 0.125f*(m18);
f17 -= 0.125f*(m16) + -0.125f*(m18);
f18 -= + -0.125f*(m17)+ 0.125f*(m18);
}
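// Linear index into the distribution array: each of the 19 components occupies its own
// contiguous pitch*height*depth slab.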
__device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int height, int depth)
{
// if (x<0 || x>pitch || y<0 || y>height || z<0 || z>depth) return 0;
// else
return (x+y*pitch+z*height*pitch)+f_num*pitch*height*depth;
}
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__global__ void simple_copy(float* fA, float* fB,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
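// Bandwidth/debug kernel: currently copies only the f0 slab from fA to fB; the per-component
// copies and texture fetches are left commented out below.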
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
// fB[f_mem(1 ,x,y,z,pitch,height,depth)] = fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// fB[f_mem(2 ,x,y,z,pitch,height,depth)] = fA[f_mem(2 ,x,y,z,pitch,height,depth)];
// fB[f_mem(3 ,x,y,z,pitch,height,depth)] = fA[f_mem(3 ,x,y,z,pitch,height,depth)];
// fB[f_mem(4 ,x,y,z,pitch,height,depth)] = fA[f_mem(4 ,x,y,z,pitch,height,depth)];
// fB[f_mem(5 ,x,y,z,pitch,height,depth)] = fA[f_mem(5 ,x,y,z,pitch,height,depth)];
// fB[f_mem(6 ,x,y,z,pitch,height,depth)] = fA[f_mem(6 ,x,y,z,pitch,height,depth)];
// fB[f_mem(7 ,x,y,z,pitch,height,depth)] = fA[f_mem(7 ,x,y,z,pitch,height,depth)];
// fB[f_mem(8 ,x,y,z,pitch,height,depth)] = fA[f_mem(8 ,x,y,z,pitch,height,depth)];
// fB[f_mem(9 ,x,y,z,pitch,height,depth)] = fA[f_mem(9 ,x,y,z,pitch,height,depth)];
// fB[f_mem(10,x,y,z,pitch,height,depth)] = fA[f_mem(10,x,y,z,pitch,height,depth)];
// fB[f_mem(11,x,y,z,pitch,height,depth)] = fA[f_mem(11,x,y,z,pitch,height,depth)];
// fB[f_mem(12,x,y,z,pitch,height,depth)] = fA[f_mem(12,x,y,z,pitch,height,depth)];
// fB[f_mem(13,x,y,z,pitch,height,depth)] = fA[f_mem(13,x,y,z,pitch,height,depth)];
// fB[f_mem(14,x,y,z,pitch,height,depth)] = fA[f_mem(14,x,y,z,pitch,height,depth)];
// fB[f_mem(15,x,y,z,pitch,height,depth)] = fA[f_mem(15,x,y,z,pitch,height,depth)];
// fB[f_mem(16,x,y,z,pitch,height,depth)] = fA[f_mem(16,x,y,z,pitch,height,depth)];
// fB[f_mem(17,x,y,z,pitch,height,depth)] = fA[f_mem(17,x,y,z,pitch,height,depth)];
// fB[f_mem(18,x,y,z,pitch,height,depth)] = fA[f_mem(18,x,y,z,pitch,height,depth)];
// float f0;//,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// float f0 = fA[j+pitch*height*depth];
// float f0 = fA[f_mem(0 ,x,y,z,pitch,height,depth)];
// f0 = tex2D(texRef_f0A ,x,y+height*z);
// f1 = tex2D(texRef_f1A ,x,y+height*z);
// f2 = tex2D(texRef_f2A ,x,y+height*z);
// f3 = tex2D(texRef_f3A ,x,y+height*z);
// f4 = tex2D(texRef_f4A ,x,y+height*z);
// f5 = tex2D(texRef_f5A ,x,y+height*z);
// f6 = tex2D(texRef_f6A ,x,y+height*z);
// f7 = tex2D(texRef_f7A ,x,y+height*z);
// f8 = tex2D(texRef_f8A ,x,y+height*z);
// f9 = tex2D(texRef_f9A ,x,y+height*z);
// f10 = tex2D(texRef_f10A,x,y+height*z);
// f11 = tex2D(texRef_f11A,x,y+height*z);
// f12 = tex2D(texRef_f12A,x,y+height*z);
// f13 = tex2D(texRef_f13A,x,y+height*z);
// f14 = tex2D(texRef_f14A,x,y+height*z);
// f15 = tex2D(texRef_f15A,x,y+height*z);
// f16 = tex2D(texRef_f16A,x,y+height*z);
// f17 = tex2D(texRef_f17A,x,y+height*z);
// f18 = tex2D(texRef_f18A,x,y+height*z);
// float f1 = fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f1 = fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f2 = fA[f_mem(2 ,x,y,z,pitch,height,depth)];
// f3 = fA[f_mem(3 ,x,y,z,pitch,height,depth)];
// f4 = fA[f_mem(4 ,x,y,z,pitch,height,depth)];
// f5 = fA[f_mem(5 ,x,y,z,pitch,height,depth)];
// f6 = fA[f_mem(6 ,x,y,z,pitch,height,depth)];
// f7 = fA[f_mem(7 ,x,y,z,pitch,height,depth)];
// f8 = fA[f_mem(8 ,x,y,z,pitch,height,depth)];
// f9 = fA[f_mem(9 ,x,y,z,pitch,height,depth)];
// f10 = fA[f_mem(10,x,y,z,pitch,height,depth)];
// f11 = fA[f_mem(11,x,y,z,pitch,height,depth)];
// f12 = fA[f_mem(12,x,y,z,pitch,height,depth)];
// f13 = fA[f_mem(13,x,y,z,pitch,height,depth)];
// f14 = fA[f_mem(14,x,y,z,pitch,height,depth)];
// f15 = fA[f_mem(15,x,y,z,pitch,height,depth)];
// f16 = fA[f_mem(16,x,y,z,pitch,height,depth)];
// f17 = fA[f_mem(17,x,y,z,pitch,height,depth)];
// f18 = fA[f_mem(18,x,y,z,pitch,height,depth)];
// fB[f_mem(0 ,x,y,z,pitch,height,depth)] = fA[f_mem(0 ,x,y,z,pitch,height,depth)];//+0.01f;
fB[j] = fA[j];//+0.01f;
// fB[j+pitch*height*depth+pitch*height*depth] = f2;
// fB[(x+y*pitch+z*height*pitch)+pitch*height*depth] = f1 ;//+0.01f;
// fB[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
// fB[f_mem(1 ,x,y,z,pitch,height,depth)] = f1;//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// fB[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;//+0.01f;
// fB[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;//+0.01f;
// fB[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;//+0.01f;
// fB[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;//+0.01f;
// fB[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;//+0.01f;
// fB[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;//+0.01f;
// fB[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;//+0.01f;
// fB[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;//+0.01f;
// fB[f_mem(10,x,y,z,pitch,height,depth)] = f10;//+0.01f;
// fB[f_mem(11,x,y,z,pitch,height,depth)] = f11;//+0.01f;
// fB[f_mem(12,x,y,z,pitch,height,depth)] = f12;//+0.01f;
// fB[f_mem(13,x,y,z,pitch,height,depth)] = f13;//+0.01f;
// fB[f_mem(14,x,y,z,pitch,height,depth)] = f14;//+0.01f;
// fB[f_mem(15,x,y,z,pitch,height,depth)] = f15;//+0.01f;
// fB[f_mem(16,x,y,z,pitch,height,depth)] = f16;//+0.01f;
// fB[f_mem(17,x,y,z,pitch,height,depth)] = f17;//+0.01f;
// fB[f_mem(18,x,y,z,pitch,height,depth)] = f18;//+0.01f;
}
//int const blockx = 192;
//int const blocky = 1;
__global__ void mrt_d_hybAB(float* fin, float* fout,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
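// A->B step with hybrid streaming: y/z-shifted neighbours are read directly from global
// memory, x-shifted neighbours through the 2D textures bound to set A. Solid nodes (im==1)
// are handled with bounce-back; im==3 applies a Dirichlet velocity boundary before the
// BGK collision.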
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
// f1out[j] = tex2D(texRef_f2A,x,y+h*z);
// int i = x+y*blockDim.x*gridDim.x;
//float u,v,w,rho;//,usqr;
int im = image[i];
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// f1 = fin[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];
// f3 = fin[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,height,depth)];
// f5 = fin[f_mem(7 ,dmin(x+1,width),dmin(y+1,height),z ,pitch,height,depth)];
// f7 = fin[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,height,depth)];
// f6 = fin[f_mem(8 ,dmax(x-1) ,dmin(y+1,height),z ,pitch,height,depth)];
// f8 = fin[f_mem(6 ,dmin(x+1,width),dmax(y-1) ,z ,pitch,height,depth)];
// f10= fin[f_mem(17,dmin(x+1,width),y ,dmin(z+1,depth) ,pitch,height,depth)];
// f12= fin[f_mem(15,dmax(x-1) ,y ,dmin(z+1,depth) ,pitch,height,depth)];
// f15= fin[f_mem(12,dmin(x+1,width),y ,dmax(z-1) ,pitch,height,depth)];
// f17= fin[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,height,depth)];
f2 = fin[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];
f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];
f9 = fin[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];
f11= fin[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];
f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];
f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];
f16= fin[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];
f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];
f3 = tex2D(texRef_f1A ,x-1,(y )+height*(z));
f1 = tex2D(texRef_f3A ,x+1,(y )+height*(z));
f5 = tex2D(texRef_f7A ,x+1,(y+1)+height*(z));
f6 = tex2D(texRef_f8A ,x-1,(y+1)+height*(z));
f7 = tex2D(texRef_f5A ,x-1,(y-1)+height*(z));
f8 = tex2D(texRef_f6A ,x+1,(y-1)+height*(z));
f10= tex2D(texRef_f17A,x+1,(y )+height*(z+1));
f12= tex2D(texRef_f15A,x-1,(y )+height*(z+1));
f17= tex2D(texRef_f10A,x-1,(y )+height*(z-1));
f15= tex2D(texRef_f12A,x+1,(y )+height*(z-1));
fout[j+pitch*height*depth*1 ] = f1 ;
fout[j+pitch*height*depth*2 ] = f2 ;
fout[j+pitch*height*depth*3 ] = f3 ;
fout[j+pitch*height*depth*4 ] = f4 ;
fout[j+pitch*height*depth*5 ] = f5 ;
fout[j+pitch*height*depth*6 ] = f6 ;
fout[j+pitch*height*depth*7 ] = f7 ;
fout[j+pitch*height*depth*8 ] = f8 ;
fout[j+pitch*height*depth*9 ] = f9 ;
fout[j+pitch*height*depth*10] = f10;
fout[j+pitch*height*depth*11] = f11;
fout[j+pitch*height*depth*12] = f12;
fout[j+pitch*height*depth*13] = f13;
fout[j+pitch*height*depth*14] = f14;
fout[j+pitch*height*depth*15] = f15;
fout[j+pitch*height*depth*16] = f16;
fout[j+pitch*height*depth*17] = f17;
fout[j+pitch*height*depth*18] = f18;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// f1 = fin[f_mem(1 ,x-1,y ,z ,pitch,height,depth)];
// f3 = fin[f_mem(3 ,x+1,y ,z ,pitch,height,depth)];
// f5 = fin[f_mem(5 ,x-1,y-1,z ,pitch,height,depth)];
// f6 = fin[f_mem(6 ,x+1,y-1,z ,pitch,height,depth)];
// f7 = fin[f_mem(7 ,x+1,y+1,z ,pitch,height,depth)];
// f8 = fin[f_mem(8 ,x-1,y+1,z ,pitch,height,depth)];
// f10= fin[f_mem(10,x-1,y ,z-1,pitch,height,depth)];
// f12= fin[f_mem(12,x+1,y ,z-1,pitch,height,depth)];
// f15= fin[f_mem(15,x-1,y ,z+1,pitch,height,depth)];
// f17= fin[f_mem(17,x+1,y ,z+1,pitch,height,depth)];
f0 = fin[j];
f2 = fin[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
f4 = fin[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
f9 = fin[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
f11= fin[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
f13= fin[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
f14= fin[f_mem(14,x ,y ,z+1,pitch,height,depth)];
f16= fin[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
f18= fin[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
f1 = tex2D(texRef_f1A ,x-1,y +height*(z));
f3 = tex2D(texRef_f3A ,x+1,y +height*(z));
f5 = tex2D(texRef_f5A ,x-1,y-1+height*(z));
f6 = tex2D(texRef_f6A ,x+1,y-1+height*(z));
f7 = tex2D(texRef_f7A ,x+1,y+1+height*(z));
f8 = tex2D(texRef_f8A ,x-1,y+1+height*(z));
f15= tex2D(texRef_f15A,x-1,y +height*(z+1));
f17= tex2D(texRef_f17A,x+1,y +height*(z+1));
f10= tex2D(texRef_f10A,x-1,y +height*(z-1));
f12= tex2D(texRef_f12A,x+1,y +height*(z-1));
if(im == 3)//DirichletWest
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == height-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
if(z == depth-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
// float fInt1,fInt2;//,fDiff;
float u,v,w,rho;
u = 0.0f;//*PoisProf(zcoord)*1.5;
v = uMax;//0.0;
w = 0.0f;
// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18;
// fInt2 = f3+f6+f7+f12+f17;
// rho = u+(fInt1+2.0f*fInt2); //D2Q9i
rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float usqr = u*u+v*v+w*w;
f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
//mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fout[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fout[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fout[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fout[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fout[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fout[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fout[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fout[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fout[f_mem(18,x,y,z,pitch,height,depth)] = f18;
}
}
__global__ void mrt_d_hybBA(float* fin, float* fout,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
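// Same scheme as mrt_d_hybAB, but reading from the B texture/array set for the B->A half
// of the ping-pong.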
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
int im = image[i];
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f2 = fin[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];
f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];
f9 = fin[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];
f11= fin[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];
f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];
f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];
f16= fin[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];
f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];
f3 = tex2D(texRef_f1B ,x-1,(y )+height*(z));
f1 = tex2D(texRef_f3B ,x+1,(y )+height*(z));
f5 = tex2D(texRef_f7B ,x+1,(y+1)+height*(z));
f6 = tex2D(texRef_f8B ,x-1,(y+1)+height*(z));
f7 = tex2D(texRef_f5B ,x-1,(y-1)+height*(z));
f8 = tex2D(texRef_f6B ,x+1,(y-1)+height*(z));
f10= tex2D(texRef_f17B,x+1,(y )+height*(z+1));
f12= tex2D(texRef_f15B,x-1,(y )+height*(z+1));
f17= tex2D(texRef_f10B,x-1,(y )+height*(z-1));
f15= tex2D(texRef_f12B,x+1,(y )+height*(z-1));
fout[j+pitch*height*depth*1 ] = f1 ;
fout[j+pitch*height*depth*2 ] = f2 ;
fout[j+pitch*height*depth*3 ] = f3 ;
fout[j+pitch*height*depth*4 ] = f4 ;
fout[j+pitch*height*depth*5 ] = f5 ;
fout[j+pitch*height*depth*6 ] = f6 ;
fout[j+pitch*height*depth*7 ] = f7 ;
fout[j+pitch*height*depth*8 ] = f8 ;
fout[j+pitch*height*depth*9 ] = f9 ;
fout[j+pitch*height*depth*10] = f10;
fout[j+pitch*height*depth*11] = f11;
fout[j+pitch*height*depth*12] = f12;
fout[j+pitch*height*depth*13] = f13;
fout[j+pitch*height*depth*14] = f14;
fout[j+pitch*height*depth*15] = f15;
fout[j+pitch*height*depth*16] = f16;
fout[j+pitch*height*depth*17] = f17;
fout[j+pitch*height*depth*18] = f18;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fin[j];
f2 = fin[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
f4 = fin[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
f9 = fin[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
f11= fin[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
f13= fin[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
f14= fin[f_mem(14,x ,y ,z+1,pitch,height,depth)];
f16= fin[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
f18= fin[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
f1 = tex2D(texRef_f1B ,x-1,y +height*(z));
f3 = tex2D(texRef_f3B ,x+1,y +height*(z));
f5 = tex2D(texRef_f5B ,x-1,y-1+height*(z));
f6 = tex2D(texRef_f6B ,x+1,y-1+height*(z));
f7 = tex2D(texRef_f7B ,x+1,y+1+height*(z));
f8 = tex2D(texRef_f8B ,x-1,y+1+height*(z));
f15= tex2D(texRef_f15B,x-1,y +height*(z+1));
f17= tex2D(texRef_f17B,x+1,y +height*(z+1));
f10= tex2D(texRef_f10B,x-1,y +height*(z-1));
f12= tex2D(texRef_f12B,x+1,y +height*(z-1));
if(im == 3)//DirichletWest
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == height-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
if(z == depth-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
// float fInt1,fInt2;//,fDiff;
float u,v,w,rho;
u = 0.0f;//*PoisProf(zcoord)*1.5;
v = uMax;//0.0;
w = 0.0f;
// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18;
// fInt2 = f3+f6+f7+f12+f17;
// rho = u+(fInt1+2.0f*fInt2); //D2Q9i
rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float usqr = u*u+v*v+w*w;
f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
//mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fout[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fout[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fout[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fout[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fout[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fout[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fout[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fout[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fout[f_mem(18,x,y,z,pitch,height,depth)] = f18;
}
}
__global__ void mrt_d_textAB(float* fin, float* fout,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
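// A->B step with all streamed neighbours (x, y and z shifts) fetched through the 2D textures
// bound to set A; only the rest population f0 is read from the linear array.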
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
int im = image[i];
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f2 = tex2D(texRef_f4A ,x ,(y+1)+height*(z ));
f4 = tex2D(texRef_f2A ,x ,(y-1)+height*(z ));
f9 = tex2D(texRef_f14A,x ,(y )+height*(z+1));
f14= tex2D(texRef_f9A ,x ,(y )+height*(z-1));
f11= tex2D(texRef_f18A,x ,(y+1)+height*(z+1));
f18= tex2D(texRef_f11A,x ,(y-1)+height*(z-1));
f16= tex2D(texRef_f13A,x ,(y+1)+height*(z-1));
f13= tex2D(texRef_f16A,x ,(y-1)+height*(z+1));
// f2 = fin[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];
// f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];
// f9 = fin[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];
// f11= fin[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];
// f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];
// f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];
// f16= fin[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];
// f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];
f3 = tex2D(texRef_f1A ,x-1,(y )+height*(z));
f1 = tex2D(texRef_f3A ,x+1,(y )+height*(z));
f5 = tex2D(texRef_f7A ,x+1,(y+1)+height*(z));
f6 = tex2D(texRef_f8A ,x-1,(y+1)+height*(z));
f7 = tex2D(texRef_f5A ,x-1,(y-1)+height*(z));
f8 = tex2D(texRef_f6A ,x+1,(y-1)+height*(z));
f10= tex2D(texRef_f17A,x+1,(y )+height*(z+1));
f12= tex2D(texRef_f15A,x-1,(y )+height*(z+1));
f17= tex2D(texRef_f10A,x-1,(y )+height*(z-1));
f15= tex2D(texRef_f12A,x+1,(y )+height*(z-1));
fout[j+pitch*height*depth*1 ] = f1 ;
fout[j+pitch*height*depth*2 ] = f2 ;
fout[j+pitch*height*depth*3 ] = f3 ;
fout[j+pitch*height*depth*4 ] = f4 ;
fout[j+pitch*height*depth*5 ] = f5 ;
fout[j+pitch*height*depth*6 ] = f6 ;
fout[j+pitch*height*depth*7 ] = f7 ;
fout[j+pitch*height*depth*8 ] = f8 ;
fout[j+pitch*height*depth*9 ] = f9 ;
fout[j+pitch*height*depth*10] = f10;
fout[j+pitch*height*depth*11] = f11;
fout[j+pitch*height*depth*12] = f12;
fout[j+pitch*height*depth*13] = f13;
fout[j+pitch*height*depth*14] = f14;
fout[j+pitch*height*depth*15] = f15;
fout[j+pitch*height*depth*16] = f16;
fout[j+pitch*height*depth*17] = f17;
fout[j+pitch*height*depth*18] = f18;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fin[j];
f2 = tex2D(texRef_f2A ,x ,y-1+height*(z));
f4 = tex2D(texRef_f4A ,x ,y+1+height*(z));
f9 = tex2D(texRef_f9A ,x ,y +height*(z-1));
f11= tex2D(texRef_f11A,x ,y-1+height*(z-1));
f13= tex2D(texRef_f13A,x ,y+1+height*(z-1));
f14= tex2D(texRef_f14A,x ,y +height*(z+1));
f16= tex2D(texRef_f16A,x ,y-1+height*(z+1));
f18= tex2D(texRef_f18A,x ,y+1+height*(z+1));
// f2 = fin[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
// f4 = fin[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
// f9 = fin[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
// f11= fin[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
// f13= fin[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
// f14= fin[f_mem(14,x ,y ,z+1,pitch,height,depth)];
// f16= fin[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
// f18= fin[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
f1 = tex2D(texRef_f1A ,x-1,y +height*(z));
f3 = tex2D(texRef_f3A ,x+1,y +height*(z));
f5 = tex2D(texRef_f5A ,x-1,y-1+height*(z));
f6 = tex2D(texRef_f6A ,x+1,y-1+height*(z));
f7 = tex2D(texRef_f7A ,x+1,y+1+height*(z));
f8 = tex2D(texRef_f8A ,x-1,y+1+height*(z));
f15= tex2D(texRef_f15A,x-1,y +height*(z+1));
f17= tex2D(texRef_f17A,x+1,y +height*(z+1));
f10= tex2D(texRef_f10A,x-1,y +height*(z-1));
f12= tex2D(texRef_f12A,x+1,y +height*(z-1));
if(im == 3)//DirichletWest
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == height-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
if(z == depth-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
// float fInt1,fInt2;//,fDiff;
float u,v,w,rho;
u = 0.0f;//*PoisProf(zcoord)*1.5;
v = uMax;//0.0;
w = 0.0f;
// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18;
// fInt2 = f3+f6+f7+f12+f17;
// rho = u+(fInt1+2.0f*fInt2); //D2Q9i
rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float usqr = u*u+v*v+w*w;
f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
//mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fout[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fout[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fout[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fout[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fout[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fout[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fout[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fout[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fout[f_mem(18,x,y,z,pitch,height,depth)] = f18;
}
}
__global__ void mrt_d_textBA(float* fin, float* fout,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
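// Texture-only B->A step, mirroring mrt_d_textAB but reading the B texture set.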
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
int im = image[i];
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f2 = tex2D(texRef_f4B ,x ,(y+1)+height*(z ));
f4 = tex2D(texRef_f2B ,x ,(y-1)+height*(z ));
f9 = tex2D(texRef_f14B,x ,(y )+height*(z+1));
f14= tex2D(texRef_f9B ,x ,(y )+height*(z-1));
f11= tex2D(texRef_f18B,x ,(y+1)+height*(z+1));
f18= tex2D(texRef_f11B,x ,(y-1)+height*(z-1));
f16= tex2D(texRef_f13B,x ,(y+1)+height*(z-1));
f13= tex2D(texRef_f16B,x ,(y-1)+height*(z+1));
// f2 = fin[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];
// f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];
// f9 = fin[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];
// f11= fin[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];
// f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];
// f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];
// f16= fin[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];
// f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];
f3 = tex2D(texRef_f1B ,x-1,(y )+height*(z));
f1 = tex2D(texRef_f3B ,x+1,(y )+height*(z));
f5 = tex2D(texRef_f7B ,x+1,(y+1)+height*(z));
f6 = tex2D(texRef_f8B ,x-1,(y+1)+height*(z));
f7 = tex2D(texRef_f5B ,x-1,(y-1)+height*(z));
f8 = tex2D(texRef_f6B ,x+1,(y-1)+height*(z));
f10= tex2D(texRef_f17B,x+1,(y )+height*(z+1));
f12= tex2D(texRef_f15B,x-1,(y )+height*(z+1));
f17= tex2D(texRef_f10B,x-1,(y )+height*(z-1));
f15= tex2D(texRef_f12B,x+1,(y )+height*(z-1));
fout[j+pitch*height*depth*1 ] = f1 ;
fout[j+pitch*height*depth*2 ] = f2 ;
fout[j+pitch*height*depth*3 ] = f3 ;
fout[j+pitch*height*depth*4 ] = f4 ;
fout[j+pitch*height*depth*5 ] = f5 ;
fout[j+pitch*height*depth*6 ] = f6 ;
fout[j+pitch*height*depth*7 ] = f7 ;
fout[j+pitch*height*depth*8 ] = f8 ;
fout[j+pitch*height*depth*9 ] = f9 ;
fout[j+pitch*height*depth*10] = f10;
fout[j+pitch*height*depth*11] = f11;
fout[j+pitch*height*depth*12] = f12;
fout[j+pitch*height*depth*13] = f13;
fout[j+pitch*height*depth*14] = f14;
fout[j+pitch*height*depth*15] = f15;
fout[j+pitch*height*depth*16] = f16;
fout[j+pitch*height*depth*17] = f17;
fout[j+pitch*height*depth*18] = f18;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fin[j];
f2 = tex2D(texRef_f2B ,x ,y-1+height*(z));
f4 = tex2D(texRef_f4B ,x ,y+1+height*(z));
f9 = tex2D(texRef_f9B ,x ,y +height*(z-1));
f11= tex2D(texRef_f11B,x ,y-1+height*(z-1));
f13= tex2D(texRef_f13B,x ,y+1+height*(z-1));
f14= tex2D(texRef_f14B,x ,y +height*(z+1));
f16= tex2D(texRef_f16B,x ,y-1+height*(z+1));
f18= tex2D(texRef_f18B,x ,y+1+height*(z+1));
// f2 = fin[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
// f4 = fin[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
// f9 = fin[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
// f11= fin[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
// f13= fin[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
// f14= fin[f_mem(14,x ,y ,z+1,pitch,height,depth)];
// f16= fin[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
// f18= fin[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
f1 = tex2D(texRef_f1B ,x-1,y +height*(z));
f3 = tex2D(texRef_f3B ,x+1,y +height*(z));
f5 = tex2D(texRef_f5B ,x-1,y-1+height*(z));
f6 = tex2D(texRef_f6B ,x+1,y-1+height*(z));
f7 = tex2D(texRef_f7B ,x+1,y+1+height*(z));
f8 = tex2D(texRef_f8B ,x-1,y+1+height*(z));
f15= tex2D(texRef_f15B,x-1,y +height*(z+1));
f17= tex2D(texRef_f17B,x+1,y +height*(z+1));
f10= tex2D(texRef_f10B,x-1,y +height*(z-1));
f12= tex2D(texRef_f12B,x+1,y +height*(z-1));
if(im == 3)//DirichletWest
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == height-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
if(z == depth-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
// float fInt1,fInt2;//,fDiff;
float u,v,w,rho;
u = 0.0f;//*PoisProf(zcoord)*1.5;
v = uMax;//0.0;
w = 0.0f;
// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18;
// fInt2 = f3+f6+f7+f12+f17;
// rho = u+(fInt1+2.0f*fInt2); //D2Q9i
rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float usqr = u*u+v*v+w*w;
f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
//mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fout[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fout[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fout[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fout[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fout[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fout[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fout[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fout[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fout[f_mem(18,x,y,z,pitch,height,depth)] = f18;
}
}
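// mrt_d_shared: one D3Q19 LBM time step (despite the name, the collision used below is BGK).
// Each thread owns one lattice node; the x-coupled populations of every node in the block
// are staged in shared memory so the x-1/x+1 neighbours can be read from shared memory
// instead of global memory. The boundary flag im is recomputed on the fly:
//   1 = bounce-back wall (populations are only reflected, no collision),
//   3 = Dirichlet velocity inlet on the west (x == 0) face.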
__global__ void mrt_d_shared(float* fA, float* fB,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
// int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
// f1out[j] = tex2D(texRef_f2A,x,y+h*z);
// int i = x+y*blockDim.x*gridDim.x;
//float u,v,w,rho;//,usqr;
// int im = image[i];
int im = 0;
if(y == 0 || z == 0 || x == width-1 || y == height-1 || z == depth-1) im = 1;
else if (x == 0) im = 3;
__shared__ float f1_s[256];
__shared__ float f3_s[256];
__shared__ float f5_s[256];
__shared__ float f7_s[256];
__shared__ float f6_s[256];
__shared__ float f8_s[256];
__shared__ float f10_s[256];
__shared__ float f12_s[256];
__shared__ float f15_s[256];
__shared__ float f17_s[256];
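	// stage this node's x-coupled populations in shared memory so the threads at
	// x-1 and x+1 in the same block can stream them without extra global reads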
f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch,height,depth)];//dmax(x-1)
f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch,height,depth)];//dmin(x+1,width)
	// the y and z checks are independent (an interior node needs both its y-1 and y+1
	// loads, and both its z-1 and z+1 loads), so these are plain "if"s rather than
	// "else if"s, with height-1/depth-1 as the upper bounds
	if(y != 0){
		f5_s[threadIdx.x] = fA[f_mem(5 ,x ,y-1,z ,pitch,height,depth)];
		f8_s[threadIdx.x] = fA[f_mem(8 ,x ,y-1,z ,pitch,height,depth)];
	}
	if(y != height-1){
		f7_s[threadIdx.x] = fA[f_mem(7 ,x ,y+1,z ,pitch,height,depth)];
		f6_s[threadIdx.x] = fA[f_mem(6 ,x ,y+1,z ,pitch,height,depth)];
	}
	if(z != 0){
		f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,z-1,pitch,height,depth)];
		f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,z-1,pitch,height,depth)];
	}
	if(z != depth-1){
		f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,z+1,pitch,height,depth)];
		f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,z+1,pitch,height,depth)];
	}
// f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch,height,depth)];//dmax(x-1)
// f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch,height,depth)];//dmin(x+1,width)
// f5_s[threadIdx.x] = fA[f_mem(5 ,x ,dmax(y-1) ,z ,pitch,height,depth)];//dmax(x-1)
// f7_s[threadIdx.x] = fA[f_mem(7 ,x ,dmin(y+1,height),z ,pitch,height,depth)];//dmin(x+1,width)
// f6_s[threadIdx.x] = fA[f_mem(6 ,x ,dmin(y+1,height),z ,pitch,height,depth)];//dmin(x+1,width)
// f8_s[threadIdx.x] = fA[f_mem(8 ,x ,dmax(y-1) ,z ,pitch,height,depth)];//dmax(x-1)
// f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,dmax(z-1) ,pitch,height,depth)];//dmax(x-1)
// f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,dmax(z-1) ,pitch,height,depth)];//dmin(x+1,width)
// f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];//dmax(x-1)
// f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];//dmin(x+1,width)
//
__syncthreads();
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
		// bounce-back gathers: the y and z branches are independent "if"s so that a
		// wall node that is interior in y or z picks up both halves
		if(y != 0){
			f4 = fA[f_mem(2 ,x ,y-1 ,z ,pitch,height,depth)];//fA[f_mem(4 ,x,y,z,pitch,height,depth)];
		}
		if(y != height-1){
			f2 = fA[f_mem(4 ,x ,y+1 ,z ,pitch,height,depth)];//fA[f_mem(2 ,x,y,z,pitch,height,depth)];
		}
		if(z != depth-1){
			f9 = fA[f_mem(14,x ,y ,z+1,pitch,height,depth)];//fA[f_mem(9 ,x,y,z,pitch,height,depth)];
			f11= fA[f_mem(18,x ,y+1 ,z+1,pitch,height,depth)];//fA[f_mem(11,x,y,z,pitch,height,depth)];
			f13= fA[f_mem(16,x ,y-1 ,z+1,pitch,height,depth)];//fA[f_mem(13,x,y,z,pitch,height,depth)];
		}
		if(z != 0){
			f14= fA[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];//fA[f_mem(14,x,y,z,pitch,height,depth)];
			f16= fA[f_mem(13,x ,y+1 ,z-1,pitch,height,depth)];//fA[f_mem(16,x,y,z,pitch,height,depth)];
			f18= fA[f_mem(11,x ,y-1 ,z-1,pitch,height,depth)];//fA[f_mem(18,x,y,z,pitch,height,depth)];
		}
// f2 = fA[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(2 ,x,y,z,pitch,height,depth)];
// f4 = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(4 ,x,y,z,pitch,height,depth)];
// f9 = fA[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(9 ,x,y,z,pitch,height,depth)];
// f11= fA[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(11,x,y,z,pitch,height,depth)];
// f13= fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(13,x,y,z,pitch,height,depth)];
// f14= fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(14,x,y,z,pitch,height,depth)];
// f16= fA[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];//fA[f_mem(16,x,y,z,pitch,height,depth)];
// f18= fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(18,x,y,z,pitch,height,depth)];
// f1 = fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f3 = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,height,depth)];//fA[f_mem(3 ,x,y,z,pitch,height,depth)];
// f5 = fA[f_mem(7 ,dmin(x+1,width),dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(5 ,x,y,z,pitch,height,depth)];
// f7 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(7 ,x,y,z,pitch,height,depth)];
// f6 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(6 ,x,y,z,pitch,height,depth)];
// f8 = fA[f_mem(6 ,dmin(x+1,width),dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(8 ,x,y,z,pitch,height,depth)];
// f10= fA[f_mem(17,dmin(x+1,width),y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(10,x,y,z,pitch,height,depth)];
// f12= fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(12,x,y,z,pitch,height,depth)];
// f15= fA[f_mem(12,dmin(x+1,width),y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(15,x,y,z,pitch,height,depth)];
// f17= fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(17,x,y,z,pitch,height,depth)];
		// bounce-back of the x-coupled populations from the neighbouring threads'
		// shared-memory entries; the two branches are independent "if"s
		if(threadIdx.x != width-1){
			f1 = f3_s[threadIdx.x+1];
			f5 = f7_s[threadIdx.x+1];
			f8 = f6_s[threadIdx.x+1];
			f10=f17_s[threadIdx.x+1];
			f15=f12_s[threadIdx.x+1];
		}
		if(threadIdx.x != 0){
			f3 = f1_s[threadIdx.x-1];
			f7 = f5_s[threadIdx.x-1];
			f6 = f8_s[threadIdx.x-1];
			f17=f10_s[threadIdx.x-1];
			f12=f15_s[threadIdx.x-1];
		}
// f1 = f3_s[dmin(threadIdx.x+1,width)];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f3 = f1_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f5 = f7_s[dmin(threadIdx.x+1,width)];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f7 = f5_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f6 = f8_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f8 = f6_s[dmin(threadIdx.x+1,width)];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f10=f17_s[dmin(threadIdx.x+1,width)];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f17=f10_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f12=f15_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f15=f12_s[dmin(threadIdx.x+1,width)];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
fB[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fB[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fB[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fB[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fB[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fB[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fB[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fB[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fB[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fB[f_mem(18,x,y,z,pitch,height,depth)] = f18;
// fB[j+pitch*height*depth*1 ] = f1 ;
// fB[j+pitch*height*depth*2 ] = f2 ;
// fB[j+pitch*height*depth*3 ] = f3 ;
// fB[j+pitch*height*depth*4 ] = f4 ;
// fB[j+pitch*height*depth*5 ] = f5 ;
// fB[j+pitch*height*depth*6 ] = f6 ;
// fB[j+pitch*height*depth*7 ] = f7 ;
// fB[j+pitch*height*depth*8 ] = f8 ;
// fB[j+pitch*height*depth*9 ] = f9 ;
// fB[j+pitch*height*depth*10] = f10;
// fB[j+pitch*height*depth*11] = f11;
// fB[j+pitch*height*depth*12] = f12;
// fB[j+pitch*height*depth*13] = f13;
// fB[j+pitch*height*depth*14] = f14;
// fB[j+pitch*height*depth*15] = f15;
// fB[j+pitch*height*depth*16] = f16;
// fB[j+pitch*height*depth*17] = f17;
// fB[j+pitch*height*depth*18] = f18;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fA[j];
		// pull streaming: y/z-coupled populations come from global memory, x-coupled
		// populations from shared memory; the branches are independent "if"s so an
		// interior node loads every direction
		if(y != 0){
			f2 = fA[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
		}
		if(y != height-1){
			f4 = fA[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
		}
		if(z != depth-1){
			f14= fA[f_mem(14,x ,y ,z+1,pitch,height,depth)];
			f16= fA[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
			f18= fA[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
		}
		if(z != 0){
			f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
			f11= fA[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
			f13= fA[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
		}
		if(threadIdx.x != width-1){
			f3 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,x+1,y ,z ,pitch,height,depth)];
			f6 = f6_s[threadIdx.x+1];//fA[f_mem(6 ,x+1,y-1,z ,pitch,height,depth)];
			f7 = f7_s[threadIdx.x+1];//fA[f_mem(7 ,x+1,y+1,z ,pitch,height,depth)];
			f12=f12_s[threadIdx.x+1];//fA[f_mem(12,x+1,y ,z-1,pitch,height,depth)];
			f17=f17_s[threadIdx.x+1];//fA[f_mem(17,x+1,y ,z+1,pitch,height,depth)];
		}
		if(threadIdx.x != 0){
			f1 = f1_s[threadIdx.x-1];//fA[f_mem(1 ,x-1,y ,z ,pitch,height,depth)];
			f5 = f5_s[threadIdx.x-1];//fA[f_mem(5 ,x-1,y-1,z ,pitch,height,depth)];
			f8 = f8_s[threadIdx.x-1];//fA[f_mem(8 ,x-1,y+1,z ,pitch,height,depth)];
			f10=f10_s[threadIdx.x-1];//fA[f_mem(10,x-1,y ,z-1,pitch,height,depth)];
			f15=f15_s[threadIdx.x-1];//fA[f_mem(15,x-1,y ,z+1,pitch,height,depth)];
		}
// f0 = fA[j];
// f2 = fA[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
// f4 = fA[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
// f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
// f11= fA[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
// f13= fA[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
// f14= fA[f_mem(14,x ,y ,z+1,pitch,height,depth)];
// f16= fA[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
// f18= fA[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
//
// f1 = f1_s[dmax(threadIdx.x-1 )];//fA[f_mem(1 ,x-1,y ,z ,pitch,height,depth)];
// f3 = f3_s[dmin(threadIdx.x+1,width)];//fA[f_mem(3 ,x+1,y ,z ,pitch,height,depth)];
// f5 = f5_s[dmax(threadIdx.x-1 )];//fA[f_mem(5 ,x-1,y-1,z ,pitch,height,depth)];
// f6 = f6_s[dmin(threadIdx.x+1,width)];//fA[f_mem(6 ,x+1,y-1,z ,pitch,height,depth)];
// f7 = f7_s[dmin(threadIdx.x+1,width)];//fA[f_mem(7 ,x+1,y+1,z ,pitch,height,depth)];
// f8 = f8_s[dmax(threadIdx.x-1 )];//fA[f_mem(8 ,x-1,y+1,z ,pitch,height,depth)];
// f10=f10_s[dmax(threadIdx.x-1 )];//fA[f_mem(10,x-1,y ,z-1,pitch,height,depth)];
// f12=f12_s[dmin(threadIdx.x+1,width)];//fA[f_mem(12,x+1,y ,z-1,pitch,height,depth)];
// f15=f15_s[dmax(threadIdx.x-1 )];//fA[f_mem(15,x-1,y ,z+1,pitch,height,depth)];
// f17=f17_s[dmin(threadIdx.x+1,width)];//fA[f_mem(17,x+1,y ,z+1,pitch,height,depth)];
if(im == 3)//DirichletWest
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == height-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
if(z == depth-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
// float fInt1,fInt2;//,fDiff;
float u,v,w,rho;
u = 0.0f;//*PoisProf(zcoord)*1.5;
v = uMax;//0.0;
w = 0.0f;
// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18;
// fInt2 = f3+f6+f7+f12+f17;
// rho = u+(fInt1+2.0f*fInt2); //D2Q9i
rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float usqr = u*u+v*v+w*w;
		f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
//mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fB[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fB[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fB[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fB[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fB[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fB[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fB[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fB[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fB[f_mem(18,x,y,z,pitch,height,depth)] = f18;
}
}
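// mrt_d_single: same D3Q19 time step as above, but each thread gathers all of its
// neighbour populations directly from global memory (pull streaming, no shared memory
// or textures); dmin/dmax clamp the neighbour indices at the domain faces.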
__global__ void mrt_d_single(float* fA, float* fB,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
// f1out[j] = tex2D(texRef_f2A,x,y+h*z);
// int i = x+y*blockDim.x*gridDim.x;
//float u,v,w,rho;//,usqr;
int im = image[i];
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f1 = fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
f2 = fA[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(2 ,x,y,z,pitch,height,depth)];
f3 = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,height,depth)];//fA[f_mem(3 ,x,y,z,pitch,height,depth)];
f4 = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(4 ,x,y,z,pitch,height,depth)];
f5 = fA[f_mem(7 ,dmin(x+1,width),dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(5 ,x,y,z,pitch,height,depth)];
f7 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(7 ,x,y,z,pitch,height,depth)];
f6 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(6 ,x,y,z,pitch,height,depth)];
f8 = fA[f_mem(6 ,dmin(x+1,width),dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(8 ,x,y,z,pitch,height,depth)];
f9 = fA[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(9 ,x,y,z,pitch,height,depth)];
f10= fA[f_mem(17,dmin(x+1,width),y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(10,x,y,z,pitch,height,depth)];
f11= fA[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(11,x,y,z,pitch,height,depth)];
f12= fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(12,x,y,z,pitch,height,depth)];
f13= fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(13,x,y,z,pitch,height,depth)];
f14= fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(14,x,y,z,pitch,height,depth)];
f15= fA[f_mem(12,dmin(x+1,width),y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(15,x,y,z,pitch,height,depth)];
f16= fA[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];//fA[f_mem(16,x,y,z,pitch,height,depth)];
f17= fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(17,x,y,z,pitch,height,depth)];
f18= fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(18,x,y,z,pitch,height,depth)];
fB[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fB[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fB[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fB[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fB[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fB[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fB[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fB[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fB[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fB[f_mem(18,x,y,z,pitch,height,depth)] = f18;
// fB[j+pitch*height*depth*1 ] = f1 ;
// fB[j+pitch*height*depth*2 ] = f2 ;
// fB[j+pitch*height*depth*3 ] = f3 ;
// fB[j+pitch*height*depth*4 ] = f4 ;
// fB[j+pitch*height*depth*5 ] = f5 ;
// fB[j+pitch*height*depth*6 ] = f6 ;
// fB[j+pitch*height*depth*7 ] = f7 ;
// fB[j+pitch*height*depth*8 ] = f8 ;
// fB[j+pitch*height*depth*9 ] = f9 ;
// fB[j+pitch*height*depth*10] = f10;
// fB[j+pitch*height*depth*11] = f11;
// fB[j+pitch*height*depth*12] = f12;
// fB[j+pitch*height*depth*13] = f13;
// fB[j+pitch*height*depth*14] = f14;
// fB[j+pitch*height*depth*15] = f15;
// fB[j+pitch*height*depth*16] = f16;
// fB[j+pitch*height*depth*17] = f17;
// fB[j+pitch*height*depth*18] = f18;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fA[j];
f1 = fA[f_mem(1 ,x-1,y ,z ,pitch,height,depth)];
f2 = fA[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
f3 = fA[f_mem(3 ,x+1,y ,z ,pitch,height,depth)];
f4 = fA[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
f5 = fA[f_mem(5 ,x-1,y-1,z ,pitch,height,depth)];
f6 = fA[f_mem(6 ,x+1,y-1,z ,pitch,height,depth)];
f7 = fA[f_mem(7 ,x+1,y+1,z ,pitch,height,depth)];
f8 = fA[f_mem(8 ,x-1,y+1,z ,pitch,height,depth)];
f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
f10= fA[f_mem(10,x-1,y ,z-1,pitch,height,depth)];
f11= fA[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
f12= fA[f_mem(12,x+1,y ,z-1,pitch,height,depth)];
f13= fA[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
f14= fA[f_mem(14,x ,y ,z+1,pitch,height,depth)];
f15= fA[f_mem(15,x-1,y ,z+1,pitch,height,depth)];
f16= fA[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
f17= fA[f_mem(17,x+1,y ,z+1,pitch,height,depth)];
f18= fA[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
if(im == 3)//DirichletWest
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == height-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
if(z == depth-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
// float fInt1,fInt2;//,fDiff;
float u,v,w,rho;
u = 0.0f;//*PoisProf(zcoord)*1.5;
v = uMax;//0.0;
w = 0.0f;
// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18;
// fInt2 = f3+f6+f7+f12+f17;
// rho = u+(fInt1+2.0f*fInt2); //D2Q9i
rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float usqr = u*u+v*v+w*w;
		f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
//mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fB[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fB[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fB[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fB[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fB[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fB[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fB[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fB[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fB[f_mem(18,x,y,z,pitch,height,depth)] = f18;
}
}
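// initialize_single: fills the pitched, structure-of-arrays field f (direction d is
// stored at offset d*pitch*height*depth) with the D3Q19 equilibrium for rho = 1 and
// zero velocity. As implied by the equilibria below, directions 1-4 are +/-x and +/-y,
// 9 and 14 are +/-z, 5-8 are the x-y diagonals, and 10-13/15-18 are the diagonals
// involving z.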
__global__ void initialize_single(float *f,
int width, int height, int depth, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.0f;
v = 0.0f;
w = 0.0f;
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
f[j+0 *pitch*height*depth]= 1.0f/3.0f*(rho-1.5f*usqr);
f[j+1 *pitch*height*depth]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+2 *pitch*height*depth]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+3 *pitch*height*depth]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+4 *pitch*height*depth]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+5 *pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f[j+6 *pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f[j+7 *pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f[j+8 *pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f[j+9 *pitch*height*depth]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+10*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
    f[j+11*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f[j+12*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
    f[j+13*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f[j+14*pitch*height*depth]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+15*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f[j+16*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f[j+17*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f[j+18*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
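// initialize: same equilibrium initialization as initialize_single, but written for
// one separate array per direction instead of a single structure-of-arrays block.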
__global__ void initialize(float* f0, float* f1, float* f2,
float* f3, float* f4, float* f5,
float* f6, float* f7, float* f8, float* f9,
float* f10, float* f11, float* f12,
float* f13, float* f14, float* f15,
float* f16, float* f17, float* f18,
int width, int height, size_t pitch)//pitch in elements
//__global__ void initialize(void** f0in, void** f1in,
// int w, int h, int pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
// int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
// f1out[j] = tex2D(texRef_f2A,x,y+h*z);
float u,v,w,rho,feq,usqr;
rho = 1.0f;
u = 0.0f;
v = 0.0f;
w = 0.0f;
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
feq = 1.0f/3.0f*(rho-1.5f*usqr);
f0[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f1[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f2[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f3[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f4[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f5[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f6[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f7[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f8[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f9[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f10[j] = feq;
    feq = 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f11[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f12[j] = feq;
    feq = 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f13[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f14[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f15[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f16[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f17[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f18[j] = feq;
}
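// Host driver: builds the boundary-flag image (0 = fluid, 1 = bounce-back, 3 = Dirichlet
// west), allocates two pitched 19-component distribution arrays fA/fB, binds every
// component of both arrays to a 2D texture, initializes fA on the device, ping-pongs
// mrt_d_shared between fA and fB for tMax steps, reports the runtime and MLUPS, and
// writes a Tecplot POINT file with u, v, w and rho.
// Note: the grid/nBlocks arithmetic below assumes xDim, yDim and zDim are exact
// multiples of the corresponding block dimensions.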
int main(int argc, char *argv[])
{
// float *f0_h, *f1_h, *f2_h, *f3_h, *f4_h, *f5_h, *f6_h, *f7_h, *f8_h, *f9_h;
// float *f10_h, *f11_h, *f12_h, *f13_h, *f14_h, *f15_h, *f16_h, *f17_h, *f18_h;
// float *f0_dA, *f1_dA, *f2_dA, *f3_dA, *f4_dA, *f5_dA, *f6_dA, *f7_dA, *f8_dA, *f9_dA;
// float *f10_dA, *f11_dA, *f12_dA, *f13_dA, *f14_dA, *f15_dA, *f16_dA, *f17_dA, *f18_dA;
// float *f0_dB, *f1_dB, *f2_dB, *f3_dB, *f4_dB, *f5_dB, *f6_dB, *f7_dB, *f8_dB, *f9_dB;
// float *f10_dB, *f11_dB, *f12_dB, *f13_dB, *f14_dB, *f15_dB, *f16_dB, *f17_dB, *f18_dB;
int *image_d, *image_h;
//hipPitchedPtr f0_d;
ofstream output;
output.open ("LBM1_out.dat");
size_t memsize, memsize_int;
size_t pitch;
int i, n, nBlocks, xDim, yDim, zDim,tMax;
float Re, omega, uMax, CharLength;
int BLOCKSIZEx = 256;
int BLOCKSIZEy = 1;
int BLOCKSIZEz = 1;
xDim = 256;
yDim = 128;
zDim = 32;
tMax = 100;
Re = 100.f;//100.f;
uMax = 0.08f;
CharLength = xDim-2.f;
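  // BGK relaxation rate: nu = uMax*CharLength/Re, tau = 3*nu + 0.5, omega = 1/tau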
omega = 1.0f/(3.0f*(uMax*CharLength/Re)+0.5f);
cout<<"omega: "<<omega<<endl;
cout<<"blocksize: "<<BLOCKSIZEx<<"x"<<BLOCKSIZEy<<"x"<<BLOCKSIZEz<<endl;
cout<<"grid: "<<xDim<<"x"<<yDim<<"x"<<zDim<<endl;
cout<<"tMax: "<<tMax<<endl;
nBlocks = (xDim/BLOCKSIZEx+xDim%BLOCKSIZEx)*(yDim/BLOCKSIZEy+yDim%BLOCKSIZEy)
*(zDim/BLOCKSIZEz+zDim%BLOCKSIZEz);
int B = BLOCKSIZEx*BLOCKSIZEy*BLOCKSIZEz;
n = nBlocks*B;//block*dimx*dimy
cout<<"nBlocks:"<<nBlocks<<endl;
dim3 threads(BLOCKSIZEx, BLOCKSIZEy, BLOCKSIZEz);
dim3 grid(xDim/BLOCKSIZEx,yDim/BLOCKSIZEy,zDim/BLOCKSIZEz);
memsize = n*sizeof(float);
memsize_int = n*sizeof(int);
hipExtent extent = make_hipExtent(xDim*sizeof(float),yDim,zDim);
image_h = (int *)malloc(memsize_int);
float *fA_h,*fA_d,*fB_d;
fA_h = (float *)malloc(memsize*19);
hipMallocPitch((void **) &fA_d, &pitch, xDim*sizeof(float), yDim*zDim*19);
hipMallocPitch((void **) &fB_d, &pitch, xDim*sizeof(float), yDim*zDim*19);
hipMalloc((void **) &image_d, memsize_int);
cout<<pitch<<endl;
size_t pitch_elements = pitch/sizeof(float);
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
for (i = 0; i < n*19; i++)
{
fA_h[i] = i;
}
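  // boundary flags: 0 = interior fluid, 1 = bounce-back wall,
  // 3 = Dirichlet velocity inlet on the west (x == 0) face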
for (i = 0; i < n; i++)
{
int x = i%xDim;
int y = (i/xDim)%yDim;
int z = (i/xDim)/yDim;
fA_h[i] = 0;
image_h[i] = 0;
if(x < 1) image_h[i] = 3;//DirichletWest
if(x > xDim-2) image_h[i] = 1;//BB
if(y < 1) image_h[i] = 1;//BB
if(y > yDim-2) image_h[i] = 1;//BB
if(z < 1) image_h[i] = 1;//DirichletWest
if(z > zDim-2) image_h[i] = 1;//BB
}
hipMemcpy(image_d, image_h, memsize_int, hipMemcpyHostToDevice);
if(true)//texture settings
{
texRef_f0B.normalized = false;
texRef_f1B.normalized = false;
texRef_f2B.normalized = false;
texRef_f3B.normalized = false;
texRef_f4B.normalized = false;
texRef_f5B.normalized = false;
texRef_f6B.normalized = false;
texRef_f7B.normalized = false;
texRef_f8B.normalized = false;
texRef_f9B.normalized = false;
texRef_f10B.normalized = false;
texRef_f11B.normalized = false;
texRef_f12B.normalized = false;
texRef_f13B.normalized = false;
texRef_f14B.normalized = false;
texRef_f15B.normalized = false;
texRef_f16B.normalized = false;
texRef_f17B.normalized = false;
texRef_f18B.normalized = false;
texRef_f0B.filterMode = hipFilterModePoint;
texRef_f1B.filterMode = hipFilterModePoint;
texRef_f2B.filterMode = hipFilterModePoint;
texRef_f3B.filterMode = hipFilterModePoint;
texRef_f4B.filterMode = hipFilterModePoint;
texRef_f5B.filterMode = hipFilterModePoint;
texRef_f6B.filterMode = hipFilterModePoint;
texRef_f7B.filterMode = hipFilterModePoint;
texRef_f8B.filterMode = hipFilterModePoint;
texRef_f9B.filterMode = hipFilterModePoint;
texRef_f10B.filterMode = hipFilterModePoint;
texRef_f11B.filterMode = hipFilterModePoint;
texRef_f12B.filterMode = hipFilterModePoint;
texRef_f13B.filterMode = hipFilterModePoint;
texRef_f14B.filterMode = hipFilterModePoint;
texRef_f15B.filterMode = hipFilterModePoint;
texRef_f16B.filterMode = hipFilterModePoint;
texRef_f17B.filterMode = hipFilterModePoint;
texRef_f18B.filterMode = hipFilterModePoint;
texRef_f0A.normalized = false;
texRef_f1A.normalized = false;
texRef_f2A.normalized = false;
texRef_f3A.normalized = false;
texRef_f4A.normalized = false;
texRef_f5A.normalized = false;
texRef_f6A.normalized = false;
texRef_f7A.normalized = false;
texRef_f8A.normalized = false;
texRef_f9A.normalized = false;
texRef_f10A.normalized = false;
texRef_f11A.normalized = false;
texRef_f12A.normalized = false;
texRef_f13A.normalized = false;
texRef_f14A.normalized = false;
texRef_f15A.normalized = false;
texRef_f16A.normalized = false;
texRef_f17A.normalized = false;
texRef_f18A.normalized = false;
texRef_f0A.filterMode = hipFilterModePoint;
texRef_f1A.filterMode = hipFilterModePoint;
texRef_f2A.filterMode = hipFilterModePoint;
texRef_f3A.filterMode = hipFilterModePoint;
texRef_f4A.filterMode = hipFilterModePoint;
texRef_f5A.filterMode = hipFilterModePoint;
texRef_f6A.filterMode = hipFilterModePoint;
texRef_f7A.filterMode = hipFilterModePoint;
texRef_f8A.filterMode = hipFilterModePoint;
texRef_f9A.filterMode = hipFilterModePoint;
texRef_f10A.filterMode = hipFilterModePoint;
texRef_f11A.filterMode = hipFilterModePoint;
texRef_f12A.filterMode = hipFilterModePoint;
texRef_f13A.filterMode = hipFilterModePoint;
texRef_f14A.filterMode = hipFilterModePoint;
texRef_f15A.filterMode = hipFilterModePoint;
texRef_f16A.filterMode = hipFilterModePoint;
texRef_f17A.filterMode = hipFilterModePoint;
texRef_f18A.filterMode = hipFilterModePoint;
}
hipMemcpy2D(fA_d ,pitch,fA_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim*19,hipMemcpyHostToDevice);
hipMemcpy2D(fB_d ,pitch,fA_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim*19,hipMemcpyHostToDevice);
for (i = 0; i < n*19; i++)
{
fA_h[i] = 0;
}
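  // bind each of the 19 pitched slabs of fA and fB (x across, y+height*z down)
  // to its own 2D texture reference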
if(true)//bind texture
{
hipBindTexture2D(0,&texRef_f0A, fA_d ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f1A, fA_d+pitch_elements*yDim*zDim ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f2A, fA_d+pitch_elements*yDim*zDim*2 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f3A, fA_d+pitch_elements*yDim*zDim*3 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f4A, fA_d+pitch_elements*yDim*zDim*4 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f5A, fA_d+pitch_elements*yDim*zDim*5 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f6A, fA_d+pitch_elements*yDim*zDim*6 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f7A, fA_d+pitch_elements*yDim*zDim*7 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f8A, fA_d+pitch_elements*yDim*zDim*8 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f9A, fA_d+pitch_elements*yDim*zDim*9 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f10A,fA_d+pitch_elements*yDim*zDim*10,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f11A,fA_d+pitch_elements*yDim*zDim*11,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f12A,fA_d+pitch_elements*yDim*zDim*12,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f13A,fA_d+pitch_elements*yDim*zDim*13,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f14A,fA_d+pitch_elements*yDim*zDim*14,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f15A,fA_d+pitch_elements*yDim*zDim*15,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f16A,fA_d+pitch_elements*yDim*zDim*16,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f17A,fA_d+pitch_elements*yDim*zDim*17,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f18A,fA_d+pitch_elements*yDim*zDim*18,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f0B, fB_d ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f1B, fB_d+pitch_elements*yDim*zDim ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f2B, fB_d+pitch_elements*yDim*zDim*2 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f3B, fB_d+pitch_elements*yDim*zDim*3 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f4B, fB_d+pitch_elements*yDim*zDim*4 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f5B, fB_d+pitch_elements*yDim*zDim*5 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f6B, fB_d+pitch_elements*yDim*zDim*6 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f7B, fB_d+pitch_elements*yDim*zDim*7 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f8B, fB_d+pitch_elements*yDim*zDim*8 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f9B, fB_d+pitch_elements*yDim*zDim*9 ,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f10B,fB_d+pitch_elements*yDim*zDim*10,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f11B,fB_d+pitch_elements*yDim*zDim*11,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f12B,fB_d+pitch_elements*yDim*zDim*12,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f13B,fB_d+pitch_elements*yDim*zDim*13,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f14B,fB_d+pitch_elements*yDim*zDim*14,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f15B,fB_d+pitch_elements*yDim*zDim*15,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f16B,fB_d+pitch_elements*yDim*zDim*16,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f17B,fB_d+pitch_elements*yDim*zDim*17,&desc,xDim,yDim*zDim,pitch);
hipBindTexture2D(0,&texRef_f18B,fB_d+pitch_elements*yDim*zDim*18,&desc,xDim,yDim*zDim,pitch);
}
// initialize<<<grid, threads>>>(f0_dA.ptr, f1_dA.ptr, f2_dA.ptr, f3_dA.ptr, f4_dA.ptr, f5_dA.ptr, f6_dA.ptr, f7_dA.ptr, f8_dA.ptr, f9_dA.ptr,
// f10_dA.ptr, f11_dA.ptr, f12_dA.ptr, f13_dA.ptr, f14_dA.ptr, f15_dA.ptr, f16_dA.ptr, f17_dA.ptr, f18_dA.ptr,
// xDim,yDim,pitch);
// initialize<<<grid, threads>>>(f0_dA, f1_dA, f2_dA, f3_dA, f4_dA, f5_dA, f6_dA, f7_dA, f8_dA, f9_dA,
// f10_dA, f11_dA, f12_dA, f13_dA, f14_dA, f15_dA, f16_dA, f17_dA, f18_dA,
// xDim,yDim,pitch_elements);
hipLaunchKernelGGL(( initialize_single), dim3(grid), dim3(threads), 0, 0, fA_d,xDim,yDim,zDim,pitch_elements);
// hipFuncSetCacheConfig(mrt_d_single,hipFuncCachePreferL1);
struct timeval tdr0,tdr1;
double restime;
hipDeviceSynchronize();
gettimeofday (&tdr0,NULL);
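  // ping-pong time loop: two kernel launches per iteration (A->B, then B->A),
  // so t advances by 2 each pass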
for(int t = 0; t<tMax; t=t+2){
//for(int t = 0; t<tMax; t=t+1){
// mrt_d_single<<<grid, threads>>>(fA_d,fB_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// mrt_d_single<<<grid, threads>>>(fB_d,fA_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// mrt_d_hybAB<<<grid, threads>>>(fA_d,fB_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// mrt_d_hybBA<<<grid, threads>>>(fB_d,fA_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// mrt_d_textAB<<<grid, threads>>>(fA_d,fB_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// mrt_d_textBA<<<grid, threads>>>(fB_d,fA_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
hipLaunchKernelGGL(( mrt_d_shared), dim3(grid), dim3(threads), 0, 0, fA_d,fB_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
hipLaunchKernelGGL(( mrt_d_shared), dim3(grid), dim3(threads), 0, 0, fB_d,fA_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// simple_copy<<<grid, threads>>>(fA_d,fB_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// simple_copy<<<grid, threads>>>(fB_d,fA_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n";
}
hipDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
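  // MLUPS = millions of lattice-node updates per second
  //       = xDim*yDim*zDim*tMax / (runtime * 1e6)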
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(xDim*yDim*zDim*double(tMax/1000000.f))/restime<<"MLUPS)"<<endl;
cout<<xDim<<","<<yDim<<","<<zDim<<","<<tMax<<","<<restime<<endl;
// copytest<<<grid, threads>>>(f10_dA,test_d,xDim,yDim,zDim);
//copytest<<<grid, threads>>>(test_d);
//copytest<<<grid, threads>>>(image_d);
hipUnbindTexture(texRef_f0A);
hipUnbindTexture(texRef_f1A);
hipUnbindTexture(texRef_f2A);
hipUnbindTexture(texRef_f3A);
hipUnbindTexture(texRef_f4A);
hipUnbindTexture(texRef_f5A);
hipUnbindTexture(texRef_f6A);
hipUnbindTexture(texRef_f7A);
hipUnbindTexture(texRef_f8A);
hipUnbindTexture(texRef_f9A);
hipUnbindTexture(texRef_f10A);
hipUnbindTexture(texRef_f11A);
hipUnbindTexture(texRef_f12A);
hipUnbindTexture(texRef_f13A);
hipUnbindTexture(texRef_f14A);
hipUnbindTexture(texRef_f15A);
hipUnbindTexture(texRef_f16A);
hipUnbindTexture(texRef_f17A);
hipUnbindTexture(texRef_f18A);
hipUnbindTexture(texRef_f0B);
hipUnbindTexture(texRef_f1B);
hipUnbindTexture(texRef_f2B);
hipUnbindTexture(texRef_f3B);
hipUnbindTexture(texRef_f4B);
hipUnbindTexture(texRef_f5B);
hipUnbindTexture(texRef_f6B);
hipUnbindTexture(texRef_f7B);
hipUnbindTexture(texRef_f8B);
hipUnbindTexture(texRef_f9B);
hipUnbindTexture(texRef_f10B);
hipUnbindTexture(texRef_f11B);
hipUnbindTexture(texRef_f12B);
hipUnbindTexture(texRef_f13B);
hipUnbindTexture(texRef_f14B);
hipUnbindTexture(texRef_f15B);
hipUnbindTexture(texRef_f16B);
hipUnbindTexture(texRef_f17B);
hipUnbindTexture(texRef_f18B);
// hipMemcpy2D(f0_h,xDim*sizeof(float) , f0_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f1_h,xDim*sizeof(float) , f1_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f2_h,xDim*sizeof(float) , f2_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f3_h,xDim*sizeof(float) , f3_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f4_h,xDim*sizeof(float) , f4_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f5_h,xDim*sizeof(float) , f5_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f6_h,xDim*sizeof(float) , f6_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f7_h,xDim*sizeof(float) , f7_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f8_h,xDim*sizeof(float) , f8_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f9_h,xDim*sizeof(float) , f9_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f10_h,xDim*sizeof(float),f10_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f11_h,xDim*sizeof(float),f11_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f12_h,xDim*sizeof(float),f12_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f13_h,xDim*sizeof(float),f13_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f14_h,xDim*sizeof(float),f14_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f15_h,xDim*sizeof(float),f15_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f16_h,xDim*sizeof(float),f16_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f17_h,xDim*sizeof(float),f17_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
// hipMemcpy2D(f18_h,xDim*sizeof(float),f18_dA,pitch,xDim*sizeof(float),yDim*zDim,hipMemcpyDeviceToHost);
hipMemcpy2D(fA_h,xDim*sizeof(float),fA_d,pitch,xDim*sizeof(float),yDim*zDim*19,hipMemcpyDeviceToHost);
// cout<<"f1_h is "<<f1_h[0]<<endl;
//hipMemcpy(f0_h, f0_d.ptr, memsize, hipMemcpyDeviceToHost);
hipMemcpy(image_h, image_d, memsize_int, hipMemcpyDeviceToHost);
// cout<<image_h[0]<<endl;
// cout<<"test_d: "<<test_h[0]<<endl;
// for(i = 0; i<n; i++){
// cout<<f0_h[i]<<",";
// }
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\"\n";
output<<"ZONE F=POINT, I="<<xDim<<", J="<<yDim<<", K="<<zDim<<"\n";
int row = 0;
int col = 0;
int dep = 0;
i = 0;
float rho, u, v, w;
int j;
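  // reconstruct the macroscopic fields from the host copy: rho is the sum of all 19
  // populations, (u,v,w) are the first-order moments; fA_h keeps the same
  // direction-major layout with stride xDim*yDim*zDim per direction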
for(dep = 0; dep<zDim; dep++){
for(row = 0; row<yDim; row++){
for(col = 0; col<xDim; col++){
i = dep*xDim*yDim+row*xDim+col;
// rho = 0;
rho = fA_h[i];
for(j = 1; j<19; j++)
rho+=fA_h[i+xDim*yDim*zDim*j];
// rho = f0_h[i]+f1_h[i]+f2_h[i]+f3_h[i]+f4_h[i]+f5_h[i]+f6_h[i]+f7_h[i]+f8_h[i]+f9_h[i]+
// f10_h[i]+f11_h[i]+f12_h[i]+f13_h[i]+f14_h[i]+f15_h[i]+f16_h[i]+f17_h[i]+f18_h[i];
u = fA_h[i+xDim*yDim*zDim*1]-fA_h[i+xDim*yDim*zDim*3]+fA_h[i+xDim*yDim*zDim*5]-fA_h[i+xDim*yDim*zDim*6]-
fA_h[i+xDim*yDim*zDim*7]+fA_h[i+xDim*yDim*zDim*8]+fA_h[i+xDim*yDim*zDim*10]-fA_h[i+xDim*yDim*zDim*12]
+fA_h[i+xDim*yDim*zDim*15]-fA_h[i+xDim*yDim*zDim*17];
        v = fA_h[i+xDim*yDim*zDim*2]-fA_h[i+xDim*yDim*zDim*4]+fA_h[i+xDim*yDim*zDim*5]+fA_h[i+xDim*yDim*zDim*6]-
            fA_h[i+xDim*yDim*zDim*7]-fA_h[i+xDim*yDim*zDim*8]+fA_h[i+xDim*yDim*zDim*11]-fA_h[i+xDim*yDim*zDim*13]
            +fA_h[i+xDim*yDim*zDim*16]-fA_h[i+xDim*yDim*zDim*18];
        w = fA_h[i+xDim*yDim*zDim*9]+fA_h[i+xDim*yDim*zDim*10]+fA_h[i+xDim*yDim*zDim*11]+fA_h[i+xDim*yDim*zDim*12]+
            fA_h[i+xDim*yDim*zDim*13]-fA_h[i+xDim*yDim*zDim*14]-fA_h[i+xDim*yDim*zDim*15]-fA_h[i+xDim*yDim*zDim*16]
            -fA_h[i+xDim*yDim*zDim*17]-fA_h[i+xDim*yDim*zDim*18];
output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<w<<","<<rho<<endl;
// output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<fA_h[i+xDim*yDim*zDim*1]<<","<<rho<<endl;
}
}
}
output.close();
hipFree(image_d);
// hipFree(f0_dA);
// hipFree(f1_dA);
// hipFree(f2_dA);
// hipFree(f3_dA);
// hipFree(f4_dA);
// hipFree(f5_dA);
// hipFree(f6_dA);
// hipFree(f7_dA);
// hipFree(f8_dA);
// hipFree(f9_dA);
// hipFree(f10_dA);
// hipFree(f11_dA);
// hipFree(f12_dA);
// hipFree(f13_dA);
// hipFree(f14_dA);
// hipFree(f15_dA);
// hipFree(f16_dA);
// hipFree(f17_dA);
// hipFree(f18_dA);
// hipFree(f0_dB);
// hipFree(f1_dB);
// hipFree(f2_dB);
// hipFree(f3_dB);
// hipFree(f4_dB);
// hipFree(f5_dB);
// hipFree(f6_dB);
// hipFree(f7_dB);
// hipFree(f8_dB);
// hipFree(f9_dB);
// hipFree(f10_dB);
// hipFree(f11_dB);
// hipFree(f12_dB);
// hipFree(f13_dB);
// hipFree(f14_dB);
// hipFree(f15_dB);
// hipFree(f16_dB);
// hipFree(f17_dB);
// hipFree(f18_dB);
hipFree(fA_d);
hipFree(fB_d);
return(0);
}
|
3b54ae95e4c3f0638fa404a52d1baf021c3a1a4e.cu
|
#include <cuda.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
#include <sys/time.h>
#include <time.h>
texture<float,2,cudaReadModeElementType> texRef_f0A;
texture<float,2,cudaReadModeElementType> texRef_f1A;
texture<float,2,cudaReadModeElementType> texRef_f2A;
texture<float,2,cudaReadModeElementType> texRef_f3A;
texture<float,2,cudaReadModeElementType> texRef_f4A;
texture<float,2,cudaReadModeElementType> texRef_f5A;
texture<float,2,cudaReadModeElementType> texRef_f6A;
texture<float,2,cudaReadModeElementType> texRef_f7A;
texture<float,2,cudaReadModeElementType> texRef_f8A;
texture<float,2,cudaReadModeElementType> texRef_f9A;
texture<float,2,cudaReadModeElementType> texRef_f10A;
texture<float,2,cudaReadModeElementType> texRef_f11A;
texture<float,2,cudaReadModeElementType> texRef_f12A;
texture<float,2,cudaReadModeElementType> texRef_f13A;
texture<float,2,cudaReadModeElementType> texRef_f14A;
texture<float,2,cudaReadModeElementType> texRef_f15A;
texture<float,2,cudaReadModeElementType> texRef_f16A;
texture<float,2,cudaReadModeElementType> texRef_f17A;
texture<float,2,cudaReadModeElementType> texRef_f18A;
texture<float,2,cudaReadModeElementType> texRef_f0B;
texture<float,2,cudaReadModeElementType> texRef_f1B;
texture<float,2,cudaReadModeElementType> texRef_f2B;
texture<float,2,cudaReadModeElementType> texRef_f3B;
texture<float,2,cudaReadModeElementType> texRef_f4B;
texture<float,2,cudaReadModeElementType> texRef_f5B;
texture<float,2,cudaReadModeElementType> texRef_f6B;
texture<float,2,cudaReadModeElementType> texRef_f7B;
texture<float,2,cudaReadModeElementType> texRef_f8B;
texture<float,2,cudaReadModeElementType> texRef_f9B;
texture<float,2,cudaReadModeElementType> texRef_f10B;
texture<float,2,cudaReadModeElementType> texRef_f11B;
texture<float,2,cudaReadModeElementType> texRef_f12B;
texture<float,2,cudaReadModeElementType> texRef_f13B;
texture<float,2,cudaReadModeElementType> texRef_f14B;
texture<float,2,cudaReadModeElementType> texRef_f15B;
texture<float,2,cudaReadModeElementType> texRef_f16B;
texture<float,2,cudaReadModeElementType> texRef_f17B;
texture<float,2,cudaReadModeElementType> texRef_f18B;
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
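// bgk_collide: single-relaxation-time (BGK) collision. Each population is relaxed
// toward its D3Q19 equilibrium with rate omega; the equilibria are factored with fma()
// to encourage fused multiply-add instructions (the plain form is kept in the comments
// below).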
inline __device__ void bgk_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
// float usqr = u*u+v*v+w*w;
float usqr = fma(u,u,fma(v,v,w*w));
f0 -= omega*fma(-0.3333333333f,(fma(-1.5f,usqr,rho)),f0);//(f0 -0.3333333333f*(fma(-1.5f,usqr,rho)));//rho-1.5f*usqr));
f1 -= omega*fma(-0.0555555556f,fma(3.0f, u ,rho)+fma(4.5f,u*u,-1.5f*usqr),f1);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f2 -= omega*fma(-0.0555555556f,fma(3.0f, v ,rho)+fma(4.5f,v*v,-1.5f*usqr),f2);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f3 -= omega*fma(-0.0555555556f,fma(3.0f, u ,rho)+fma(4.5f,u*u,-1.5f*usqr),f3);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f4 -= omega*fma(-0.0555555556f,fma(3.0f, v ,rho)+fma(4.5f,v*v,-1.5f*usqr),f4);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
	f5 -= omega*fma(-0.0277777778f,fma(3.0f,( u+v),rho)+fma(4.5f,( u+v)*( u+v),-1.5f*usqr),f5 );// diagonal weight 1/36
	f6 -= omega*fma(-0.0277777778f,fma(3.0f,(-u+v),rho)+fma(4.5f,(-u+v)*(-u+v),-1.5f*usqr),f6 );// diagonal weight 1/36
	f7 -= omega*fma(-0.0277777778f,fma(3.0f,(-u-v),rho)+fma(4.5f,(-u-v)*(-u-v),-1.5f*usqr),f7 );// diagonal weight 1/36
	f8 -= omega*fma(-0.0277777778f,fma(3.0f,( u-v),rho)+fma(4.5f,( u-v)*( u-v),-1.5f*usqr),f8 );// diagonal weight 1/36 (these four had the 1/18 axis weight by mistake; cf. the reference lines below)
f9 -= omega*fma(-0.0555555556f,fma(3.0f,( w),rho)+fma(4.5f,( w)*( w),-1.5f*usqr),f9 );//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f10-= omega*fma(-0.0277777778f,fma(3.0f,( u+w),rho)+fma(4.5f,( u+w)*( u+w),-1.5f*usqr),f10);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f11-= omega*fma(-0.0277777778f,fma(3.0f,( v+w),rho)+fma(4.5f,( v+w)*( v+w),-1.5f*usqr),f11);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f12-= omega*fma(-0.0277777778f,fma(3.0f,(-u+w),rho)+fma(4.5f,(-u+w)*(-u+w),-1.5f*usqr),f12);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f13-= omega*fma(-0.0277777778f,fma(3.0f,(-v+w),rho)+fma(4.5f,(-v+w)*(-v+w),-1.5f*usqr),f13);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f14-= omega*fma(-0.0555555556f,fma(3.0f,( -w),rho)+fma(4.5f,( -w)*( -w),-1.5f*usqr),f14);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f15-= omega*fma(-0.0277777778f,fma(3.0f,( u-w),rho)+fma(4.5f,( u-w)*( u-w),-1.5f*usqr),f15);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f16-= omega*fma(-0.0277777778f,fma(3.0f,( v-w),rho)+fma(4.5f,( v-w)*( v-w),-1.5f*usqr),f16);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f17-= omega*fma(-0.0277777778f,fma(3.0f,(-u-w),rho)+fma(4.5f,(-u-w)*(-u-w),-1.5f*usqr),f17);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f18-= omega*fma(-0.0277777778f,fma(3.0f,(-v-w),rho)+fma(4.5f,(-v-w)*(-v-w),-1.5f*usqr),f18);//(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f0 = f0 -omega*(f0 -0.3333333333f*(rho-1.5f*usqr));
// f1 = f1 -omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 = f2 -omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 = f3 -omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 = f4 -omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 = f5 -omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 = f6 -omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 = f7 -omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 = f8 -omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 = f9 -omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10= f10-omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11= f11-omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12= f12-omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13= f13-omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14= f14-omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15= f15-omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16= f16-omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17= f17-omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18= f18-omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
}
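// mrt_collide: multiple-relaxation-time collision. The populations are projected onto
// the non-conserved D3Q19 moments m1..m18, the moments are relaxed toward their
// equilibria (omega is applied to the stress-related moments, the remaining terms use
// a unit rate as written), and the result is mapped back onto f0..f18.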
__device__ void mrt_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ 1.f*f8+ -4.f*f9+ f10+ 1.f*f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
m1 -= -11.f*rho+19.f*(u*u+v*v+w*w);
m2 -= -7.53968254f*(u*u+v*v+w*w);
m4 -= -0.66666667f*u;//qx_eq
m6 -= -0.66666667f*v;//qx_eq
m8 -= -0.66666667f*w;//qx_eq
m9 -= (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq
m11-= (v*v-w*w);//pww_eq
m13-= u*v;//pxy_eq
m14-= v*w;//pyz_eq
m15-= u*w;//pxz_eq
f0 -= - 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -= -0.0045948204f*(m1)+ -0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10);
f2 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m6) + -0.027777778f*(m9)*omega + 0.027777778f*(m10);
f3 -= -0.0045948204f*(m1)+ -0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10);
f4 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m6) + -0.027777778f*(m9)*omega + 0.027777778f*(m10);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f9 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10);
f10 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f11 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
f12 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f13 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
f14 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10);
f15 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f16 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
f17 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f18 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
f2 -= 0.083333333f*(m11)*omega + -0.083333333f*(m12);
f4 -= 0.083333333f*(m11)*omega + -0.083333333f*(m12);
f5 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ ( 0.25f*(m13) )*omega;
f6 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ (-0.25f*(m13) )*omega;
f7 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ ( 0.25f*(m13) )*omega;
f8 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ (-0.25f*(m13) )*omega;
f9 -= -0.083333333f*(m11)*omega + 0.083333333f*(m12);
f10 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + 0.25f*(m15))*omega ;
f11 -= +( 0.25f*(m14) )*omega ;
f12 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + -0.25f*(m15))*omega ;
f13 -= +( -0.25f*(m14) )*omega ;
f14 -= -0.083333333f*(m11)*omega + 0.083333333f*(m12);
f15 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + -0.25f*(m15))*omega ;
f16 -= +( -0.25f*(m14) )*omega ;
f17 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + 0.25f*(m15))*omega ;
f18 -= +( 0.25f*(m14) )*omega ;
f5 -= 0.125f*(m16)+ -0.125f*(m17);
f6 -= -0.125f*(m16)+ -0.125f*(m17);
f7 -= -0.125f*(m16)+ 0.125f*(m17);
f8 -= 0.125f*(m16)+ 0.125f*(m17);
f10 -= -0.125f*(m16) + 0.125f*(m18);
f11 -= + 0.125f*(m17)+ -0.125f*(m18);
f12 -= 0.125f*(m16) + 0.125f*(m18);
f13 -= + -0.125f*(m17)+ -0.125f*(m18);
f15 -= -0.125f*(m16) + -0.125f*(m18);
f16 -= + 0.125f*(m17)+ 0.125f*(m18);
f17 -= 0.125f*(m16) + -0.125f*(m18);
f18 -= + -0.125f*(m17)+ 0.125f*(m18);
}
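//Helper for indexing the distribution functions: all 19 components are stored
//back-to-back as separate pitch*height*depth volumes (structure-of-arrays), so
//f_num selects the component and (x,y,z) the lattice node within it.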
__device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int height, int depth)
{
// if (x<0 || x>pitch || y<0 || y>height || z<0 || z>depth) return 0;
// else
return (x+y*pitch+z*height*pitch)+f_num*pitch*height*depth;
}
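//Clamping helpers used by the pull-style streaming reads at the domain edges:
//dmin(a,b) returns b-1 (the last valid index) once a reaches b, and dmax(a)
//clamps negative indices to 0.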
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
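//Plain copy kernel (fB[j] = fA[j]); the commented-out variants suggest it was
//used as a memory-bandwidth baseline for the streaming kernels below.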
__global__ void simple_copy(float* fA, float* fB,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
// fB[f_mem(1 ,x,y,z,pitch,height,depth)] = fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// fB[f_mem(2 ,x,y,z,pitch,height,depth)] = fA[f_mem(2 ,x,y,z,pitch,height,depth)];
// fB[f_mem(3 ,x,y,z,pitch,height,depth)] = fA[f_mem(3 ,x,y,z,pitch,height,depth)];
// fB[f_mem(4 ,x,y,z,pitch,height,depth)] = fA[f_mem(4 ,x,y,z,pitch,height,depth)];
// fB[f_mem(5 ,x,y,z,pitch,height,depth)] = fA[f_mem(5 ,x,y,z,pitch,height,depth)];
// fB[f_mem(6 ,x,y,z,pitch,height,depth)] = fA[f_mem(6 ,x,y,z,pitch,height,depth)];
// fB[f_mem(7 ,x,y,z,pitch,height,depth)] = fA[f_mem(7 ,x,y,z,pitch,height,depth)];
// fB[f_mem(8 ,x,y,z,pitch,height,depth)] = fA[f_mem(8 ,x,y,z,pitch,height,depth)];
// fB[f_mem(9 ,x,y,z,pitch,height,depth)] = fA[f_mem(9 ,x,y,z,pitch,height,depth)];
// fB[f_mem(10,x,y,z,pitch,height,depth)] = fA[f_mem(10,x,y,z,pitch,height,depth)];
// fB[f_mem(11,x,y,z,pitch,height,depth)] = fA[f_mem(11,x,y,z,pitch,height,depth)];
// fB[f_mem(12,x,y,z,pitch,height,depth)] = fA[f_mem(12,x,y,z,pitch,height,depth)];
// fB[f_mem(13,x,y,z,pitch,height,depth)] = fA[f_mem(13,x,y,z,pitch,height,depth)];
// fB[f_mem(14,x,y,z,pitch,height,depth)] = fA[f_mem(14,x,y,z,pitch,height,depth)];
// fB[f_mem(15,x,y,z,pitch,height,depth)] = fA[f_mem(15,x,y,z,pitch,height,depth)];
// fB[f_mem(16,x,y,z,pitch,height,depth)] = fA[f_mem(16,x,y,z,pitch,height,depth)];
// fB[f_mem(17,x,y,z,pitch,height,depth)] = fA[f_mem(17,x,y,z,pitch,height,depth)];
// fB[f_mem(18,x,y,z,pitch,height,depth)] = fA[f_mem(18,x,y,z,pitch,height,depth)];
// float f0;//,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// float f0 = fA[j+pitch*height*depth];
// float f0 = fA[f_mem(0 ,x,y,z,pitch,height,depth)];
// f0 = tex2D(texRef_f0A ,x,y+height*z);
// f1 = tex2D(texRef_f1A ,x,y+height*z);
// f2 = tex2D(texRef_f2A ,x,y+height*z);
// f3 = tex2D(texRef_f3A ,x,y+height*z);
// f4 = tex2D(texRef_f4A ,x,y+height*z);
// f5 = tex2D(texRef_f5A ,x,y+height*z);
// f6 = tex2D(texRef_f6A ,x,y+height*z);
// f7 = tex2D(texRef_f7A ,x,y+height*z);
// f8 = tex2D(texRef_f8A ,x,y+height*z);
// f9 = tex2D(texRef_f9A ,x,y+height*z);
// f10 = tex2D(texRef_f10A,x,y+height*z);
// f11 = tex2D(texRef_f11A,x,y+height*z);
// f12 = tex2D(texRef_f12A,x,y+height*z);
// f13 = tex2D(texRef_f13A,x,y+height*z);
// f14 = tex2D(texRef_f14A,x,y+height*z);
// f15 = tex2D(texRef_f15A,x,y+height*z);
// f16 = tex2D(texRef_f16A,x,y+height*z);
// f17 = tex2D(texRef_f17A,x,y+height*z);
// f18 = tex2D(texRef_f18A,x,y+height*z);
// float f1 = fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f1 = fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f2 = fA[f_mem(2 ,x,y,z,pitch,height,depth)];
// f3 = fA[f_mem(3 ,x,y,z,pitch,height,depth)];
// f4 = fA[f_mem(4 ,x,y,z,pitch,height,depth)];
// f5 = fA[f_mem(5 ,x,y,z,pitch,height,depth)];
// f6 = fA[f_mem(6 ,x,y,z,pitch,height,depth)];
// f7 = fA[f_mem(7 ,x,y,z,pitch,height,depth)];
// f8 = fA[f_mem(8 ,x,y,z,pitch,height,depth)];
// f9 = fA[f_mem(9 ,x,y,z,pitch,height,depth)];
// f10 = fA[f_mem(10,x,y,z,pitch,height,depth)];
// f11 = fA[f_mem(11,x,y,z,pitch,height,depth)];
// f12 = fA[f_mem(12,x,y,z,pitch,height,depth)];
// f13 = fA[f_mem(13,x,y,z,pitch,height,depth)];
// f14 = fA[f_mem(14,x,y,z,pitch,height,depth)];
// f15 = fA[f_mem(15,x,y,z,pitch,height,depth)];
// f16 = fA[f_mem(16,x,y,z,pitch,height,depth)];
// f17 = fA[f_mem(17,x,y,z,pitch,height,depth)];
// f18 = fA[f_mem(18,x,y,z,pitch,height,depth)];
// fB[f_mem(0 ,x,y,z,pitch,height,depth)] = fA[f_mem(0 ,x,y,z,pitch,height,depth)];//+0.01f;
fB[j] = fA[j];//+0.01f;
// fB[j+pitch*height*depth+pitch*height*depth] = f2;
// fB[(x+y*pitch+z*height*pitch)+pitch*height*depth] = f1 ;//+0.01f;
// fB[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
// fB[f_mem(1 ,x,y,z,pitch,height,depth)] = f1;//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// fB[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;//+0.01f;
// fB[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;//+0.01f;
// fB[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;//+0.01f;
// fB[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;//+0.01f;
// fB[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;//+0.01f;
// fB[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;//+0.01f;
// fB[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;//+0.01f;
// fB[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;//+0.01f;
// fB[f_mem(10,x,y,z,pitch,height,depth)] = f10;//+0.01f;
// fB[f_mem(11,x,y,z,pitch,height,depth)] = f11;//+0.01f;
// fB[f_mem(12,x,y,z,pitch,height,depth)] = f12;//+0.01f;
// fB[f_mem(13,x,y,z,pitch,height,depth)] = f13;//+0.01f;
// fB[f_mem(14,x,y,z,pitch,height,depth)] = f14;//+0.01f;
// fB[f_mem(15,x,y,z,pitch,height,depth)] = f15;//+0.01f;
// fB[f_mem(16,x,y,z,pitch,height,depth)] = f16;//+0.01f;
// fB[f_mem(17,x,y,z,pitch,height,depth)] = f17;//+0.01f;
// fB[f_mem(18,x,y,z,pitch,height,depth)] = f18;//+0.01f;
}
//int const blockx = 192;
//int const blocky = 1;
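//Hybrid A->B step: distributions shifted only in y/z are pulled from global
//memory (fin), while the x-shifted ones come from the 2D textures bound to the
//"A" arrays; im==1 nodes are treated as bounce-back, im==3 as DirichletWest.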
__global__ void mrt_d_hybAB(float* fin, float* fout,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
// f1out[j] = tex2D(texRef_f2A,x,y+h*z);
// int i = x+y*blockDim.x*gridDim.x;
//float u,v,w,rho;//,usqr;
int im = image[i];
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// f1 = fin[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];
// f3 = fin[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,height,depth)];
// f5 = fin[f_mem(7 ,dmin(x+1,width),dmin(y+1,height),z ,pitch,height,depth)];
// f7 = fin[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,height,depth)];
// f6 = fin[f_mem(8 ,dmax(x-1) ,dmin(y+1,height),z ,pitch,height,depth)];
// f8 = fin[f_mem(6 ,dmin(x+1,width),dmax(y-1) ,z ,pitch,height,depth)];
// f10= fin[f_mem(17,dmin(x+1,width),y ,dmin(z+1,depth) ,pitch,height,depth)];
// f12= fin[f_mem(15,dmax(x-1) ,y ,dmin(z+1,depth) ,pitch,height,depth)];
// f15= fin[f_mem(12,dmin(x+1,width),y ,dmax(z-1) ,pitch,height,depth)];
// f17= fin[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,height,depth)];
f2 = fin[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];
f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];
f9 = fin[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];
f11= fin[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];
f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];
f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];
f16= fin[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];
f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];
f3 = tex2D(texRef_f1A ,x-1,(y )+height*(z));
f1 = tex2D(texRef_f3A ,x+1,(y )+height*(z));
f5 = tex2D(texRef_f7A ,x+1,(y+1)+height*(z));
f6 = tex2D(texRef_f8A ,x-1,(y+1)+height*(z));
f7 = tex2D(texRef_f5A ,x-1,(y-1)+height*(z));
f8 = tex2D(texRef_f6A ,x+1,(y-1)+height*(z));
f10= tex2D(texRef_f17A,x+1,(y )+height*(z+1));
f12= tex2D(texRef_f15A,x-1,(y )+height*(z+1));
f17= tex2D(texRef_f10A,x-1,(y )+height*(z-1));
f15= tex2D(texRef_f12A,x+1,(y )+height*(z-1));
fout[j+pitch*height*depth*1 ] = f1 ;
fout[j+pitch*height*depth*2 ] = f2 ;
fout[j+pitch*height*depth*3 ] = f3 ;
fout[j+pitch*height*depth*4 ] = f4 ;
fout[j+pitch*height*depth*5 ] = f5 ;
fout[j+pitch*height*depth*6 ] = f6 ;
fout[j+pitch*height*depth*7 ] = f7 ;
fout[j+pitch*height*depth*8 ] = f8 ;
fout[j+pitch*height*depth*9 ] = f9 ;
fout[j+pitch*height*depth*10] = f10;
fout[j+pitch*height*depth*11] = f11;
fout[j+pitch*height*depth*12] = f12;
fout[j+pitch*height*depth*13] = f13;
fout[j+pitch*height*depth*14] = f14;
fout[j+pitch*height*depth*15] = f15;
fout[j+pitch*height*depth*16] = f16;
fout[j+pitch*height*depth*17] = f17;
fout[j+pitch*height*depth*18] = f18;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// f1 = fin[f_mem(1 ,x-1,y ,z ,pitch,height,depth)];
// f3 = fin[f_mem(3 ,x+1,y ,z ,pitch,height,depth)];
// f5 = fin[f_mem(5 ,x-1,y-1,z ,pitch,height,depth)];
// f6 = fin[f_mem(6 ,x+1,y-1,z ,pitch,height,depth)];
// f7 = fin[f_mem(7 ,x+1,y+1,z ,pitch,height,depth)];
// f8 = fin[f_mem(8 ,x-1,y+1,z ,pitch,height,depth)];
// f10= fin[f_mem(10,x-1,y ,z-1,pitch,height,depth)];
// f12= fin[f_mem(12,x+1,y ,z-1,pitch,height,depth)];
// f15= fin[f_mem(15,x-1,y ,z+1,pitch,height,depth)];
// f17= fin[f_mem(17,x+1,y ,z+1,pitch,height,depth)];
f0 = fin[j];
f2 = fin[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
f4 = fin[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
f9 = fin[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
f11= fin[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
f13= fin[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
f14= fin[f_mem(14,x ,y ,z+1,pitch,height,depth)];
f16= fin[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
f18= fin[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
f1 = tex2D(texRef_f1A ,x-1,y +height*(z));
f3 = tex2D(texRef_f3A ,x+1,y +height*(z));
f5 = tex2D(texRef_f5A ,x-1,y-1+height*(z));
f6 = tex2D(texRef_f6A ,x+1,y-1+height*(z));
f7 = tex2D(texRef_f7A ,x+1,y+1+height*(z));
f8 = tex2D(texRef_f8A ,x-1,y+1+height*(z));
f15= tex2D(texRef_f15A,x-1,y +height*(z+1));
f17= tex2D(texRef_f17A,x+1,y +height*(z+1));
f10= tex2D(texRef_f10A,x-1,y +height*(z-1));
f12= tex2D(texRef_f12A,x+1,y +height*(z-1));
if(im == 3)//DirichletWest
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == height-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
if(z == depth-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
// float fInt1,fInt2;//,fDiff;
float u,v,w,rho;
u = 0.0f;//*PoisProf(zcoord)*1.5;
v = uMax;//0.0;
w = 0.0f;
// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18;
// fInt2 = f3+f6+f7+f12+f17;
// rho = u+(fInt1+2.0f*fInt2); //D2Q9i
rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float usqr = u*u+v*v+w*w;
f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
//mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fout[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fout[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fout[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fout[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fout[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fout[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fout[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fout[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fout[f_mem(18,x,y,z,pitch,height,depth)] = f18;
}
}
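//B->A counterpart of mrt_d_hybAB: identical logic, but the x-shifted reads use
//the texture references bound to the "B" arrays.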
__global__ void mrt_d_hybBA(float* fin, float* fout,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
int im = image[i];
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f2 = fin[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];
f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];
f9 = fin[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];
f11= fin[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];
f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];
f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];
f16= fin[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];
f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];
f3 = tex2D(texRef_f1B ,x-1,(y )+height*(z));
f1 = tex2D(texRef_f3B ,x+1,(y )+height*(z));
f5 = tex2D(texRef_f7B ,x+1,(y+1)+height*(z));
f6 = tex2D(texRef_f8B ,x-1,(y+1)+height*(z));
f7 = tex2D(texRef_f5B ,x-1,(y-1)+height*(z));
f8 = tex2D(texRef_f6B ,x+1,(y-1)+height*(z));
f10= tex2D(texRef_f17B,x+1,(y )+height*(z+1));
f12= tex2D(texRef_f15B,x-1,(y )+height*(z+1));
f17= tex2D(texRef_f10B,x-1,(y )+height*(z-1));
f15= tex2D(texRef_f12B,x+1,(y )+height*(z-1));
fout[j+pitch*height*depth*1 ] = f1 ;
fout[j+pitch*height*depth*2 ] = f2 ;
fout[j+pitch*height*depth*3 ] = f3 ;
fout[j+pitch*height*depth*4 ] = f4 ;
fout[j+pitch*height*depth*5 ] = f5 ;
fout[j+pitch*height*depth*6 ] = f6 ;
fout[j+pitch*height*depth*7 ] = f7 ;
fout[j+pitch*height*depth*8 ] = f8 ;
fout[j+pitch*height*depth*9 ] = f9 ;
fout[j+pitch*height*depth*10] = f10;
fout[j+pitch*height*depth*11] = f11;
fout[j+pitch*height*depth*12] = f12;
fout[j+pitch*height*depth*13] = f13;
fout[j+pitch*height*depth*14] = f14;
fout[j+pitch*height*depth*15] = f15;
fout[j+pitch*height*depth*16] = f16;
fout[j+pitch*height*depth*17] = f17;
fout[j+pitch*height*depth*18] = f18;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fin[j];
f2 = fin[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
f4 = fin[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
f9 = fin[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
f11= fin[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
f13= fin[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
f14= fin[f_mem(14,x ,y ,z+1,pitch,height,depth)];
f16= fin[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
f18= fin[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
f1 = tex2D(texRef_f1B ,x-1,y +height*(z));
f3 = tex2D(texRef_f3B ,x+1,y +height*(z));
f5 = tex2D(texRef_f5B ,x-1,y-1+height*(z));
f6 = tex2D(texRef_f6B ,x+1,y-1+height*(z));
f7 = tex2D(texRef_f7B ,x+1,y+1+height*(z));
f8 = tex2D(texRef_f8B ,x-1,y+1+height*(z));
f15= tex2D(texRef_f15B,x-1,y +height*(z+1));
f17= tex2D(texRef_f17B,x+1,y +height*(z+1));
f10= tex2D(texRef_f10B,x-1,y +height*(z-1));
f12= tex2D(texRef_f12B,x+1,y +height*(z-1));
if(im == 3)//DirichletWest
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == height-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
if(z == depth-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
// float fInt1,fInt2;//,fDiff;
float u,v,w,rho;
u = 0.0f;//*PoisProf(zcoord)*1.5;
v = uMax;//0.0;
w = 0.0f;
// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18;
// fInt2 = f3+f6+f7+f12+f17;
// rho = u+(fInt1+2.0f*fInt2); //D2Q9i
rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float usqr = u*u+v*v+w*w;
f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
//mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fout[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fout[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fout[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fout[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fout[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fout[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fout[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fout[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fout[f_mem(18,x,y,z,pitch,height,depth)] = f18;
}
}
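//All-texture A->B step: every shifted neighbour read goes through the 2D
//textures; only the rest particle f0 is read directly from fin.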
__global__ void mrt_d_textAB(float* fin, float* fout,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
int im = image[i];
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f2 = tex2D(texRef_f4A ,x ,(y+1)+height*(z ));
f4 = tex2D(texRef_f2A ,x ,(y-1)+height*(z ));
f9 = tex2D(texRef_f14A,x ,(y )+height*(z+1));
f14= tex2D(texRef_f9A ,x ,(y )+height*(z-1));
f11= tex2D(texRef_f18A,x ,(y+1)+height*(z+1));
f18= tex2D(texRef_f11A,x ,(y-1)+height*(z-1));
f16= tex2D(texRef_f13A,x ,(y+1)+height*(z-1));
f13= tex2D(texRef_f16A,x ,(y-1)+height*(z+1));
// f2 = fin[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];
// f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];
// f9 = fin[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];
// f11= fin[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];
// f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];
// f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];
// f16= fin[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];
// f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];
f3 = tex2D(texRef_f1A ,x-1,(y )+height*(z));
f1 = tex2D(texRef_f3A ,x+1,(y )+height*(z));
f5 = tex2D(texRef_f7A ,x+1,(y+1)+height*(z));
f6 = tex2D(texRef_f8A ,x-1,(y+1)+height*(z));
f7 = tex2D(texRef_f5A ,x-1,(y-1)+height*(z));
f8 = tex2D(texRef_f6A ,x+1,(y-1)+height*(z));
f10= tex2D(texRef_f17A,x+1,(y )+height*(z+1));
f12= tex2D(texRef_f15A,x-1,(y )+height*(z+1));
f17= tex2D(texRef_f10A,x-1,(y )+height*(z-1));
f15= tex2D(texRef_f12A,x+1,(y )+height*(z-1));
fout[j+pitch*height*depth*1 ] = f1 ;
fout[j+pitch*height*depth*2 ] = f2 ;
fout[j+pitch*height*depth*3 ] = f3 ;
fout[j+pitch*height*depth*4 ] = f4 ;
fout[j+pitch*height*depth*5 ] = f5 ;
fout[j+pitch*height*depth*6 ] = f6 ;
fout[j+pitch*height*depth*7 ] = f7 ;
fout[j+pitch*height*depth*8 ] = f8 ;
fout[j+pitch*height*depth*9 ] = f9 ;
fout[j+pitch*height*depth*10] = f10;
fout[j+pitch*height*depth*11] = f11;
fout[j+pitch*height*depth*12] = f12;
fout[j+pitch*height*depth*13] = f13;
fout[j+pitch*height*depth*14] = f14;
fout[j+pitch*height*depth*15] = f15;
fout[j+pitch*height*depth*16] = f16;
fout[j+pitch*height*depth*17] = f17;
fout[j+pitch*height*depth*18] = f18;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fin[j];
f2 = tex2D(texRef_f2A ,x ,y-1+height*(z));
f4 = tex2D(texRef_f4A ,x ,y+1+height*(z));
f9 = tex2D(texRef_f9A ,x ,y +height*(z-1));
f11= tex2D(texRef_f11A,x ,y-1+height*(z-1));
f13= tex2D(texRef_f13A,x ,y+1+height*(z-1));
f14= tex2D(texRef_f14A,x ,y +height*(z+1));
f16= tex2D(texRef_f16A,x ,y-1+height*(z+1));
f18= tex2D(texRef_f18A,x ,y+1+height*(z+1));
// f2 = fin[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
// f4 = fin[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
// f9 = fin[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
// f11= fin[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
// f13= fin[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
// f14= fin[f_mem(14,x ,y ,z+1,pitch,height,depth)];
// f16= fin[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
// f18= fin[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
f1 = tex2D(texRef_f1A ,x-1,y +height*(z));
f3 = tex2D(texRef_f3A ,x+1,y +height*(z));
f5 = tex2D(texRef_f5A ,x-1,y-1+height*(z));
f6 = tex2D(texRef_f6A ,x+1,y-1+height*(z));
f7 = tex2D(texRef_f7A ,x+1,y+1+height*(z));
f8 = tex2D(texRef_f8A ,x-1,y+1+height*(z));
f15= tex2D(texRef_f15A,x-1,y +height*(z+1));
f17= tex2D(texRef_f17A,x+1,y +height*(z+1));
f10= tex2D(texRef_f10A,x-1,y +height*(z-1));
f12= tex2D(texRef_f12A,x+1,y +height*(z-1));
if(im == 3)//DirichletWest
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == height-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
if(z == depth-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
// float fInt1,fInt2;//,fDiff;
float u,v,w,rho;
u = 0.0f;//*PoisProf(zcoord)*1.5;
v = uMax;//0.0;
w = 0.0f;
// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18;
// fInt2 = f3+f6+f7+f12+f17;
// rho = u+(fInt1+2.0f*fInt2); //D2Q9i
rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float usqr = u*u+v*v+w*w;
f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
//mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fout[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fout[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fout[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fout[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fout[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fout[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fout[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fout[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fout[f_mem(18,x,y,z,pitch,height,depth)] = f18;
}
}
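//B->A counterpart of mrt_d_textAB, reading from the "B" texture references.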
__global__ void mrt_d_textBA(float* fin, float* fout,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
int im = image[i];
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f2 = tex2D(texRef_f4B ,x ,(y+1)+height*(z ));
f4 = tex2D(texRef_f2B ,x ,(y-1)+height*(z ));
f9 = tex2D(texRef_f14B,x ,(y )+height*(z+1));
f14= tex2D(texRef_f9B ,x ,(y )+height*(z-1));
f11= tex2D(texRef_f18B,x ,(y+1)+height*(z+1));
f18= tex2D(texRef_f11B,x ,(y-1)+height*(z-1));
f16= tex2D(texRef_f13B,x ,(y+1)+height*(z-1));
f13= tex2D(texRef_f16B,x ,(y-1)+height*(z+1));
// f2 = fin[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];
// f4 = fin[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];
// f9 = fin[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];
// f11= fin[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];
// f13= fin[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];
// f14= fin[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];
// f16= fin[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];
// f18= fin[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];
f3 = tex2D(texRef_f1B ,x-1,(y )+height*(z));
f1 = tex2D(texRef_f3B ,x+1,(y )+height*(z));
f5 = tex2D(texRef_f7B ,x+1,(y+1)+height*(z));
f6 = tex2D(texRef_f8B ,x-1,(y+1)+height*(z));
f7 = tex2D(texRef_f5B ,x-1,(y-1)+height*(z));
f8 = tex2D(texRef_f6B ,x+1,(y-1)+height*(z));
f10= tex2D(texRef_f17B,x+1,(y )+height*(z+1));
f12= tex2D(texRef_f15B,x-1,(y )+height*(z+1));
f17= tex2D(texRef_f10B,x-1,(y )+height*(z-1));
f15= tex2D(texRef_f12B,x+1,(y )+height*(z-1));
fout[j+pitch*height*depth*1 ] = f1 ;
fout[j+pitch*height*depth*2 ] = f2 ;
fout[j+pitch*height*depth*3 ] = f3 ;
fout[j+pitch*height*depth*4 ] = f4 ;
fout[j+pitch*height*depth*5 ] = f5 ;
fout[j+pitch*height*depth*6 ] = f6 ;
fout[j+pitch*height*depth*7 ] = f7 ;
fout[j+pitch*height*depth*8 ] = f8 ;
fout[j+pitch*height*depth*9 ] = f9 ;
fout[j+pitch*height*depth*10] = f10;
fout[j+pitch*height*depth*11] = f11;
fout[j+pitch*height*depth*12] = f12;
fout[j+pitch*height*depth*13] = f13;
fout[j+pitch*height*depth*14] = f14;
fout[j+pitch*height*depth*15] = f15;
fout[j+pitch*height*depth*16] = f16;
fout[j+pitch*height*depth*17] = f17;
fout[j+pitch*height*depth*18] = f18;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fin[j];
f2 = tex2D(texRef_f2B ,x ,y-1+height*(z));
f4 = tex2D(texRef_f4B ,x ,y+1+height*(z));
f9 = tex2D(texRef_f9B ,x ,y +height*(z-1));
f11= tex2D(texRef_f11B,x ,y-1+height*(z-1));
f13= tex2D(texRef_f13B,x ,y+1+height*(z-1));
f14= tex2D(texRef_f14B,x ,y +height*(z+1));
f16= tex2D(texRef_f16B,x ,y-1+height*(z+1));
f18= tex2D(texRef_f18B,x ,y+1+height*(z+1));
// f2 = fin[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
// f4 = fin[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
// f9 = fin[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
// f11= fin[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
// f13= fin[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
// f14= fin[f_mem(14,x ,y ,z+1,pitch,height,depth)];
// f16= fin[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
// f18= fin[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
f1 = tex2D(texRef_f1B ,x-1,y +height*(z));
f3 = tex2D(texRef_f3B ,x+1,y +height*(z));
f5 = tex2D(texRef_f5B ,x-1,y-1+height*(z));
f6 = tex2D(texRef_f6B ,x+1,y-1+height*(z));
f7 = tex2D(texRef_f7B ,x+1,y+1+height*(z));
f8 = tex2D(texRef_f8B ,x-1,y+1+height*(z));
f15= tex2D(texRef_f15B,x-1,y +height*(z+1));
f17= tex2D(texRef_f17B,x+1,y +height*(z+1));
f10= tex2D(texRef_f10B,x-1,y +height*(z-1));
f12= tex2D(texRef_f12B,x+1,y +height*(z-1));
if(im == 3)//DirichletWest
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == height-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
if(z == depth-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
// float fInt1,fInt2;//,fDiff;
float u,v,w,rho;
u = 0.0f;//*PoisProf(zcoord)*1.5;
v = uMax;//0.0;
w = 0.0f;
// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18;
// fInt2 = f3+f6+f7+f12+f17;
// rho = u+(fInt1+2.0f*fInt2); //D2Q9i
rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float usqr = u*u+v*v+w*w;
f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
//mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fout[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
fout[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fout[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fout[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fout[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fout[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fout[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fout[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fout[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fout[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fout[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fout[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fout[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fout[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fout[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fout[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fout[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fout[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fout[f_mem(18,x,y,z,pitch,height,depth)] = f18;
}
}
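//Shared-memory variant: each block stages the x-shifted distributions
//(f1,f3,f5,f6,f7,f8,f10,f12,f15,f17) of its own row in shared memory so that
//neighbouring threads can exchange them via threadIdx.x +/- 1 instead of
//re-reading global memory; boundary flags are derived from the coordinates
//here rather than from image[].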
__global__ void mrt_d_shared(float* fA, float* fB,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
// int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
// f1out[j] = tex2D(texRef_f2A,x,y+h*z);
// int i = x+y*blockDim.x*gridDim.x;
//float u,v,w,rho;//,usqr;
// int im = image[i];
int im = 0;
if(y == 0 || z == 0 || x == width-1 || y == height-1 || z == depth-1) im = 1;
else if (x == 0) im = 3;
__shared__ float f1_s[256];
__shared__ float f3_s[256];
__shared__ float f5_s[256];
__shared__ float f7_s[256];
__shared__ float f6_s[256];
__shared__ float f8_s[256];
__shared__ float f10_s[256];
__shared__ float f12_s[256];
__shared__ float f15_s[256];
__shared__ float f17_s[256];
f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch,height,depth)];//dmax(x-1)
f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch,height,depth)];//dmin(x+1,width)
if(y != 0){
f5_s[threadIdx.x] = fA[f_mem(5 ,x ,y-1,z ,pitch,height,depth)];//dmax(x-1)
f8_s[threadIdx.x] = fA[f_mem(8 ,x ,y-1,z ,pitch,height,depth)];//dmax(x-1)
}
if(y != height-1){
f7_s[threadIdx.x] = fA[f_mem(7 ,x ,y+1,z ,pitch,height,depth)];//dmin(x+1,width)
f6_s[threadIdx.x] = fA[f_mem(6 ,x ,y+1,z ,pitch,height,depth)];//dmin(x+1,width)
}
if(z != 0){
f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,z-1,pitch,height,depth)];//dmax(x-1)
f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,z-1,pitch,height,depth)];//dmin(x+1,width)
}
if(z != depth-1){
f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,z+1,pitch,height,depth)];//dmax(x-1)
f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,z+1,pitch,height,depth)];//dmin(x+1,width)
}
// f1_s[threadIdx.x] = fA[f_mem(1 ,x ,y ,z ,pitch,height,depth)];//dmax(x-1)
// f3_s[threadIdx.x] = fA[f_mem(3 ,x ,y ,z ,pitch,height,depth)];//dmin(x+1,width)
// f5_s[threadIdx.x] = fA[f_mem(5 ,x ,dmax(y-1) ,z ,pitch,height,depth)];//dmax(x-1)
// f7_s[threadIdx.x] = fA[f_mem(7 ,x ,dmin(y+1,height),z ,pitch,height,depth)];//dmin(x+1,width)
// f6_s[threadIdx.x] = fA[f_mem(6 ,x ,dmin(y+1,height),z ,pitch,height,depth)];//dmin(x+1,width)
// f8_s[threadIdx.x] = fA[f_mem(8 ,x ,dmax(y-1) ,z ,pitch,height,depth)];//dmax(x-1)
// f10_s[threadIdx.x] = fA[f_mem(10,x ,y ,dmax(z-1) ,pitch,height,depth)];//dmax(x-1)
// f12_s[threadIdx.x] = fA[f_mem(12,x ,y ,dmax(z-1) ,pitch,height,depth)];//dmin(x+1,width)
// f15_s[threadIdx.x] = fA[f_mem(15,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];//dmax(x-1)
// f17_s[threadIdx.x] = fA[f_mem(17,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];//dmin(x+1,width)
//
__syncthreads();
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
if(y != 0){
f4 = fA[f_mem(2 ,x ,y-1 ,z ,pitch,height,depth)];//fA[f_mem(4 ,x,y,z,pitch,height,depth)];
}
if(y != height-1){
f2 = fA[f_mem(4 ,x ,y+1 ,z ,pitch,height,depth)];//fA[f_mem(2 ,x,y,z,pitch,height,depth)];
}
if(z != depth-1){
f9 = fA[f_mem(14,x ,y ,z+1,pitch,height,depth)];//fA[f_mem(9 ,x,y,z,pitch,height,depth)];
f11= fA[f_mem(18,x ,y+1 ,z+1,pitch,height,depth)];//fA[f_mem(11,x,y,z,pitch,height,depth)];
f13= fA[f_mem(16,x ,y-1 ,z+1,pitch,height,depth)];//fA[f_mem(13,x,y,z,pitch,height,depth)];
}
if(z != 0){
f14= fA[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];//fA[f_mem(14,x,y,z,pitch,height,depth)];
f16= fA[f_mem(13,x ,y+1 ,z-1,pitch,height,depth)];//fA[f_mem(16,x,y,z,pitch,height,depth)];
f18= fA[f_mem(11,x ,y-1 ,z-1,pitch,height,depth)];//fA[f_mem(18,x,y,z,pitch,height,depth)];
}
// f2 = fA[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(2 ,x,y,z,pitch,height,depth)];
// f4 = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(4 ,x,y,z,pitch,height,depth)];
// f9 = fA[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(9 ,x,y,z,pitch,height,depth)];
// f11= fA[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(11,x,y,z,pitch,height,depth)];
// f13= fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(13,x,y,z,pitch,height,depth)];
// f14= fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(14,x,y,z,pitch,height,depth)];
// f16= fA[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];//fA[f_mem(16,x,y,z,pitch,height,depth)];
// f18= fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(18,x,y,z,pitch,height,depth)];
// f1 = fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f3 = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,height,depth)];//fA[f_mem(3 ,x,y,z,pitch,height,depth)];
// f5 = fA[f_mem(7 ,dmin(x+1,width),dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(5 ,x,y,z,pitch,height,depth)];
// f7 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(7 ,x,y,z,pitch,height,depth)];
// f6 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(6 ,x,y,z,pitch,height,depth)];
// f8 = fA[f_mem(6 ,dmin(x+1,width),dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(8 ,x,y,z,pitch,height,depth)];
// f10= fA[f_mem(17,dmin(x+1,width),y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(10,x,y,z,pitch,height,depth)];
// f12= fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(12,x,y,z,pitch,height,depth)];
// f15= fA[f_mem(12,dmin(x+1,width),y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(15,x,y,z,pitch,height,depth)];
// f17= fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(17,x,y,z,pitch,height,depth)];
if(threadIdx.x != width-1){
f1 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
f5 = f7_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
f8 = f6_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
f10=f17_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
f15=f12_s[threadIdx.x+1];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
}
if(threadIdx.x != 0){
f3 = f1_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
f7 = f5_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
f6 = f8_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
f17=f10_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
f12=f15_s[threadIdx.x-1];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
}
// f1 = f3_s[dmin(threadIdx.x+1,width)];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f3 = f1_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f5 = f7_s[dmin(threadIdx.x+1,width)];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f7 = f5_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f6 = f8_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f8 = f6_s[dmin(threadIdx.x+1,width)];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f10=f17_s[dmin(threadIdx.x+1,width)];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f17=f10_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f12=f15_s[dmax(threadIdx.x-1 )];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f15=f12_s[dmin(threadIdx.x+1,width)];//fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
fB[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fB[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fB[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fB[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fB[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fB[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fB[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fB[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fB[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fB[f_mem(18,x,y,z,pitch,height,depth)] = f18;
// fB[j+pitch*height*depth*1 ] = f1 ;
// fB[j+pitch*height*depth*2 ] = f2 ;
// fB[j+pitch*height*depth*3 ] = f3 ;
// fB[j+pitch*height*depth*4 ] = f4 ;
// fB[j+pitch*height*depth*5 ] = f5 ;
// fB[j+pitch*height*depth*6 ] = f6 ;
// fB[j+pitch*height*depth*7 ] = f7 ;
// fB[j+pitch*height*depth*8 ] = f8 ;
// fB[j+pitch*height*depth*9 ] = f9 ;
// fB[j+pitch*height*depth*10] = f10;
// fB[j+pitch*height*depth*11] = f11;
// fB[j+pitch*height*depth*12] = f12;
// fB[j+pitch*height*depth*13] = f13;
// fB[j+pitch*height*depth*14] = f14;
// fB[j+pitch*height*depth*15] = f15;
// fB[j+pitch*height*depth*16] = f16;
// fB[j+pitch*height*depth*17] = f17;
// fB[j+pitch*height*depth*18] = f18;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fA[j];
if(y != 0){
f2 = fA[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
}
if(y != height-1){
f4 = fA[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
}
if(z != depth-1){
f14= fA[f_mem(14,x ,y ,z+1,pitch,height,depth)];
f16= fA[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
f18= fA[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
}
if(z != 0){
f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
f11= fA[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
f13= fA[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
}
if(threadIdx.x != width-1){
f3 = f3_s[threadIdx.x+1];//fA[f_mem(3 ,x+1,y ,z ,pitch,height,depth)];
f6 = f6_s[threadIdx.x+1];//fA[f_mem(6 ,x+1,y-1,z ,pitch,height,depth)];
f7 = f7_s[threadIdx.x+1];//fA[f_mem(7 ,x+1,y+1,z ,pitch,height,depth)];
f12=f12_s[threadIdx.x+1];//fA[f_mem(12,x+1,y ,z-1,pitch,height,depth)];
f17=f17_s[threadIdx.x+1];//fA[f_mem(17,x+1,y ,z+1,pitch,height,depth)];
}
if(threadIdx.x != 0){
f1 = f1_s[threadIdx.x-1];//fA[f_mem(1 ,x-1,y ,z ,pitch,height,depth)];
f5 = f5_s[threadIdx.x-1];//fA[f_mem(5 ,x-1,y-1,z ,pitch,height,depth)];
f8 = f8_s[threadIdx.x-1];//fA[f_mem(8 ,x-1,y+1,z ,pitch,height,depth)];
f10=f10_s[threadIdx.x-1];//fA[f_mem(10,x-1,y ,z-1,pitch,height,depth)];
f15=f15_s[threadIdx.x-1];//fA[f_mem(15,x-1,y ,z+1,pitch,height,depth)];
}
// f0 = fA[j];
// f2 = fA[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
// f4 = fA[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
// f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
// f11= fA[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
// f13= fA[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
// f14= fA[f_mem(14,x ,y ,z+1,pitch,height,depth)];
// f16= fA[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
// f18= fA[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
//
// f1 = f1_s[dmax(threadIdx.x-1 )];//fA[f_mem(1 ,x-1,y ,z ,pitch,height,depth)];
// f3 = f3_s[dmin(threadIdx.x+1,width)];//fA[f_mem(3 ,x+1,y ,z ,pitch,height,depth)];
// f5 = f5_s[dmax(threadIdx.x-1 )];//fA[f_mem(5 ,x-1,y-1,z ,pitch,height,depth)];
// f6 = f6_s[dmin(threadIdx.x+1,width)];//fA[f_mem(6 ,x+1,y-1,z ,pitch,height,depth)];
// f7 = f7_s[dmin(threadIdx.x+1,width)];//fA[f_mem(7 ,x+1,y+1,z ,pitch,height,depth)];
// f8 = f8_s[dmax(threadIdx.x-1 )];//fA[f_mem(8 ,x-1,y+1,z ,pitch,height,depth)];
// f10=f10_s[dmax(threadIdx.x-1 )];//fA[f_mem(10,x-1,y ,z-1,pitch,height,depth)];
// f12=f12_s[dmin(threadIdx.x+1,width)];//fA[f_mem(12,x+1,y ,z-1,pitch,height,depth)];
// f15=f15_s[dmax(threadIdx.x-1 )];//fA[f_mem(15,x-1,y ,z+1,pitch,height,depth)];
// f17=f17_s[dmin(threadIdx.x+1,width)];//fA[f_mem(17,x+1,y ,z+1,pitch,height,depth)];
if(im == 3)//DirichletWest
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == height-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
if(z == depth-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
// float fInt1,fInt2;//,fDiff;
float u,v,w,rho;
u = 0.0f;//*PoisProf(zcoord)*1.5;
v = uMax;//0.0;
w = 0.0f;
// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18;
// fInt2 = f3+f6+f7+f12+f17;
// rho = u+(fInt1+2.0f*fInt2); //D2Q9i
rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float usqr = u*u+v*v+w*w;
f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
//mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fB[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fB[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fB[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fB[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fB[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fB[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fB[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fB[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fB[f_mem(18,x,y,z,pitch,height,depth)] = f18;
}
}
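//Reference version: straightforward pull streaming from global memory only,
//with dmin/dmax clamping for the bounce-back (im==1) nodes.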
__global__ void mrt_d_single(float* fA, float* fB,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
// f1out[j] = tex2D(texRef_f2A,x,y+h*z);
// int i = x+y*blockDim.x*gridDim.x;
//float u,v,w,rho;//,usqr;
int im = image[i];
if(im == 1){//BB
float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f1 = fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
f2 = fA[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(2 ,x,y,z,pitch,height,depth)];
f3 = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,height,depth)];//fA[f_mem(3 ,x,y,z,pitch,height,depth)];
f4 = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(4 ,x,y,z,pitch,height,depth)];
f5 = fA[f_mem(7 ,dmin(x+1,width),dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(5 ,x,y,z,pitch,height,depth)];
f7 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(7 ,x,y,z,pitch,height,depth)];
f6 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(6 ,x,y,z,pitch,height,depth)];
f8 = fA[f_mem(6 ,dmin(x+1,width),dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(8 ,x,y,z,pitch,height,depth)];
f9 = fA[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(9 ,x,y,z,pitch,height,depth)];
f10= fA[f_mem(17,dmin(x+1,width),y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(10,x,y,z,pitch,height,depth)];
f11= fA[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(11,x,y,z,pitch,height,depth)];
f12= fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(12,x,y,z,pitch,height,depth)];
f13= fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(13,x,y,z,pitch,height,depth)];
f14= fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(14,x,y,z,pitch,height,depth)];
f15= fA[f_mem(12,dmin(x+1,width),y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(15,x,y,z,pitch,height,depth)];
f16= fA[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];//fA[f_mem(16,x,y,z,pitch,height,depth)];
f17= fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(17,x,y,z,pitch,height,depth)];
f18= fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(18,x,y,z,pitch,height,depth)];
fB[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fB[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fB[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fB[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fB[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fB[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fB[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fB[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fB[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fB[f_mem(18,x,y,z,pitch,height,depth)] = f18;
// fB[j+pitch*height*depth*1 ] = f1 ;
// fB[j+pitch*height*depth*2 ] = f2 ;
// fB[j+pitch*height*depth*3 ] = f3 ;
// fB[j+pitch*height*depth*4 ] = f4 ;
// fB[j+pitch*height*depth*5 ] = f5 ;
// fB[j+pitch*height*depth*6 ] = f6 ;
// fB[j+pitch*height*depth*7 ] = f7 ;
// fB[j+pitch*height*depth*8 ] = f8 ;
// fB[j+pitch*height*depth*9 ] = f9 ;
// fB[j+pitch*height*depth*10] = f10;
// fB[j+pitch*height*depth*11] = f11;
// fB[j+pitch*height*depth*12] = f12;
// fB[j+pitch*height*depth*13] = f13;
// fB[j+pitch*height*depth*14] = f14;
// fB[j+pitch*height*depth*15] = f15;
// fB[j+pitch*height*depth*16] = f16;
// fB[j+pitch*height*depth*17] = f17;
// fB[j+pitch*height*depth*18] = f18;
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fA[j];
f1 = fA[f_mem(1 ,x-1,y ,z ,pitch,height,depth)];
f2 = fA[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
f3 = fA[f_mem(3 ,x+1,y ,z ,pitch,height,depth)];
f4 = fA[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
f5 = fA[f_mem(5 ,x-1,y-1,z ,pitch,height,depth)];
f6 = fA[f_mem(6 ,x+1,y-1,z ,pitch,height,depth)];
f7 = fA[f_mem(7 ,x+1,y+1,z ,pitch,height,depth)];
f8 = fA[f_mem(8 ,x-1,y+1,z ,pitch,height,depth)];
f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
f10= fA[f_mem(10,x-1,y ,z-1,pitch,height,depth)];
f11= fA[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
f12= fA[f_mem(12,x+1,y ,z-1,pitch,height,depth)];
f13= fA[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
f14= fA[f_mem(14,x ,y ,z+1,pitch,height,depth)];
f15= fA[f_mem(15,x-1,y ,z+1,pitch,height,depth)];
f16= fA[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
f17= fA[f_mem(17,x+1,y ,z+1,pitch,height,depth)];
f18= fA[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
if(im == 3)//DirichletWest
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == height-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
if(z == depth-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
// float fInt1,fInt2;//,fDiff;
float u,v,w,rho;
u = 0.0f;//*PoisProf(zcoord)*1.5;
v = uMax;//0.0;
w = 0.0f;
// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18;
// fInt2 = f3+f6+f7+f12+f17;
// rho = u+(fInt1+2.0f*fInt2); //D2Q9i
rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float usqr = u*u+v*v+w*w;
f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
//mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fB[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fB[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fB[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fB[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fB[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fB[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fB[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fB[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fB[f_mem(18,x,y,z,pitch,height,depth)] = f18;
}
}
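//Initializes all 19 distributions of the single structure-of-arrays field to
//the D3Q19 equilibrium (weights 1/3, 1/18, 1/36) at rest (u=v=w=0, rho=1).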
__global__ void initialize_single(float *f,
int width, int height, int depth, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.0f;
v = 0.0f;
w = 0.0f;
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
f[j+0 *pitch*height*depth]= 1.0f/3.0f*(rho-1.5f*usqr);
f[j+1 *pitch*height*depth]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+2 *pitch*height*depth]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+3 *pitch*height*depth]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+4 *pitch*height*depth]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+5 *pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f[j+6 *pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f[j+7 *pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f[j+8 *pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f[j+9 *pitch*height*depth]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+10*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f[j+11*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f[j+12*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f[j+13*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f[j+14*pitch*height*depth]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+15*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f[j+16*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f[j+17*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f[j+18*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
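//Same equilibrium initialization as initialize_single, but with the 19
//distributions passed as separate pointers.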
__global__ void initialize(float* f0, float* f1, float* f2,
float* f3, float* f4, float* f5,
float* f6, float* f7, float* f8, float* f9,
float* f10, float* f11, float* f12,
float* f13, float* f14, float* f15,
float* f16, float* f17, float* f18,
int width, int height, size_t pitch)//pitch in elements
//__global__ void initialize(void** f0in, void** f1in,
// int w, int h, int pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
// int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
// f1out[j] = tex2D(texRef_f2A,x,y+h*z);
float u,v,w,rho,feq,usqr;
rho = 1.0f;
u = 0.0f;
v = 0.0f;
w = 0.0f;
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
feq = 1.0f/3.0f*(rho-1.5f*usqr);
f0[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f1[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f2[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f3[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f4[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f5[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f6[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f7[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f8[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f9[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f10[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f11[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f12[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f13[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f14[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f15[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f16[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f17[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f18[j] = feq;
}
int main(int argc, char *argv[])
{
// float *f0_h, *f1_h, *f2_h, *f3_h, *f4_h, *f5_h, *f6_h, *f7_h, *f8_h, *f9_h;
// float *f10_h, *f11_h, *f12_h, *f13_h, *f14_h, *f15_h, *f16_h, *f17_h, *f18_h;
// float *f0_dA, *f1_dA, *f2_dA, *f3_dA, *f4_dA, *f5_dA, *f6_dA, *f7_dA, *f8_dA, *f9_dA;
// float *f10_dA, *f11_dA, *f12_dA, *f13_dA, *f14_dA, *f15_dA, *f16_dA, *f17_dA, *f18_dA;
// float *f0_dB, *f1_dB, *f2_dB, *f3_dB, *f4_dB, *f5_dB, *f6_dB, *f7_dB, *f8_dB, *f9_dB;
// float *f10_dB, *f11_dB, *f12_dB, *f13_dB, *f14_dB, *f15_dB, *f16_dB, *f17_dB, *f18_dB;
int *image_d, *image_h;
//cudaPitchedPtr f0_d;
ofstream output;
output.open ("LBM1_out.dat");
size_t memsize, memsize_int;
size_t pitch;
int i, n, nBlocks, xDim, yDim, zDim,tMax;
float Re, omega, uMax, CharLength;
int BLOCKSIZEx = 256;
int BLOCKSIZEy = 1;
int BLOCKSIZEz = 1;
xDim = 256;
yDim = 128;
zDim = 32;
tMax = 100;
Re = 100.f;//100.f;
uMax = 0.08f;
CharLength = xDim-2.f;
omega = 1.0f/(3.0f*(uMax*CharLength/Re)+0.5f);
cout<<"omega: "<<omega<<endl;
cout<<"blocksize: "<<BLOCKSIZEx<<"x"<<BLOCKSIZEy<<"x"<<BLOCKSIZEz<<endl;
cout<<"grid: "<<xDim<<"x"<<yDim<<"x"<<zDim<<endl;
cout<<"tMax: "<<tMax<<endl;
nBlocks = (xDim/BLOCKSIZEx+xDim%BLOCKSIZEx)*(yDim/BLOCKSIZEy+yDim%BLOCKSIZEy)
*(zDim/BLOCKSIZEz+zDim%BLOCKSIZEz);
int B = BLOCKSIZEx*BLOCKSIZEy*BLOCKSIZEz;
n = nBlocks*B;//block*dimx*dimy
cout<<"nBlocks:"<<nBlocks<<endl;
dim3 threads(BLOCKSIZEx, BLOCKSIZEy, BLOCKSIZEz);
dim3 grid(xDim/BLOCKSIZEx,yDim/BLOCKSIZEy,zDim/BLOCKSIZEz);
memsize = n*sizeof(float);
memsize_int = n*sizeof(int);
cudaExtent extent = make_cudaExtent(xDim*sizeof(float),yDim,zDim);
image_h = (int *)malloc(memsize_int);
float *fA_h,*fA_d,*fB_d;
fA_h = (float *)malloc(memsize*19);
cudaMallocPitch((void **) &fA_d, &pitch, xDim*sizeof(float), yDim*zDim*19);
cudaMallocPitch((void **) &fB_d, &pitch, xDim*sizeof(float), yDim*zDim*19);
cudaMalloc((void **) &image_d, memsize_int);
cout<<pitch<<endl;
size_t pitch_elements = pitch/sizeof(float);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
for (i = 0; i < n*19; i++)
{
fA_h[i] = i;
}
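// Build the geometry flags: 0 = interior fluid, 1 = bounce-back wall, 3 = Dirichlet inlet on the west (x = 0) face.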
for (i = 0; i < n; i++)
{
int x = i%xDim;
int y = (i/xDim)%yDim;
int z = (i/xDim)/yDim;
fA_h[i] = 0;
image_h[i] = 0;
if(x < 1) image_h[i] = 3;//DirichletWest
if(x > xDim-2) image_h[i] = 1;//BB
if(y < 1) image_h[i] = 1;//BB
if(y > yDim-2) image_h[i] = 1;//BB
if(z < 1) image_h[i] = 1;//BB
if(z > zDim-2) image_h[i] = 1;//BB
}
cudaMemcpy(image_d, image_h, memsize_int, cudaMemcpyHostToDevice);
if(true)//texture settings
{
texRef_f0B.normalized = false;
texRef_f1B.normalized = false;
texRef_f2B.normalized = false;
texRef_f3B.normalized = false;
texRef_f4B.normalized = false;
texRef_f5B.normalized = false;
texRef_f6B.normalized = false;
texRef_f7B.normalized = false;
texRef_f8B.normalized = false;
texRef_f9B.normalized = false;
texRef_f10B.normalized = false;
texRef_f11B.normalized = false;
texRef_f12B.normalized = false;
texRef_f13B.normalized = false;
texRef_f14B.normalized = false;
texRef_f15B.normalized = false;
texRef_f16B.normalized = false;
texRef_f17B.normalized = false;
texRef_f18B.normalized = false;
texRef_f0B.filterMode = cudaFilterModePoint;
texRef_f1B.filterMode = cudaFilterModePoint;
texRef_f2B.filterMode = cudaFilterModePoint;
texRef_f3B.filterMode = cudaFilterModePoint;
texRef_f4B.filterMode = cudaFilterModePoint;
texRef_f5B.filterMode = cudaFilterModePoint;
texRef_f6B.filterMode = cudaFilterModePoint;
texRef_f7B.filterMode = cudaFilterModePoint;
texRef_f8B.filterMode = cudaFilterModePoint;
texRef_f9B.filterMode = cudaFilterModePoint;
texRef_f10B.filterMode = cudaFilterModePoint;
texRef_f11B.filterMode = cudaFilterModePoint;
texRef_f12B.filterMode = cudaFilterModePoint;
texRef_f13B.filterMode = cudaFilterModePoint;
texRef_f14B.filterMode = cudaFilterModePoint;
texRef_f15B.filterMode = cudaFilterModePoint;
texRef_f16B.filterMode = cudaFilterModePoint;
texRef_f17B.filterMode = cudaFilterModePoint;
texRef_f18B.filterMode = cudaFilterModePoint;
texRef_f0A.normalized = false;
texRef_f1A.normalized = false;
texRef_f2A.normalized = false;
texRef_f3A.normalized = false;
texRef_f4A.normalized = false;
texRef_f5A.normalized = false;
texRef_f6A.normalized = false;
texRef_f7A.normalized = false;
texRef_f8A.normalized = false;
texRef_f9A.normalized = false;
texRef_f10A.normalized = false;
texRef_f11A.normalized = false;
texRef_f12A.normalized = false;
texRef_f13A.normalized = false;
texRef_f14A.normalized = false;
texRef_f15A.normalized = false;
texRef_f16A.normalized = false;
texRef_f17A.normalized = false;
texRef_f18A.normalized = false;
texRef_f0A.filterMode = cudaFilterModePoint;
texRef_f1A.filterMode = cudaFilterModePoint;
texRef_f2A.filterMode = cudaFilterModePoint;
texRef_f3A.filterMode = cudaFilterModePoint;
texRef_f4A.filterMode = cudaFilterModePoint;
texRef_f5A.filterMode = cudaFilterModePoint;
texRef_f6A.filterMode = cudaFilterModePoint;
texRef_f7A.filterMode = cudaFilterModePoint;
texRef_f8A.filterMode = cudaFilterModePoint;
texRef_f9A.filterMode = cudaFilterModePoint;
texRef_f10A.filterMode = cudaFilterModePoint;
texRef_f11A.filterMode = cudaFilterModePoint;
texRef_f12A.filterMode = cudaFilterModePoint;
texRef_f13A.filterMode = cudaFilterModePoint;
texRef_f14A.filterMode = cudaFilterModePoint;
texRef_f15A.filterMode = cudaFilterModePoint;
texRef_f16A.filterMode = cudaFilterModePoint;
texRef_f17A.filterMode = cudaFilterModePoint;
texRef_f18A.filterMode = cudaFilterModePoint;
}
cudaMemcpy2D(fA_d ,pitch,fA_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(fB_d ,pitch,fA_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim*19,cudaMemcpyHostToDevice);
for (i = 0; i < n*19; i++)
{
fA_h[i] = 0;
}
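// Bind each of the 19 distribution slices of fA_d and fB_d to its own 2D texture reference
// (width xDim, height yDim*zDim, pitched), so kernels can read the opposite buffer through textures.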
if(true)//bind texture
{
cudaBindTexture2D(0,&texRef_f0A, fA_d ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f1A, fA_d+pitch_elements*yDim*zDim ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f2A, fA_d+pitch_elements*yDim*zDim*2 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f3A, fA_d+pitch_elements*yDim*zDim*3 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f4A, fA_d+pitch_elements*yDim*zDim*4 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f5A, fA_d+pitch_elements*yDim*zDim*5 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f6A, fA_d+pitch_elements*yDim*zDim*6 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f7A, fA_d+pitch_elements*yDim*zDim*7 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f8A, fA_d+pitch_elements*yDim*zDim*8 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f9A, fA_d+pitch_elements*yDim*zDim*9 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f10A,fA_d+pitch_elements*yDim*zDim*10,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f11A,fA_d+pitch_elements*yDim*zDim*11,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f12A,fA_d+pitch_elements*yDim*zDim*12,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f13A,fA_d+pitch_elements*yDim*zDim*13,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f14A,fA_d+pitch_elements*yDim*zDim*14,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f15A,fA_d+pitch_elements*yDim*zDim*15,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f16A,fA_d+pitch_elements*yDim*zDim*16,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f17A,fA_d+pitch_elements*yDim*zDim*17,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f18A,fA_d+pitch_elements*yDim*zDim*18,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f0B, fB_d ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f1B, fB_d+pitch_elements*yDim*zDim ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f2B, fB_d+pitch_elements*yDim*zDim*2 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f3B, fB_d+pitch_elements*yDim*zDim*3 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f4B, fB_d+pitch_elements*yDim*zDim*4 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f5B, fB_d+pitch_elements*yDim*zDim*5 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f6B, fB_d+pitch_elements*yDim*zDim*6 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f7B, fB_d+pitch_elements*yDim*zDim*7 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f8B, fB_d+pitch_elements*yDim*zDim*8 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f9B, fB_d+pitch_elements*yDim*zDim*9 ,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f10B,fB_d+pitch_elements*yDim*zDim*10,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f11B,fB_d+pitch_elements*yDim*zDim*11,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f12B,fB_d+pitch_elements*yDim*zDim*12,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f13B,fB_d+pitch_elements*yDim*zDim*13,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f14B,fB_d+pitch_elements*yDim*zDim*14,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f15B,fB_d+pitch_elements*yDim*zDim*15,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f16B,fB_d+pitch_elements*yDim*zDim*16,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f17B,fB_d+pitch_elements*yDim*zDim*17,&desc,xDim,yDim*zDim,pitch);
cudaBindTexture2D(0,&texRef_f18B,fB_d+pitch_elements*yDim*zDim*18,&desc,xDim,yDim*zDim,pitch);
}
// initialize<<<grid, threads>>>(f0_dA.ptr, f1_dA.ptr, f2_dA.ptr, f3_dA.ptr, f4_dA.ptr, f5_dA.ptr, f6_dA.ptr, f7_dA.ptr, f8_dA.ptr, f9_dA.ptr,
// f10_dA.ptr, f11_dA.ptr, f12_dA.ptr, f13_dA.ptr, f14_dA.ptr, f15_dA.ptr, f16_dA.ptr, f17_dA.ptr, f18_dA.ptr,
// xDim,yDim,pitch);
// initialize<<<grid, threads>>>(f0_dA, f1_dA, f2_dA, f3_dA, f4_dA, f5_dA, f6_dA, f7_dA, f8_dA, f9_dA,
// f10_dA, f11_dA, f12_dA, f13_dA, f14_dA, f15_dA, f16_dA, f17_dA, f18_dA,
// xDim,yDim,pitch_elements);
initialize_single<<<grid, threads>>>(fA_d,xDim,yDim,zDim,pitch_elements);
// cudaFuncSetCacheConfig(mrt_d_single,cudaFuncCachePreferL1);
struct timeval tdr0,tdr1;
double restime;
cudaDeviceSynchronize();
gettimeofday (&tdr0,NULL);
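// Time loop: two mrt_d_shared sub-steps per iteration ping-pong the distributions between the A and B buffers.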
for(int t = 0; t<tMax; t=t+2){
//for(int t = 0; t<tMax; t=t+1){
// mrt_d_single<<<grid, threads>>>(fA_d,fB_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// mrt_d_single<<<grid, threads>>>(fB_d,fA_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// mrt_d_hybAB<<<grid, threads>>>(fA_d,fB_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// mrt_d_hybBA<<<grid, threads>>>(fB_d,fA_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// mrt_d_textAB<<<grid, threads>>>(fA_d,fB_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// mrt_d_textBA<<<grid, threads>>>(fB_d,fA_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
mrt_d_shared<<<grid, threads>>>(fA_d,fB_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
mrt_d_shared<<<grid, threads>>>(fB_d,fA_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// simple_copy<<<grid, threads>>>(fA_d,fB_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// simple_copy<<<grid, threads>>>(fB_d,fA_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n";
}
cudaDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(xDim*yDim*zDim*double(tMax/1000000.f))/restime<<"MLUPS)"<<endl;
cout<<xDim<<","<<yDim<<","<<zDim<<","<<tMax<<","<<restime<<endl;
// copytest<<<grid, threads>>>(f10_dA,test_d,xDim,yDim,zDim);
//copytest<<<grid, threads>>>(test_d);
//copytest<<<grid, threads>>>(image_d);
cudaUnbindTexture(texRef_f0A);
cudaUnbindTexture(texRef_f1A);
cudaUnbindTexture(texRef_f2A);
cudaUnbindTexture(texRef_f3A);
cudaUnbindTexture(texRef_f4A);
cudaUnbindTexture(texRef_f5A);
cudaUnbindTexture(texRef_f6A);
cudaUnbindTexture(texRef_f7A);
cudaUnbindTexture(texRef_f8A);
cudaUnbindTexture(texRef_f9A);
cudaUnbindTexture(texRef_f10A);
cudaUnbindTexture(texRef_f11A);
cudaUnbindTexture(texRef_f12A);
cudaUnbindTexture(texRef_f13A);
cudaUnbindTexture(texRef_f14A);
cudaUnbindTexture(texRef_f15A);
cudaUnbindTexture(texRef_f16A);
cudaUnbindTexture(texRef_f17A);
cudaUnbindTexture(texRef_f18A);
cudaUnbindTexture(texRef_f0B);
cudaUnbindTexture(texRef_f1B);
cudaUnbindTexture(texRef_f2B);
cudaUnbindTexture(texRef_f3B);
cudaUnbindTexture(texRef_f4B);
cudaUnbindTexture(texRef_f5B);
cudaUnbindTexture(texRef_f6B);
cudaUnbindTexture(texRef_f7B);
cudaUnbindTexture(texRef_f8B);
cudaUnbindTexture(texRef_f9B);
cudaUnbindTexture(texRef_f10B);
cudaUnbindTexture(texRef_f11B);
cudaUnbindTexture(texRef_f12B);
cudaUnbindTexture(texRef_f13B);
cudaUnbindTexture(texRef_f14B);
cudaUnbindTexture(texRef_f15B);
cudaUnbindTexture(texRef_f16B);
cudaUnbindTexture(texRef_f17B);
cudaUnbindTexture(texRef_f18B);
// cudaMemcpy2D(f0_h,xDim*sizeof(float) , f0_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f1_h,xDim*sizeof(float) , f1_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f2_h,xDim*sizeof(float) , f2_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f3_h,xDim*sizeof(float) , f3_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f4_h,xDim*sizeof(float) , f4_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f5_h,xDim*sizeof(float) , f5_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f6_h,xDim*sizeof(float) , f6_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f7_h,xDim*sizeof(float) , f7_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f8_h,xDim*sizeof(float) , f8_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f9_h,xDim*sizeof(float) , f9_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f10_h,xDim*sizeof(float),f10_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f11_h,xDim*sizeof(float),f11_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f12_h,xDim*sizeof(float),f12_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f13_h,xDim*sizeof(float),f13_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f14_h,xDim*sizeof(float),f14_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f15_h,xDim*sizeof(float),f15_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f16_h,xDim*sizeof(float),f16_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f17_h,xDim*sizeof(float),f17_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f18_h,xDim*sizeof(float),f18_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
cudaMemcpy2D(fA_h,xDim*sizeof(float),fA_d,pitch,xDim*sizeof(float),yDim*zDim*19,cudaMemcpyDeviceToHost);
// cout<<"f1_h is "<<f1_h[0]<<endl;
//cudaMemcpy(f0_h, f0_d.ptr, memsize, cudaMemcpyDeviceToHost);
cudaMemcpy(image_h, image_d, memsize_int, cudaMemcpyDeviceToHost);
// cout<<image_h[0]<<endl;
// cout<<"test_d: "<<test_h[0]<<endl;
// for(i = 0; i<n; i++){
// cout<<f0_h[i]<<",";
// }
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\"\n";
output<<"ZONE F=POINT, I="<<xDim<<", J="<<yDim<<", K="<<zDim<<"\n";
int row = 0;
int col = 0;
int dep = 0;
i = 0;
float rho, u, v, w;
int j;
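// Recover macroscopic fields on the host: rho is the sum of all 19 distributions,
// and u/v/w are the corresponding first-order moments of the D3Q19 velocity set.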
for(dep = 0; dep<zDim; dep++){
for(row = 0; row<yDim; row++){
for(col = 0; col<xDim; col++){
i = dep*xDim*yDim+row*xDim+col;
// rho = 0;
rho = fA_h[i];
for(j = 1; j<19; j++)
rho+=fA_h[i+xDim*yDim*zDim*j];
// rho = f0_h[i]+f1_h[i]+f2_h[i]+f3_h[i]+f4_h[i]+f5_h[i]+f6_h[i]+f7_h[i]+f8_h[i]+f9_h[i]+
// f10_h[i]+f11_h[i]+f12_h[i]+f13_h[i]+f14_h[i]+f15_h[i]+f16_h[i]+f17_h[i]+f18_h[i];
u = fA_h[i+xDim*yDim*zDim*1]-fA_h[i+xDim*yDim*zDim*3]+fA_h[i+xDim*yDim*zDim*5]-fA_h[i+xDim*yDim*zDim*6]-
fA_h[i+xDim*yDim*zDim*7]+fA_h[i+xDim*yDim*zDim*8]+fA_h[i+xDim*yDim*zDim*10]-fA_h[i+xDim*yDim*zDim*12]
+fA_h[i+xDim*yDim*zDim*15]-fA_h[i+xDim*yDim*zDim*17];
v = fA_h[i+xDim*yDim*zDim*2]-fA_h[i+xDim*yDim*zDim*4]+fA_h[i+xDim*yDim*zDim*5]+fA_h[i+xDim*yDim*zDim*6]-
fA_h[i+xDim*yDim*zDim*7]-fA_h[i+xDim*yDim*zDim*8]+fA_h[i+xDim*yDim*zDim*11]-fA_h[i+xDim*yDim*zDim*13]
+fA_h[i+xDim*yDim*zDim*16]-fA_h[i+xDim*yDim*zDim*18];
w = fA_h[i+xDim*yDim*zDim*9]+fA_h[i+xDim*yDim*zDim*10]+fA_h[i+xDim*yDim*zDim*11]+fA_h[i+xDim*yDim*zDim*12]+
fA_h[i+xDim*yDim*zDim*13]-fA_h[i+xDim*yDim*zDim*14]-fA_h[i+xDim*yDim*zDim*15]-fA_h[i+xDim*yDim*zDim*16]
-fA_h[i+xDim*yDim*zDim*17]-fA_h[i+xDim*yDim*zDim*18];
output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<w<<","<<rho<<endl;
// output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<fA_h[i+xDim*yDim*zDim*1]<<","<<rho<<endl;
}
}
}
output.close();
cudaFree(image_d);
// cudaFree(f0_dA);
// cudaFree(f1_dA);
// cudaFree(f2_dA);
// cudaFree(f3_dA);
// cudaFree(f4_dA);
// cudaFree(f5_dA);
// cudaFree(f6_dA);
// cudaFree(f7_dA);
// cudaFree(f8_dA);
// cudaFree(f9_dA);
// cudaFree(f10_dA);
// cudaFree(f11_dA);
// cudaFree(f12_dA);
// cudaFree(f13_dA);
// cudaFree(f14_dA);
// cudaFree(f15_dA);
// cudaFree(f16_dA);
// cudaFree(f17_dA);
// cudaFree(f18_dA);
// cudaFree(f0_dB);
// cudaFree(f1_dB);
// cudaFree(f2_dB);
// cudaFree(f3_dB);
// cudaFree(f4_dB);
// cudaFree(f5_dB);
// cudaFree(f6_dB);
// cudaFree(f7_dB);
// cudaFree(f8_dB);
// cudaFree(f9_dB);
// cudaFree(f10_dB);
// cudaFree(f11_dB);
// cudaFree(f12_dB);
// cudaFree(f13_dB);
// cudaFree(f14_dB);
// cudaFree(f15_dB);
// cudaFree(f16_dB);
// cudaFree(f17_dB);
// cudaFree(f18_dB);
cudaFree(fA_d);
cudaFree(fB_d);
return(0);
}
|
aaa3e8b2fc0963069586ac5cd3f46f8082358fa1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "VecGeom/management/BVHManager.h"
#include <err.h>
using namespace vecgeom;
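// Device-side sanity check: looks up the BVH for the given volume id and prints it if present.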
__global__ void check_device_bvh_kernel(int id)
{
if (BVH const *bvh = BVHManager::GetBVH(id)) bvh->Print();
}
void check_device_bvh(int id)
{
hipLaunchKernelGGL(( check_device_bvh_kernel), dim3(1), dim3(1), 0, 0, id);
if (hipDeviceSynchronize() != hipSuccess) warnx("Invalid BVH for volume with id = %d\n", id);
}
|
aaa3e8b2fc0963069586ac5cd3f46f8082358fa1.cu
|
#include "VecGeom/management/BVHManager.h"
#include <err.h>
using namespace vecgeom;
__global__ void check_device_bvh_kernel(int id)
{
if (BVH const *bvh = BVHManager::GetBVH(id)) bvh->Print();
}
void check_device_bvh(int id)
{
check_device_bvh_kernel<<<1, 1>>>(id);
if (cudaDeviceSynchronize() != cudaSuccess) warnx("Invalid BVH for volume with id = %d\n", id);
}
|
d4cef54d0f0908a50bc7b612062d089ba091a2ee.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//
// This sample demonstrates dynamic global memory allocation through device C++ new and delete operators and virtual function declarations available with CUDA 4.0.
#include <stdio.h>
#include <hip/hip_cooperative_groups.h>
namespace cg = cooperative_groups;
#include <helper_cuda.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
const char *sSDKsample = "newdelete";
#include "container.hpp"
/////////////////////////////////////////////////////////////////////////////
//
// Kernels to allocate and instantiate Container objects on the device heap
//
////////////////////////////////////////////////////////////////////////////
__global__
void vectorCreate(Container<int> **g_container, int max_size)
{
// The Vector object and the data storage are allocated in device heap memory.
// This makes it persistent for the lifetime of the CUDA context.
// The grid has only one thread as only a single object instance is needed.
*g_container = new Vector<int>(max_size);
}
/////////////////////////////////////////////////////////////////////////////
//
// Kernels to fill and consume shared Container objects.
//
////////////////////////////////////////////////////////////////////////////
__global__
void containerFill(Container<int> **g_container)
{
// All threads of the grid cooperatively populate the shared Container object with data.
if (threadIdx.x == 0)
{
(*g_container)->push(blockIdx.x);
}
}
__global__
void containerConsume(Container<int> **g_container, int *d_result)
{
// All threads of the grid cooperatively consume the data from the shared Container object.
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int v;
if ((*g_container)->pop(v))
{
d_result[idx] = v;
}
else
{
d_result[idx] = -1;
}
}
/////////////////////////////////////////////////////////////////////////////
//
// Kernel to delete shared Container objects.
//
////////////////////////////////////////////////////////////////////////////
__global__
void containerDelete(Container<int> **g_container)
{
delete *g_container;
}
///////////////////////////////////////////////////////////////////////////////////////////
//
// Kernels using placement new to place shared Vector objects and their data in shared memory
//
///////////////////////////////////////////////////////////////////////////////////////////
__global__
void placementNew(int *d_result)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ unsigned char __align__(8) s_buffer[sizeof(Vector<int>)];
__shared__ int __align__(8) s_data[1024];
__shared__ Vector<int> *s_vector;
// The first thread of the block initializes the shared Vector object.
// The placement new operator enables the Vector object and the data array to be placed in shared memory.
if (threadIdx.x == 0)
{
s_vector = new(s_buffer) Vector<int>(1024, s_data);
}
cg::sync(cta);
if ((threadIdx.x & 1) == 0)
{
s_vector->push(threadIdx.x >> 1);
}
// Need to sync as the vector implementation does not support concurrent push/pop operations.
cg::sync(cta);
int v;
if (s_vector->pop(v))
{
d_result[threadIdx.x] = v;
}
else
{
d_result[threadIdx.x] = -1;
}
// Note: deleting objects placed in shared memory is not necessary (lifetime of shared memory is that of the block)
}
struct ComplexType_t
{
int a;
int b;
float c;
float d;
};
__global__
void complexVector(int *d_result)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ unsigned char __align__(8) s_buffer[sizeof(Vector<ComplexType_t>)];
__shared__ ComplexType_t __align__(8) s_data[1024];
__shared__ Vector<ComplexType_t> *s_vector;
// The first thread of the block initializes the shared Vector object.
// The placement new operator enables the Vector object and the data array to be placed in shared memory.
if (threadIdx.x == 0)
{
s_vector = new(s_buffer) Vector<ComplexType_t>(1024, s_data);
}
cg::sync(cta);
if ((threadIdx.x & 1) == 0)
{
ComplexType_t data;
data.a = threadIdx.x >> 1;
data.b = blockIdx.x;
data.c = threadIdx.x / (float)(blockDim.x);
data.d = blockIdx.x / (float)(gridDim.x);
s_vector->push(data);
}
cg::sync(cta);
ComplexType_t v;
if (s_vector->pop(v))
{
d_result[threadIdx.x] = v.a;
}
else
{
d_result[threadIdx.x] = -1;
}
// Note: deleting objects placed in shared memory is not necessary (lifetime of shared memory is that of the block)
}
///////////////////////////////////////////////////////////////////////////////////////////
//
// Host code
//
///////////////////////////////////////////////////////////////////////////////////////////
bool checkResult(int *d_result, int N)
{
std::vector<int> h_result;
h_result.resize(N);
checkCudaErrors(hipMemcpy(&h_result[0], d_result, N*sizeof(int), hipMemcpyDeviceToHost));
std::sort(h_result.begin(), h_result.end());
bool success = true;
bool test = false;
int value=0;
for (int i=0; i < N; ++i)
{
if (h_result[i] != -1)
{
test = true;
}
if (test && (value++) != h_result[i])
{
success = false;
}
}
return success;
}
bool testContainer(Container<int> **d_container, int blocks, int threads)
{
int *d_result;
hipMalloc(&d_result, blocks*threads*sizeof(int));
hipLaunchKernelGGL(( containerFill), dim3(blocks),dim3(threads), 0, 0, d_container);
hipLaunchKernelGGL(( containerConsume), dim3(blocks),dim3(threads), 0, 0, d_container, d_result);
hipLaunchKernelGGL(( containerDelete), dim3(1),dim3(1), 0, 0, d_container);
checkCudaErrors(hipDeviceSynchronize());
bool success = checkResult(d_result, blocks*threads);
hipFree(d_result);
return success;
}
bool testPlacementNew(int threads)
{
int *d_result;
hipMalloc(&d_result, threads*sizeof(int));
hipLaunchKernelGGL(( placementNew), dim3(1), dim3(threads), 0, 0, d_result);
checkCudaErrors(hipDeviceSynchronize());
bool success = checkResult(d_result, threads);
hipFree(d_result);
return success;
}
bool testComplexType(int threads)
{
int *d_result;
hipMalloc(&d_result, threads*sizeof(int));
hipLaunchKernelGGL(( complexVector), dim3(1), dim3(threads), 0, 0, d_result);
checkCudaErrors(hipDeviceSynchronize());
bool success = checkResult(d_result, threads);
hipFree(d_result);
return success;
}
///////////////////////////////////////////////////////////////////////////////////////////
//
// MAIN
//
///////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
int cuda_device = 0;
printf("%s Starting...\n\n", sSDKsample);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
cuda_device = findCudaDevice(argc, (const char **)argv);
// set the heap size for device-side new/delete to 128 MB
checkCudaErrors(hipDeviceSetLimit(hipLimitMallocHeapSize, 128 * (1 << 20)));
Container<int> **d_container;
checkCudaErrors(hipMalloc(&d_container, sizeof(Container<int> **)));
bool bTest = false;
int test_passed = 0;
printf(" > Container = Vector test ");
hipLaunchKernelGGL(( vectorCreate), dim3(1),dim3(1), 0, 0, d_container, 128 * 128);
bTest = testContainer(d_container, 128, 128);
printf(bTest ? "OK\n\n" : "NOT OK\n\n");
test_passed += (bTest ? 1 : 0);
checkCudaErrors(hipFree(d_container));
printf(" > Container = Vector, using placement new on SMEM buffer test ");
bTest = testPlacementNew(1024);
printf(bTest ? "OK\n\n" : "NOT OK\n\n");
test_passed += (bTest ? 1 : 0);
printf(" > Container = Vector, with user defined datatype test ");
bTest = testComplexType(1024);
printf(bTest ? "OK\n\n" : "NOT OK\n\n");
test_passed += (bTest ? 1 : 0);
printf("Test Summary: %d/3 succesfully run\n", test_passed);
exit(test_passed==3 ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
d4cef54d0f0908a50bc7b612062d089ba091a2ee.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//
// This sample demonstrates dynamic global memory allocation through device C++ new and delete operators and virtual function declarations available with CUDA 4.0.
#include <stdio.h>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
#include <helper_cuda.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
const char *sSDKsample = "newdelete";
#include "container.hpp"
/////////////////////////////////////////////////////////////////////////////
//
// Kernels to allocate and instantiate Container objects on the device heap
//
////////////////////////////////////////////////////////////////////////////
__global__
void vectorCreate(Container<int> **g_container, int max_size)
{
// The Vector object and the data storage are allocated in device heap memory.
// This makes it persistent for the lifetime of the CUDA context.
// The grid has only one thread as only a single object instance is needed.
*g_container = new Vector<int>(max_size);
}
/////////////////////////////////////////////////////////////////////////////
//
// Kernels to fill and consume shared Container objects.
//
////////////////////////////////////////////////////////////////////////////
__global__
void containerFill(Container<int> **g_container)
{
// All threads of the grid cooperatively populate the shared Container object with data.
if (threadIdx.x == 0)
{
(*g_container)->push(blockIdx.x);
}
}
__global__
void containerConsume(Container<int> **g_container, int *d_result)
{
// All threads of the grid cooperatively consume the data from the shared Container object.
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int v;
if ((*g_container)->pop(v))
{
d_result[idx] = v;
}
else
{
d_result[idx] = -1;
}
}
/////////////////////////////////////////////////////////////////////////////
//
// Kernel to delete shared Container objects.
//
////////////////////////////////////////////////////////////////////////////
__global__
void containerDelete(Container<int> **g_container)
{
delete *g_container;
}
///////////////////////////////////////////////////////////////////////////////////////////
//
// Kernels using placement new to place shared Vector objects and their data in shared memory
//
///////////////////////////////////////////////////////////////////////////////////////////
__global__
void placementNew(int *d_result)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ unsigned char __align__(8) s_buffer[sizeof(Vector<int>)];
__shared__ int __align__(8) s_data[1024];
__shared__ Vector<int> *s_vector;
// The first thread of the block initializes the shared Vector object.
// The placement new operator enables the Vector object and the data array to be placed in shared memory.
if (threadIdx.x == 0)
{
s_vector = new(s_buffer) Vector<int>(1024, s_data);
}
cg::sync(cta);
if ((threadIdx.x & 1) == 0)
{
s_vector->push(threadIdx.x >> 1);
}
// Need to sync as the vector implementation does not support concurrent push/pop operations.
cg::sync(cta);
int v;
if (s_vector->pop(v))
{
d_result[threadIdx.x] = v;
}
else
{
d_result[threadIdx.x] = -1;
}
// Note: deleting objects placed in shared memory is not necessary (lifetime of shared memory is that of the block)
}
struct ComplexType_t
{
int a;
int b;
float c;
float d;
};
__global__
void complexVector(int *d_result)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ unsigned char __align__(8) s_buffer[sizeof(Vector<ComplexType_t>)];
__shared__ ComplexType_t __align__(8) s_data[1024];
__shared__ Vector<ComplexType_t> *s_vector;
// The first thread of the block initializes the shared Vector object.
// The placement new operator enables the Vector object and the data array to be placed in shared memory.
if (threadIdx.x == 0)
{
s_vector = new(s_buffer) Vector<ComplexType_t>(1024, s_data);
}
cg::sync(cta);
if ((threadIdx.x & 1) == 0)
{
ComplexType_t data;
data.a = threadIdx.x >> 1;
data.b = blockIdx.x;
data.c = threadIdx.x / (float)(blockDim.x);
data.d = blockIdx.x / (float)(gridDim.x);
s_vector->push(data);
}
cg::sync(cta);
ComplexType_t v;
if (s_vector->pop(v))
{
d_result[threadIdx.x] = v.a;
}
else
{
d_result[threadIdx.x] = -1;
}
// Note: deleting objects placed in shared memory is not necessary (lifetime of shared memory is that of the block)
}
///////////////////////////////////////////////////////////////////////////////////////////
//
// Host code
//
///////////////////////////////////////////////////////////////////////////////////////////
bool checkResult(int *d_result, int N)
{
std::vector<int> h_result;
h_result.resize(N);
checkCudaErrors(cudaMemcpy(&h_result[0], d_result, N*sizeof(int), cudaMemcpyDeviceToHost));
std::sort(h_result.begin(), h_result.end());
bool success = true;
bool test = false;
int value=0;
for (int i=0; i < N; ++i)
{
if (h_result[i] != -1)
{
test = true;
}
if (test && (value++) != h_result[i])
{
success = false;
}
}
return success;
}
bool testContainer(Container<int> **d_container, int blocks, int threads)
{
int *d_result;
cudaMalloc(&d_result, blocks*threads*sizeof(int));
containerFill<<<blocks,threads>>>(d_container);
containerConsume<<<blocks,threads>>>(d_container, d_result);
containerDelete<<<1,1>>>(d_container);
checkCudaErrors(cudaDeviceSynchronize());
bool success = checkResult(d_result, blocks*threads);
cudaFree(d_result);
return success;
}
bool testPlacementNew(int threads)
{
int *d_result;
cudaMalloc(&d_result, threads*sizeof(int));
placementNew<<<1, threads>>>(d_result);
checkCudaErrors(cudaDeviceSynchronize());
bool success = checkResult(d_result, threads);
cudaFree(d_result);
return success;
}
bool testComplexType(int threads)
{
int *d_result;
cudaMalloc(&d_result, threads*sizeof(int));
complexVector<<<1, threads>>>(d_result);
checkCudaErrors(cudaDeviceSynchronize());
bool success = checkResult(d_result, threads);
cudaFree(d_result);
return success;
}
///////////////////////////////////////////////////////////////////////////////////////////
//
// MAIN
//
///////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
int cuda_device = 0;
printf("%s Starting...\n\n", sSDKsample);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
cuda_device = findCudaDevice(argc, (const char **)argv);
// set the heap size for device-side new/delete to 128 MB
checkCudaErrors(cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128 * (1 << 20)));
Container<int> **d_container;
checkCudaErrors(cudaMalloc(&d_container, sizeof(Container<int> **)));
bool bTest = false;
int test_passed = 0;
printf(" > Container = Vector test ");
vectorCreate<<<1,1>>>(d_container, 128 * 128);
bTest = testContainer(d_container, 128, 128);
printf(bTest ? "OK\n\n" : "NOT OK\n\n");
test_passed += (bTest ? 1 : 0);
checkCudaErrors(cudaFree(d_container));
printf(" > Container = Vector, using placement new on SMEM buffer test ");
bTest = testPlacementNew(1024);
printf(bTest ? "OK\n\n" : "NOT OK\n\n");
test_passed += (bTest ? 1 : 0);
printf(" > Container = Vector, with user defined datatype test ");
bTest = testComplexType(1024);
printf(bTest ? "OK\n\n" : "NOT OK\n\n");
test_passed += (bTest ? 1 : 0);
printf("Test Summary: %d/3 succesfully run\n", test_passed);
exit(test_passed==3 ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
3fe48279e992fb8bbef995f7dbb560afc492fc80.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// omega = curl(u)
// omega and u are vector fields.
#ifdef CURL_LAUNCH_BOUNDS
__launch_bounds__(NX_TILE*NY_TILE,4)
__global__ void
curl_kernel_lb(const real * __restrict__ u, real * __restrict__ omega, const real xfactor,
const real yfactor, const real zfactor)
#else
__global__ void
curl_kernel_default(const real * __restrict__ u, real * __restrict__ omega, const real xfactor,
const real yfactor, const real zfactor)
#endif
{
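// Marches along z while keeping 7-point stencils of the x- and y-velocity components in registers,
// and stages the current x-y tile (plus NGHOST halo cells) of all three components in shared memory.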
__shared__ real us[3][NY_TILE + 2 * NGHOST][NX_TILE + 2 * NGHOST];
// Local indices
const int xli = threadIdx.x + NGHOST;
const int yli = threadIdx.y + NGHOST;
// Global indices
const int xi = blockIdx.x * blockDim.x + threadIdx.x + NGHOST;
const int yi = blockIdx.y * blockDim.y + threadIdx.y + NGHOST;
// Z-wise iteration values
real xzbehind3,
xzbehind2 = u[vfidx(xi, yi, 0, 0)],
xzbehind1 = u[vfidx(xi, yi, 1, 0)],
xzcurrent = u[vfidx(xi, yi, 2, 0)],
xzforward1 = u[vfidx(xi, yi, 3, 0)],
xzforward2 = u[vfidx(xi, yi, 4, 0)],
xzforward3 = u[vfidx(xi, yi, 5, 0)];
real yzbehind3,
yzbehind2 = u[vfidx(xi, yi, 0, 1)],
yzbehind1 = u[vfidx(xi, yi, 1, 1)],
yzcurrent = u[vfidx(xi, yi, 2, 1)],
yzforward1 = u[vfidx(xi, yi, 3, 1)],
yzforward2 = u[vfidx(xi, yi, 4, 1)],
yzforward3 = u[vfidx(xi, yi, 5, 1)];
for (int zi = NGHOST; zi < NZ + NGHOST; zi++) {
// Iterate through z dimension in registers
xzbehind3 = xzbehind2;
xzbehind2 = xzbehind1;
xzbehind1 = xzcurrent;
xzcurrent = xzforward1;
xzforward1 = xzforward2;
xzforward2 = xzforward3;
xzforward3 = u[vfidx(xi, yi, zi + 3, 0)];
yzbehind3 = yzbehind2;
yzbehind2 = yzbehind1;
yzbehind1 = yzcurrent;
yzcurrent = yzforward1;
yzforward1 = yzforward2;
yzforward2 = yzforward3;
yzforward3 = u[vfidx(xi, yi, zi + 3, 1)];
// Load x-y tiles to shared memory
__syncthreads();
us[0][yli][xli] = xzcurrent;
us[1][yli][xli] = yzcurrent;
us[2][yli][xli] = u[vfidx(xi, yi, zi, 2)];
if (threadIdx.x < NGHOST) {
us[1][yli][xli - NGHOST] = u[vfidx(xi - NGHOST, yi, zi, 1)];
us[1][yli][xli + NX_TILE] = u[vfidx(xi + NX_TILE, yi, zi, 1)];
us[2][yli][xli - NGHOST] = u[vfidx(xi - NGHOST, yi, zi, 2)];
us[2][yli][xli + NX_TILE] = u[vfidx(xi + NX_TILE, yi, zi, 2)];
}
if (threadIdx.y < NGHOST) {
us[0][yli - NGHOST][xli] = u[vfidx(xi, yi - NGHOST, zi, 0)];
us[0][yli + NY_TILE][xli] = u[vfidx(xi, yi + NY_TILE, zi, 0)];
us[2][yli - NGHOST][xli] = u[vfidx(xi, yi - NGHOST, zi, 2)];
us[2][yli + NY_TILE][xli] = u[vfidx(xi, yi + NY_TILE, zi, 2)];
}
__syncthreads();
// Compute the curl
real d1, d2;
// zdy - ydz
d2 = zfactor * fd1D(yzbehind3, yzbehind2, yzbehind1, yzforward1, yzforward2, yzforward3);
d1 = yfactor * fd1D(us[2][yli - 3][xli], us[2][yli - 2][xli], us[2][yli - 1][xli],
us[2][yli + 1][xli], us[2][yli + 2][xli], us[2][yli + 3][xli]);
omega[vfidx(xi, yi, zi, 0)] = d1 - d2;
// xdz - zdx
d1 = zfactor * fd1D(xzbehind3, xzbehind2, xzbehind1, xzforward1, xzforward2, xzforward3);
d2 = xfactor * fd1D(us[2][yli][xli - 3], us[2][yli][xli - 2], us[2][yli][xli - 1],
us[2][yli][xli + 1], us[2][yli][xli + 2], us[2][yli][xli + 3]);
omega[vfidx(xi, yi, zi, 1)] = d1 - d2;
// ydx - xdy
d1 = xfactor * fd1D(us[1][yli][xli - 3], us[1][yli][xli - 2], us[1][yli][xli - 1],
us[1][yli][xli + 1], us[1][yli][xli + 2], us[1][yli][xli + 3]);
d2 = yfactor * fd1D(us[0][yli - 3][xli], us[0][yli - 2][xli], us[0][yli - 1][xli],
us[0][yli + 1][xli], us[0][yli + 2][xli], us[0][yli + 3][xli]);
omega[vfidx(xi, yi, zi, 2)] = d1 - d2;
}
}
#ifdef CURL_LAUNCH_BOUNDS
void
curl_lb(vf3dgpu &u, vf3dgpu &omega)
{
hipLaunchKernelGGL(( curl_kernel_lb), dim3(xy_tile.nblocks), dim3(xy_tile.nthreads), 0, 0, u.mem(), omega.mem(),
1.0/dx, 1.0/dy, 1.0/dz);
}
#else
void
curl_default(vf3dgpu &u, vf3dgpu &omega)
{
hipLaunchKernelGGL(( curl_kernel_default), dim3(xy_tile.nblocks), dim3(xy_tile.nthreads), 0, 0, u.mem(), omega.mem(),
1.0/dx, 1.0/dy, 1.0/dz);
}
#endif
|
3fe48279e992fb8bbef995f7dbb560afc492fc80.cu
|
// omega = curl(u)
// omega and u are vector fields.
#ifdef CURL_LAUNCH_BOUNDS
__launch_bounds__(NX_TILE*NY_TILE,4)
__global__ void
curl_kernel_lb(const real * __restrict__ u, real * __restrict__ omega, const real xfactor,
const real yfactor, const real zfactor)
#else
__global__ void
curl_kernel_default(const real * __restrict__ u, real * __restrict__ omega, const real xfactor,
const real yfactor, const real zfactor)
#endif
{
__shared__ real us[3][NY_TILE + 2 * NGHOST][NX_TILE + 2 * NGHOST];
// Local indices
const int xli = threadIdx.x + NGHOST;
const int yli = threadIdx.y + NGHOST;
// Global indices
const int xi = blockIdx.x * blockDim.x + threadIdx.x + NGHOST;
const int yi = blockIdx.y * blockDim.y + threadIdx.y + NGHOST;
// Z-wise iteration values
real xzbehind3,
xzbehind2 = u[vfidx(xi, yi, 0, 0)],
xzbehind1 = u[vfidx(xi, yi, 1, 0)],
xzcurrent = u[vfidx(xi, yi, 2, 0)],
xzforward1 = u[vfidx(xi, yi, 3, 0)],
xzforward2 = u[vfidx(xi, yi, 4, 0)],
xzforward3 = u[vfidx(xi, yi, 5, 0)];
real yzbehind3,
yzbehind2 = u[vfidx(xi, yi, 0, 1)],
yzbehind1 = u[vfidx(xi, yi, 1, 1)],
yzcurrent = u[vfidx(xi, yi, 2, 1)],
yzforward1 = u[vfidx(xi, yi, 3, 1)],
yzforward2 = u[vfidx(xi, yi, 4, 1)],
yzforward3 = u[vfidx(xi, yi, 5, 1)];
for (int zi = NGHOST; zi < NZ + NGHOST; zi++) {
// Iterate through z dimension in registers
xzbehind3 = xzbehind2;
xzbehind2 = xzbehind1;
xzbehind1 = xzcurrent;
xzcurrent = xzforward1;
xzforward1 = xzforward2;
xzforward2 = xzforward3;
xzforward3 = u[vfidx(xi, yi, zi + 3, 0)];
yzbehind3 = yzbehind2;
yzbehind2 = yzbehind1;
yzbehind1 = yzcurrent;
yzcurrent = yzforward1;
yzforward1 = yzforward2;
yzforward2 = yzforward3;
yzforward3 = u[vfidx(xi, yi, zi + 3, 1)];
// Load x-y tiles to shared memory
__syncthreads();
us[0][yli][xli] = xzcurrent;
us[1][yli][xli] = yzcurrent;
us[2][yli][xli] = u[vfidx(xi, yi, zi, 2)];
if (threadIdx.x < NGHOST) {
us[1][yli][xli - NGHOST] = u[vfidx(xi - NGHOST, yi, zi, 1)];
us[1][yli][xli + NX_TILE] = u[vfidx(xi + NX_TILE, yi, zi, 1)];
us[2][yli][xli - NGHOST] = u[vfidx(xi - NGHOST, yi, zi, 2)];
us[2][yli][xli + NX_TILE] = u[vfidx(xi + NX_TILE, yi, zi, 2)];
}
if (threadIdx.y < NGHOST) {
us[0][yli - NGHOST][xli] = u[vfidx(xi, yi - NGHOST, zi, 0)];
us[0][yli + NY_TILE][xli] = u[vfidx(xi, yi + NY_TILE, zi, 0)];
us[2][yli - NGHOST][xli] = u[vfidx(xi, yi - NGHOST, zi, 2)];
us[2][yli + NY_TILE][xli] = u[vfidx(xi, yi + NY_TILE, zi, 2)];
}
__syncthreads();
// Compute the curl
real d1, d2;
// zdy - ydz
d2 = zfactor * fd1D(yzbehind3, yzbehind2, yzbehind1, yzforward1, yzforward2, yzforward3);
d1 = yfactor * fd1D(us[2][yli - 3][xli], us[2][yli - 2][xli], us[2][yli - 1][xli],
us[2][yli + 1][xli], us[2][yli + 2][xli], us[2][yli + 3][xli]);
omega[vfidx(xi, yi, zi, 0)] = d1 - d2;
// xdz - zdx
d1 = zfactor * fd1D(xzbehind3, xzbehind2, xzbehind1, xzforward1, xzforward2, xzforward3);
d2 = xfactor * fd1D(us[2][yli][xli - 3], us[2][yli][xli - 2], us[2][yli][xli - 1],
us[2][yli][xli + 1], us[2][yli][xli + 2], us[2][yli][xli + 3]);
omega[vfidx(xi, yi, zi, 1)] = d1 - d2;
// ydx - xdy
d1 = xfactor * fd1D(us[1][yli][xli - 3], us[1][yli][xli - 2], us[1][yli][xli - 1],
us[1][yli][xli + 1], us[1][yli][xli + 2], us[1][yli][xli + 3]);
d2 = yfactor * fd1D(us[0][yli - 3][xli], us[0][yli - 2][xli], us[0][yli - 1][xli],
us[0][yli + 1][xli], us[0][yli + 2][xli], us[0][yli + 3][xli]);
omega[vfidx(xi, yi, zi, 2)] = d1 - d2;
}
}
#ifdef CURL_LAUNCH_BOUNDS
void
curl_lb(vf3dgpu &u, vf3dgpu &omega)
{
curl_kernel_lb<<<xy_tile.nblocks, xy_tile.nthreads>>>(u.mem(), omega.mem(),
1.0/dx, 1.0/dy, 1.0/dz);
}
#else
void
curl_default(vf3dgpu &u, vf3dgpu &omega)
{
curl_kernel_default<<<xy_tile.nblocks, xy_tile.nthreads>>>(u.mem(), omega.mem(),
1.0/dx, 1.0/dy, 1.0/dz);
}
#endif
|
e88f648c951c5a5513a71486cd60b193072e608b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define COALESCED_NUM 16
#define blockDimX 16
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define A(y,x) A[(y)*WIDTH_A+(x)]
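// Gathers a 3x3 neighborhood around each output pixel through a shared-memory row buffer and
// reduces it with cal() (defined elsewhere), presumably the regional maximum given the kernel name.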
__global__ void imregionmax(float * A, float * C, int width)
{
__shared__ float shared_0[32];
float temp[9];
int t;
int i;
t=0;
#pragma unroll
for (i=0; i<3; i=(i+1))
{
{
int it_1;
shared_0[(tidx+0)]=A(((idy+(-1*i))+16), (idx+(-1*0)));
shared_0[(tidx+16)]=A(((idy+(-1*i))+16), ((idx+(-1*0))+16));
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(-1*it_1))+16)];
temp[t]=a;
t=(t+1);
}
__syncthreads();
}
}
{
C(idy, idx)=cal(temp);
}
}
|
e88f648c951c5a5513a71486cd60b193072e608b.cu
|
#define COALESCED_NUM 16
#define blockDimX 16
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define A(y,x) A[(y)*WIDTH_A+(x)]
__global__ void imregionmax(float * A, float * C, int width)
{
__shared__ float shared_0[32];
float temp[9];
int t;
int i;
t=0;
#pragma unroll
for (i=0; i<3; i=(i+1))
{
{
int it_1;
shared_0[(tidx+0)]=A(((idy+(-1*i))+16), (idx+(-1*0)));
shared_0[(tidx+16)]=A(((idy+(-1*i))+16), ((idx+(-1*0))+16));
__syncthreads();
#pragma unroll
for (it_1=0; it_1<3; it_1=(it_1+1))
{
float a;
a=shared_0[((tidx+(-1*it_1))+16)];
temp[t]=a;
t=(t+1);
}
__syncthreads();
}
}
{
C(idy, idx)=cal(temp);
}
}
|
85997e7b4e132b4be305d1c337c103988ce2dd0b.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "SSys.hh"
#include <string>
#include <sstream>
#include <thrust/for_each.h>
#include <thrust/device_vector.h>
#include <thrust/count.h>
#include <thrust/copy.h>
#include "CBufSpec.hh"
#include "TBuf.hh"
#include "TUtil.hh"
#include "TIsHit.hh"
#include "float4x4.h"
#include "OpticksPhoton.h"
#include "DummyPhotonsNPY.hpp"
#include "NPY.hpp"
#include "OPTICKS_LOG.hh"
// nvcc cannot stomach GLM
const char* TMPPath( const char* name)
{
std::stringstream ss ;
ss << "$TMP/thrustrap/TBuf4x4Test/"
<< name
;
std::string s = ss.str();
return strdup(s.c_str());
}
void test_dump44()
{
LOG(info) << "(" ;
unsigned num_photons = 100 ;
NPY<float>* ph = DummyPhotonsNPY::Make(num_photons, SURFACE_DETECT );
thrust::device_vector<float4> d_ph(num_photons*4) ;
CBufSpec cph = make_bufspec<float4>(d_ph);
TBuf tph("tph", cph);
tph.upload(ph);
tph.dump<float4>("tph dump<float4>", 1, 0, num_photons*4 ); // stride, begin, end
tph.dump<float4x4>("tph dump<float4x4>", 1, 0, num_photons ); // stride, begin, end
LOG(info) << ")" ;
}
void test_dump4x4()
{
LOG(info) << "(" ;
unsigned num_photons = 100 ;
NPY<float>* ph = DummyPhotonsNPY::Make(num_photons, SURFACE_DETECT );
thrust::device_vector<float4x4> d_ph(num_photons) ;
CBufSpec cph = make_bufspec<float4x4>(d_ph);
TBuf tph("tph", cph);
tph.upload(ph);
tph.dump<float4x4>("tph dump<float4x4>", 1, 0, num_photons ); // stride, begin, end
LOG(info) << ")" ;
}
void test_count4x4()
{
LOG(info) << "(" ;
unsigned num_photons = 100 ;
unsigned hitmask = SURFACE_DETECT ;
NPY<float>* ph = DummyPhotonsNPY::Make(num_photons, hitmask );
thrust::device_vector<float4x4> d_ph(num_photons) ;
CBufSpec cph = make_bufspec<float4x4>(d_ph);
TBuf tph("tph", cph);
tph.upload(ph);
tph.dump<float4x4>("tph dump<float4x4>", 1, 0, num_photons ); // stride, begin, end
TIsHit is_hit(hitmask) ;
unsigned numHit = thrust::count_if(d_ph.begin(), d_ph.end(), is_hit );
LOG(info) << "numHit :" << numHit ;
unsigned x_numHit = ph->getNumHit();
assert(x_numHit == numHit );
LOG(info) << ")" ;
}
void test_count4x4_ptr()
{
LOG(info) << "(" ;
unsigned num_photons = 100 ;
unsigned hitmask = SURFACE_DETECT ;
NPY<float>* ph = DummyPhotonsNPY::Make(num_photons, hitmask );
thrust::device_vector<float4x4> d_ph(num_photons) ;
CBufSpec cph = make_bufspec<float4x4>(d_ph);
TBuf tph("tph", cph);
tph.upload(ph);
tph.dump<float4x4>("tph dump<float4x4>", 1, 0, num_photons ); // stride, begin, end
thrust::device_ptr<float4x4> ptr = thrust::device_pointer_cast((float4x4*)tph.getDevicePtr()) ;
TIsHit is_hit(hitmask) ;
unsigned numHit = thrust::count_if(ptr, ptr+num_photons, is_hit );
LOG(info) << "numHit :" << numHit ;
unsigned x_numHit = ph->getNumHit();
assert(x_numHit == numHit );
LOG(info) << ")" ;
}
void test_copy4x4()
{
LOG(info) << "(" ;
unsigned num_photons = 100 ;
unsigned hitmask = SURFACE_DETECT ;
NPY<float>* pho = DummyPhotonsNPY::Make(num_photons, hitmask );
thrust::device_vector<float4x4> d_pho(num_photons) ;
CBufSpec cpho = make_bufspec<float4x4>(d_pho);
assert( cpho.size == num_photons );
TBuf tpho("tpho", cpho);
tpho.upload(pho);
tpho.dump<float4x4>("tpho dump<float4x4>", 1, 0, num_photons ); // stride, begin, end
TIsHit is_hit(hitmask) ;
unsigned numHit = thrust::count_if(d_pho.begin(), d_pho.end(), is_hit );
LOG(info) << "numHit :" << numHit ;
unsigned x_numHit = pho->getNumHit();
assert(x_numHit == numHit );
thrust::device_vector<float4x4> d_hit(numHit) ;
thrust::copy_if(d_pho.begin(), d_pho.end(), d_hit.begin(), is_hit );
CBufSpec chit = make_bufspec<float4x4>(d_hit);
TBuf thit("thit", chit );
NPY<float>* hit = NPY<float>::make(numHit, 4,4);
thit.download(hit);
const char* path = TMPPath("hit.npy");
hit->save(path);
SSys::npdump(path);
}
void test_copy4x4_ptr()
{
LOG(info) << "(" ;
unsigned num_photons = 100 ;
unsigned hitmask = SURFACE_DETECT ;
unsigned modulo = 10 ;
NPY<float>* pho = DummyPhotonsNPY::Make(num_photons, hitmask, modulo );
unsigned x_numHit = pho->getNumHit();
thrust::device_vector<float4x4> d_pho(num_photons) ;
CBufSpec cpho = make_bufspec<float4x4>(d_pho);
assert( cpho.size == num_photons );
// check can operate from TBuf alone, without help from device_vector
TBuf tpho("tpho", cpho);
tpho.upload(pho);
tpho.dump<float4x4>("tpho dump<float4x4>", 1, 0, num_photons ); // stride, begin, end
thrust::device_ptr<float4x4> ptr = thrust::device_pointer_cast((float4x4*)tpho.getDevicePtr()) ;
assert(num_photons == tpho.getSize());
TIsHit is_hit(hitmask) ;
unsigned numHit = thrust::count_if(ptr, ptr+num_photons, is_hit );
LOG(info) << "numHit :" << numHit ;
assert(x_numHit == numHit );
thrust::device_vector<float4x4> d_hit(numHit) ;
thrust::copy_if(ptr, ptr+num_photons, d_hit.begin(), is_hit );
CBufSpec chit = make_bufspec<float4x4>(d_hit);
TBuf thit("thit", chit );
assert(thit.getSize() == numHit );
NPY<float>* hit = NPY<float>::make(numHit, 4,4);
thit.download(hit);
const char* path = TMPPath("hit.npy");
hit->save(path);
SSys::npdump(path);
}
void test_copy4x4_encapsulated()
{
LOG(info) << "(" ;
unsigned num_photons = 100 ;
unsigned hitmask = SURFACE_DETECT ;
unsigned modulo = 10 ;
NPY<float>* pho = DummyPhotonsNPY::Make(num_photons, hitmask, modulo );
unsigned x_num_hit = pho->getNumHit() ;
thrust::device_vector<float4x4> d_pho(num_photons) ; // allocate GPU buffer
CBufSpec cpho = make_bufspec<float4x4>(d_pho); // CBufSpec holds (dev_ptr,size,num_bytes) using thrustrap/TUtil_.cu
assert( cpho.dev_ptr != NULL );
assert( cpho.size == num_photons );
LOG(info)
<< " num_photons " << num_photons
<< " sizeof(float4x4) " << sizeof(float4x4)
<< " num_photons*sizeof(float4x4) " << num_photons*sizeof(float4x4)
<< " cpho.num_bytes " << cpho.num_bytes
;
assert( cpho.num_bytes == num_photons*sizeof(float4x4) ); // <-- flakey fails, see notes/issues/longer-thrap-tests-flakey-on-macOS.rst
TBuf tpho("tpho", cpho);
tpho.upload(pho);
tpho.dump4x4("tpho dump4x4", 1, 0, num_photons ); // stride, begin, end
NPY<float>* hit = NPY<float>::make(0,4,4);
tpho.downloadSelection4x4("tpho.downloadSelection4x4", hit, hitmask );
unsigned num_hit = hit->getShape(0) ;
assert( num_hit == x_num_hit );
const char* path = TMPPath("hit.npy");
hit->save(path);
SSys::npdump(path);
}
int main(int argc, char** argv)
{
OPTICKS_LOG(argc, argv);
LOG(info) << argv[0] ;
/*
test_dump44();
test_dump4x4();
test_count4x4();
test_copy4x4();
test_count4x4_ptr();
*/
test_copy4x4_ptr();
test_copy4x4_encapsulated();
hipDeviceSynchronize();
}
|
85997e7b4e132b4be305d1c337c103988ce2dd0b.cu
|
/*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "SSys.hh"
#include <string>
#include <sstream>
#include <thrust/for_each.h>
#include <thrust/device_vector.h>
#include <thrust/count.h>
#include <thrust/copy.h>
#include "CBufSpec.hh"
#include "TBuf.hh"
#include "TUtil.hh"
#include "TIsHit.hh"
#include "float4x4.h"
#include "OpticksPhoton.h"
#include "DummyPhotonsNPY.hpp"
#include "NPY.hpp"
#include "OPTICKS_LOG.hh"
// nvcc cannot stomach GLM
const char* TMPPath( const char* name)
{
std::stringstream ss ;
ss << "$TMP/thrustrap/TBuf4x4Test/"
<< name
;
std::string s = ss.str();
return strdup(s.c_str());
}
void test_dump44()
{
LOG(info) << "(" ;
unsigned num_photons = 100 ;
NPY<float>* ph = DummyPhotonsNPY::Make(num_photons, SURFACE_DETECT );
thrust::device_vector<float4> d_ph(num_photons*4) ;
CBufSpec cph = make_bufspec<float4>(d_ph);
TBuf tph("tph", cph);
tph.upload(ph);
tph.dump<float4>("tph dump<float4>", 1, 0, num_photons*4 ); // stride, begin, end
tph.dump<float4x4>("tph dump<float4x4>", 1, 0, num_photons ); // stride, begin, end
LOG(info) << ")" ;
}
void test_dump4x4()
{
LOG(info) << "(" ;
unsigned num_photons = 100 ;
NPY<float>* ph = DummyPhotonsNPY::Make(num_photons, SURFACE_DETECT );
thrust::device_vector<float4x4> d_ph(num_photons) ;
CBufSpec cph = make_bufspec<float4x4>(d_ph);
TBuf tph("tph", cph);
tph.upload(ph);
tph.dump<float4x4>("tph dump<float4x4>", 1, 0, num_photons ); // stride, begin, end
LOG(info) << ")" ;
}
void test_count4x4()
{
LOG(info) << "(" ;
unsigned num_photons = 100 ;
unsigned hitmask = SURFACE_DETECT ;
NPY<float>* ph = DummyPhotonsNPY::Make(num_photons, hitmask );
thrust::device_vector<float4x4> d_ph(num_photons) ;
CBufSpec cph = make_bufspec<float4x4>(d_ph);
TBuf tph("tph", cph);
tph.upload(ph);
tph.dump<float4x4>("tph dump<float4x4>", 1, 0, num_photons ); // stride, begin, end
TIsHit is_hit(hitmask) ;
unsigned numHit = thrust::count_if(d_ph.begin(), d_ph.end(), is_hit );
LOG(info) << "numHit :" << numHit ;
unsigned x_numHit = ph->getNumHit();
assert(x_numHit == numHit );
LOG(info) << ")" ;
}
void test_count4x4_ptr()
{
LOG(info) << "(" ;
unsigned num_photons = 100 ;
unsigned hitmask = SURFACE_DETECT ;
NPY<float>* ph = DummyPhotonsNPY::Make(num_photons, hitmask );
thrust::device_vector<float4x4> d_ph(num_photons) ;
CBufSpec cph = make_bufspec<float4x4>(d_ph);
TBuf tph("tph", cph);
tph.upload(ph);
tph.dump<float4x4>("tph dump<float4x4>", 1, 0, num_photons ); // stride, begin, end
thrust::device_ptr<float4x4> ptr = thrust::device_pointer_cast((float4x4*)tph.getDevicePtr()) ;
TIsHit is_hit(hitmask) ;
unsigned numHit = thrust::count_if(ptr, ptr+num_photons, is_hit );
LOG(info) << "numHit :" << numHit ;
unsigned x_numHit = ph->getNumHit();
assert(x_numHit == numHit );
LOG(info) << ")" ;
}
void test_copy4x4()
{
LOG(info) << "(" ;
unsigned num_photons = 100 ;
unsigned hitmask = SURFACE_DETECT ;
NPY<float>* pho = DummyPhotonsNPY::Make(num_photons, hitmask );
thrust::device_vector<float4x4> d_pho(num_photons) ;
CBufSpec cpho = make_bufspec<float4x4>(d_pho);
assert( cpho.size == num_photons );
TBuf tpho("tpho", cpho);
tpho.upload(pho);
tpho.dump<float4x4>("tpho dump<float4x4>", 1, 0, num_photons ); // stride, begin, end
TIsHit is_hit(hitmask) ;
unsigned numHit = thrust::count_if(d_pho.begin(), d_pho.end(), is_hit );
LOG(info) << "numHit :" << numHit ;
unsigned x_numHit = pho->getNumHit();
assert(x_numHit == numHit );
thrust::device_vector<float4x4> d_hit(numHit) ;
thrust::copy_if(d_pho.begin(), d_pho.end(), d_hit.begin(), is_hit );
CBufSpec chit = make_bufspec<float4x4>(d_hit);
TBuf thit("thit", chit );
NPY<float>* hit = NPY<float>::make(numHit, 4,4);
thit.download(hit);
const char* path = TMPPath("hit.npy");
hit->save(path);
SSys::npdump(path);
}
void test_copy4x4_ptr()
{
LOG(info) << "(" ;
unsigned num_photons = 100 ;
unsigned hitmask = SURFACE_DETECT ;
unsigned modulo = 10 ;
NPY<float>* pho = DummyPhotonsNPY::Make(num_photons, hitmask, modulo );
unsigned x_numHit = pho->getNumHit();
thrust::device_vector<float4x4> d_pho(num_photons) ;
CBufSpec cpho = make_bufspec<float4x4>(d_pho);
assert( cpho.size == num_photons );
// check that we can operate from the TBuf alone, without help from the device_vector
TBuf tpho("tpho", cpho);
tpho.upload(pho);
tpho.dump<float4x4>("tpho dump<float4x4>", 1, 0, num_photons ); // stride, begin, end
thrust::device_ptr<float4x4> ptr = thrust::device_pointer_cast((float4x4*)tpho.getDevicePtr()) ;
assert(num_photons == tpho.getSize());
TIsHit is_hit(hitmask) ;
unsigned numHit = thrust::count_if(ptr, ptr+num_photons, is_hit );
LOG(info) << "numHit :" << numHit ;
assert(x_numHit == numHit );
thrust::device_vector<float4x4> d_hit(numHit) ;
thrust::copy_if(ptr, ptr+num_photons, d_hit.begin(), is_hit );
CBufSpec chit = make_bufspec<float4x4>(d_hit);
TBuf thit("thit", chit );
assert(thit.getSize() == numHit );
NPY<float>* hit = NPY<float>::make(numHit, 4,4);
thit.download(hit);
const char* path = TMPPath("hit.npy");
hit->save(path);
SSys::npdump(path);
}
void test_copy4x4_encapsulated()
{
LOG(info) << "(" ;
unsigned num_photons = 100 ;
unsigned hitmask = SURFACE_DETECT ;
unsigned modulo = 10 ;
NPY<float>* pho = DummyPhotonsNPY::Make(num_photons, hitmask, modulo );
unsigned x_num_hit = pho->getNumHit() ;
thrust::device_vector<float4x4> d_pho(num_photons) ; // allocate GPU buffer
CBufSpec cpho = make_bufspec<float4x4>(d_pho); // CBufSpec holds (dec_ptr,size,num_bytes) using thrustrap/TUtil_.cu
assert( cpho.dev_ptr != NULL );
assert( cpho.size == num_photons );
LOG(info)
<< " num_photons " << num_photons
<< " sizeof(float4x4) " << sizeof(float4x4)
<< " num_photons*sizeof(float4x4) " << num_photons*sizeof(float4x4)
<< " cpho.num_bytes " << cpho.num_bytes
;
assert( cpho.num_bytes == num_photons*sizeof(float4x4) ); // <-- flakey fails, see notes/issues/longer-thrap-tests-flakey-on-macOS.rst
TBuf tpho("tpho", cpho);
tpho.upload(pho);
tpho.dump4x4("tpho dump4x4", 1, 0, num_photons ); // stride, begin, end
NPY<float>* hit = NPY<float>::make(0,4,4);
tpho.downloadSelection4x4("tpho.downloadSelection4x4", hit, hitmask );
unsigned num_hit = hit->getShape(0) ;
assert( num_hit == x_num_hit );
const char* path = TMPPath("hit.npy");
hit->save(path);
SSys::npdump(path);
}
int main(int argc, char** argv)
{
OPTICKS_LOG(argc, argv);
LOG(info) << argv[0] ;
/*
test_dump44();
test_dump4x4();
test_count4x4();
test_copy4x4();
test_count4x4_ptr();
*/
test_copy4x4_ptr();
test_copy4x4_encapsulated();
cudaDeviceSynchronize();
}
|
f9bf6cf63a84f34ac6dbdb6de09ac9a65f453508.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <torch/all.h>
#include <torch/python.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
template <typename scalar_t>
__device__ __forceinline__ scalar_t get_pixel_and_weights(
const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> value,
scalar_t x, scalar_t y, int H, int W, const int B, const int C){
scalar_t ltx = floor(x+0.00001);
scalar_t lty = floor(y+0.00001);
scalar_t rtx = ceil(x+0.00001);
scalar_t rty = floor(y+0.00001);
scalar_t lbx = floor(x+0.00001);
scalar_t lby = ceil(y+0.00001);
scalar_t rbx = ceil(x+0.00001);
scalar_t rby = ceil(y+0.00001);
// printf("Coord: %d %d %d %d\n",int(ltx), int(lty), int(rtx), int(rty));
if (ltx<0 || rbx>(W-1) || lty<0 || lty>(H-1)){
return 0;
}
scalar_t lt, rt, lb, rb;
lt = value[B][C][int(lty)][int(ltx)];
rt = value[B][C][int(rty)][int(rtx)];
lb = value[B][C][int(lby)][int(lbx)];
rb = value[B][C][int(rby)][int(rbx)];
scalar_t w1,w2,w3,w4;
w1 = (rbx - x) * (rby - y);
w2 = (x - ltx) * (rby - y);
w3 = (rbx - x) * (y - lty);
w4 = (x - ltx) * (y - lty);
// printf("Value:\t%f %f %f %f\nWeight:\t%f %f %f %f\n", lt, rt, lb, rb, w1, w2, w3, w4);
// return {lt, rt, lb, rb, w1, w2, w3, w4};
return lt*w1 + rt*w2 + lb*w3 + rb*w4;
}
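// Editorial worked example (not in the original source): for x = 1.3, y = 2.6 the corner
// coordinates above are ltx = 1, rtx = 2, lty = 2, rby = 3, giving bilinear weights
//   w1 = (2 - 1.3)*(3 - 2.6) = 0.28   (pairs with lt)
//   w2 = (1.3 - 1)*(3 - 2.6) = 0.12   (pairs with rt)
//   w3 = (2 - 1.3)*(2.6 - 2) = 0.42   (pairs with lb)
//   w4 = (1.3 - 1)*(2.6 - 2) = 0.18   (pairs with rb)
// The weights sum to 1, so the return value is a convex combination of the four pixels.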
template <typename scalar_t>
__global__ void gridsample_cuda_kernel(
const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> value,
const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> grid,
torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> result,
const int H, const int W, const int Hout, const int Wout){
// we map channel and batch onto the block indices here. Would combining them into the thread dimension boost speed?
const int B = blockIdx.y;
const int C = blockIdx.x;
const int N = blockDim.x;
const int n_thread = threadIdx.x;
int i;
scalar_t x, y, v;
// std::vector<scalar_t> buff;
for (i=n_thread; i<Hout*Wout; i=i+N){
x = i%Wout;
y = i/Wout;
v = get_pixel_and_weights(value, grid[B][y][x][0]*(W-1), grid[B][y][x][1]*(H-1), H, W, B, C);
// printf("%f\n", v);
// v = buff[0]*buff[4] + buff[1]*buff[5] + buff[2]*buff[6] + buff[3]*buff[7];
result[B][C][y][x] = v;
}
}
torch::Tensor gridsample_cuda(torch::Tensor value, torch::Tensor grid){
// grid coordinates are expected in the range [0, 1]
// value: [B,C,H,W]
// grid: [B,Hout,Wout,2]
//
AT_ASSERTM(value.size(0)==grid.size(0), "Batch size of value and grid should be the same");
const int B = value.size(0);
const int C = value.size(1);
const int H = value.size(2);
const int W = value.size(3);
const int Hout = grid.size(1);
const int Wout = grid.size(2);
const int threads = 1024;
const dim3 blocks(C,B);
auto result = torch::zeros({B, C, Hout, Wout}, value.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(value.type(), "generate grid sample", ([&] {
hipLaunchKernelGGL(( gridsample_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
value.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>(),
grid.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>(),
result.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>(),
H, W, Hout, Wout);
}));
return result;
}
|
f9bf6cf63a84f34ac6dbdb6de09ac9a65f453508.cu
|
#include <torch/all.h>
#include <torch/python.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
template <typename scalar_t>
__device__ __forceinline__ scalar_t get_pixel_and_weights(
const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> value,
scalar_t x, scalar_t y, int H, int W, const int B, const int C){
scalar_t ltx = floor(x+0.00001);
scalar_t lty = floor(y+0.00001);
scalar_t rtx = ceil(x+0.00001);
scalar_t rty = floor(y+0.00001);
scalar_t lbx = floor(x+0.00001);
scalar_t lby = ceil(y+0.00001);
scalar_t rbx = ceil(x+0.00001);
scalar_t rby = ceil(y+0.00001);
// printf("Coord: %d %d %d %d\n",int(ltx), int(lty), int(rtx), int(rty));
if (ltx<0 || rbx>(W-1) || lty<0 || lty>(H-1)){
return 0;
}
scalar_t lt, rt, lb, rb;
lt = value[B][C][int(lty)][int(ltx)];
rt = value[B][C][int(rty)][int(rtx)];
lb = value[B][C][int(lby)][int(lbx)];
rb = value[B][C][int(rby)][int(rbx)];
scalar_t w1,w2,w3,w4;
w1 = (rbx - x) * (rby - y);
w2 = (x - ltx) * (rby - y);
w3 = (rbx - x) * (y - lty);
w4 = (x - ltx) * (y - lty);
// printf("Value:\t%f %f %f %f\nWeight:\t%f %f %f %f\n", lt, rt, lb, rb, w1, w2, w3, w4);
// return {lt, rt, lb, rb, w1, w2, w3, w4};
return lt*w1 + rt*w2 + lb*w3 + rb*w4;
}
template <typename scalar_t>
__global__ void gridsample_cuda_kernel(
const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> value,
const torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> grid,
torch::PackedTensorAccessor32<scalar_t, 4, torch::RestrictPtrTraits> result,
const int H, const int W, const int Hout, const int Wout){
// we map channel and batch onto the block indices here. Would combining them into the thread dimension boost speed?
const int B = blockIdx.y;
const int C = blockIdx.x;
const int N = blockDim.x;
const int n_thread = threadIdx.x;
int i;
scalar_t x, y, v;
// std::vector<scalar_t> buff;
for (i=n_thread; i<Hout*Wout; i=i+N){
x = i%Wout;
y = i/Wout;
v = get_pixel_and_weights(value, grid[B][y][x][0]*(W-1), grid[B][y][x][1]*(H-1), H, W, B, C);
// printf("%f\n", v);
// v = buff[0]*buff[4] + buff[1]*buff[5] + buff[2]*buff[6] + buff[3]*buff[7];
result[B][C][y][x] = v;
}
}
torch::Tensor gridsample_cuda(torch::Tensor value, torch::Tensor grid){
// grid coordinates are expected in the range [0, 1]
// value: [B,C,H,W]
// grid: [B,Hout,Wout,2]
//
AT_ASSERTM(value.size(0)==grid.size(0), "Batch size of value and grid should be the same");
const int B = value.size(0);
const int C = value.size(1);
const int H = value.size(2);
const int W = value.size(3);
const int Hout = grid.size(1);
const int Wout = grid.size(2);
const int threads = 1024;
const dim3 blocks(C,B);
auto result = torch::zeros({B, C, Hout, Wout}, value.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(value.type(), "generate grid sample", ([&] {
gridsample_cuda_kernel<scalar_t><<<blocks, threads>>>(
value.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>(),
grid.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>(),
result.packed_accessor32<scalar_t, 4, torch::RestrictPtrTraits>(),
H, W, Hout, Wout);
}));
return result;
}
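// Editorial sketch (not part of the original file): one hypothetical way to expose the
// launcher above to Python is a separate binding translation unit built with the usual
// torch extension machinery, e.g.
//
//   #include <torch/extension.h>
//   torch::Tensor gridsample_cuda(torch::Tensor value, torch::Tensor grid);
//   PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
//       // note: this kernel expects grid coordinates in [0, 1]
//       m.def("gridsample", &gridsample_cuda, "bilinear grid sample (grid in [0, 1])");
//   }
//
// TORCH_EXTENSION_NAME is defined by the extension build; the module and function names
// here are illustrative assumptions, not part of the original project.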
|
632cb254784b82c02f57ef3f29ed5987c65b8460.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: jglaser
#include "OrderingExternalGPU.cuh"
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
#include <hip/hip_runtime.h>
/*! \file OrderingExternalGPU.cuh
\brief Defines templated GPU kernel code for calculating the external forces.
*/
//! Kernel for calculating external forces
/*! This kernel is called to calculate the external forces on all N particles. Actual evaluation of the potentials and
forces for each particle is handled via the template class \a evaluator.
\param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param box Box dimensions used to implement periodic boundary conditions
\param params per-type array of parameters for the potential
*/
__global__ void gpu_compute_ordering_external_forces_kernel(float4 *d_force,
float *d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const BoxDim box,
const Scalar *order_parameters,
const unsigned int n_wave,
const int3 *lattice_vectors,
const Scalar *phases,
const Scalar interface_width)
{
// start by identifying which particle we are to handle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// read in the position of our particle.
// (MEM TRANSFER: 16 bytes)
Scalar4 posi = d_pos[idx];
// initialize the force to 0
Scalar3 force = make_scalar3(0.0, 0.0, 0.0);
Scalar energy = Scalar(0.0);
Scalar3 L = box.getL();
Scalar3 Xi = make_scalar3((posi.x + (L.x/Scalar(2.0)))/(L.x),
(posi.y + (L.y/Scalar(2.0)))/(L.y),
(posi.z + (L.z/Scalar(2.0)))/(L.z));
Scalar3 r = make_scalar3(posi.x,posi.y,posi.z);
unsigned int typei = __float_as_int(posi.w);
Scalar order_parameter = order_parameters[typei];
Scalar cosine = Scalar(0.0);
Scalar3 deriv = make_scalar3(0.0,0.0,0.0);
for (unsigned int i = 0; i < n_wave; ++i) {
Scalar3 q = make_scalar3(2.0*M_PI*lattice_vectors[i].x/L.x,
2.0*M_PI*lattice_vectors[i].y/L.y,
2.0*M_PI*lattice_vectors[i].z/L.z);
Scalar3 qr = make_scalar3(2.0*M_PI*lattice_vectors[i].x,
2.0*M_PI*lattice_vectors[i].y,
2.0*M_PI*lattice_vectors[i].z);
Scalar arg, q_length, clip_parameter, sine;
arg = dot(q,r)+phases[i];
q_length = dot(q, L);
clip_parameter = Scalar(1.0)/(Scalar(2.0*M_PI)*interface_width);
cosine += clip_parameter*cosf(arg);
sine = -Scalar(1.0)*clip_parameter*sinf(arg);
deriv = deriv + sine*q;
}
Scalar tanH = tanhf(cosine);
energy = order_parameter*tanH;
Scalar sechSq = (Scalar(1.0) - tanH*tanH);
Scalar f = order_parameter*sechSq;
force = f*deriv;
// now that the force calculation is complete, write out the result
d_force[idx].x = force.x;
d_force[idx].y = force.y;
d_force[idx].z = force.z;
d_force[idx].w = energy;
for (unsigned int i = 0; i < 6; i++)
d_virial[i] = Scalar(0.0);
}
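// Editorial note (not in the original source): written out, the loop above evaluates, for
// particle i of type t with reciprocal vectors q_j = 2*pi*n_j/L, phases phi_j and
// interface width w,
//   S_i = (1/(2*pi*w)) * sum_j cos(q_j . r_i + phi_j)
//   E_i = alpha_t * tanh(S_i)                                   (stored in d_force[idx].w)
//   F_i = alpha_t * sech^2(S_i) * (1/(2*pi*w)) * sum_j (-sin(q_j . r_i + phi_j)) * q_j
// i.e. the x,y,z components written to d_force are the gradient of the tanh-clipped
// order-parameter field E_i with respect to the particle position.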
//! Kernel driver that computes the ordering external forces on the GPU
/*! \param external_potential_args Other arguments to pass onto the kernel
\param d_params Parameters for the potential
This is just a driver function for gpu_compute_external_forces(), see it for details.
*/
hipError_t gpu_compute_ordering_external_forces(float4 *d_force,
float *d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const BoxDim& box,
const unsigned int block_size,
const Scalar *d_order_parameters,
const unsigned int n_wave,
const int3 *d_lattice_vectors,
const Scalar *d_phases,
const Scalar interface_width)
{
// setup the grid to run the kernel
dim3 grid( N / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// bind the position texture
hipLaunchKernelGGL(( gpu_compute_ordering_external_forces_kernel)
, dim3(grid), dim3(threads), 0, 0, d_force, d_virial, virial_pitch, N, d_pos, box, d_order_parameters, n_wave, d_lattice_vectors, d_phases, interface_width);
return hipSuccess;
}
|
632cb254784b82c02f57ef3f29ed5987c65b8460.cu
|
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: jglaser
#include "OrderingExternalGPU.cuh"
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
#include <cuda.h>
/*! \file OrderingExternalGPU.cuh
\brief Defines templated GPU kernel code for calculating the external forces.
*/
//! Kernel for calculating external forces
/*! This kernel is called to calculate the external forces on all N particles. Actual evaluation of the potentials and
forces for each particle is handled via the template class \a evaluator.
\param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param box Box dimensions used to implement periodic boundary conditions
\param params per-type array of parameters for the potential
*/
__global__ void gpu_compute_ordering_external_forces_kernel(float4 *d_force,
float *d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const BoxDim box,
const Scalar *order_parameters,
const unsigned int n_wave,
const int3 *lattice_vectors,
const Scalar *phases,
const Scalar interface_width)
{
// start by identifying which particle we are to handle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// read in the position of our particle.
// (MEM TRANSFER: 16 bytes)
Scalar4 posi = d_pos[idx];
// initialize the force to 0
Scalar3 force = make_scalar3(0.0, 0.0, 0.0);
Scalar energy = Scalar(0.0);
Scalar3 L = box.getL();
Scalar3 Xi = make_scalar3((posi.x + (L.x/Scalar(2.0)))/(L.x),
(posi.y + (L.y/Scalar(2.0)))/(L.y),
(posi.z + (L.z/Scalar(2.0)))/(L.z));
Scalar3 r = make_scalar3(posi.x,posi.y,posi.z);
unsigned int typei = __float_as_int(posi.w);
Scalar order_parameter = order_parameters[typei];
Scalar cosine = Scalar(0.0);
Scalar3 deriv = make_scalar3(0.0,0.0,0.0);
for (unsigned int i = 0; i < n_wave; ++i) {
Scalar3 q = make_scalar3(2.0*M_PI*lattice_vectors[i].x/L.x,
2.0*M_PI*lattice_vectors[i].y/L.y,
2.0*M_PI*lattice_vectors[i].z/L.z);
Scalar3 qr = make_scalar3(2.0*M_PI*lattice_vectors[i].x,
2.0*M_PI*lattice_vectors[i].y,
2.0*M_PI*lattice_vectors[i].z);
Scalar arg, q_length, clip_parameter, sine;
arg = dot(q,r)+phases[i];
q_length = dot(q, L);
clip_parameter = Scalar(1.0)/(Scalar(2.0*M_PI)*interface_width);
cosine += clip_parameter*cosf(arg);
sine = -Scalar(1.0)*clip_parameter*sinf(arg);
deriv = deriv + sine*q;
}
Scalar tanH = tanhf(cosine);
energy = order_parameter*tanH;
Scalar sechSq = (Scalar(1.0) - tanH*tanH);
Scalar f = order_parameter*sechSq;
force = f*deriv;
// now that the force calculation is complete, write out the result
d_force[idx].x = force.x;
d_force[idx].y = force.y;
d_force[idx].z = force.z;
d_force[idx].w = energy;
for (unsigned int i = 0; i < 6; i++)
d_virial[i] = Scalar(0.0);
}
//! Kernel driver that computes the ordering external forces on the GPU
/*! \param external_potential_args Other arguments to pass onto the kernel
\param d_params Parameters for the potential
This is just a driver function for gpu_compute_external_forces(), see it for details.
*/
cudaError_t gpu_compute_ordering_external_forces(float4 *d_force,
float *d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const BoxDim& box,
const unsigned int block_size,
const Scalar *d_order_parameters,
const unsigned int n_wave,
const int3 *d_lattice_vectors,
const Scalar *d_phases,
const Scalar interface_width)
{
// setup the grid to run the kernel
dim3 grid( N / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// bind the position texture
gpu_compute_ordering_external_forces_kernel
<<<grid, threads>>>(d_force, d_virial, virial_pitch, N, d_pos, box, d_order_parameters, n_wave, d_lattice_vectors, d_phases, interface_width);
return cudaSuccess;
}
|
08e94312a3d37f69b82de1f70d796e97e723d2a9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
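// Editorial sketch (not part of the original assignment code): a straightforward CPU
// reference of the weighted-average blur described above, using clamp-to-edge borders.
// It is only meant to make the indexing in the diagram concrete; the name is illustrative.
void reference_blur_channel(const unsigned char* in, unsigned char* out,
                            int numRows, int numCols,
                            const float* filter, int filterWidth)
{
  const int half = filterWidth / 2;
  for (int r = 0; r < numRows; ++r) {
    for (int c = 0; c < numCols; ++c) {
      float result = 0.f;
      for (int fr = -half; fr <= half; ++fr) {
        for (int fc = -half; fc <= half; ++fc) {
          int ir = r + fr; if (ir < 0) ir = 0; if (ir > numRows - 1) ir = numRows - 1; // clamp row
          int ic = c + fc; if (ic < 0) ic = 0; if (ic > numCols - 1) ic = numCols - 1; // clamp col
          result += filter[(fr + half) * filterWidth + (fc + half)] * in[ir * numCols + ic];
        }
      }
      out[r * numCols + c] = (unsigned char)result; // store after accumulating in float
    }
  }
}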
#include "utils.h"
#include "stdio.h"
const int BLOCK_SIZE = 32;
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
__shared__ unsigned char array[BLOCK_SIZE* BLOCK_SIZE];
__shared__ float sh_filter[BLOCK_SIZE * BLOCK_SIZE];
const int halfFW = filterWidth / 2;
const int calcBlock = BLOCK_SIZE - filterWidth +1;
// calculate threads position in the image
int c = blockIdx.x * calcBlock + threadIdx.x - halfFW;
int r = blockIdx.y * calcBlock + threadIdx.y - halfFW;
// calculate threads position in the block
int blockIndex = threadIdx.x + threadIdx.y * BLOCK_SIZE;
int imgIndex = r * numCols + c;
if(threadIdx.x < filterWidth && threadIdx.y < filterWidth){
int filter_index = threadIdx.y * filterWidth + threadIdx.x;
sh_filter[filter_index] = filter[filter_index];
}
if(r < 0 || r>=numRows || c < 0 || c>=numCols){
r = min(max(r, 0), static_cast<int>(numRows - 1));
c = min(max(c, 0), static_cast<int>(numCols - 1));
imgIndex = r * numCols + c;
array[blockIndex] = inputChannel[imgIndex];
return;
}else{
array[blockIndex] = inputChannel[imgIndex];
}
__syncthreads();
if(threadIdx.x < halfFW || threadIdx.x >= BLOCK_SIZE - halfFW ||
threadIdx.y < halfFW || threadIdx.y >= BLOCK_SIZE - halfFW ){
return;
}
float result = 0.f;
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
int image_r = threadIdx.y + filter_r;
int image_c = threadIdx.x + filter_c;
float image_value = static_cast<float>(array[image_r * BLOCK_SIZE + image_c]);
float filter_value = sh_filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
// Naive implementation :
// Each thread reads needed elements from global memory to local memory and computes
// Needs 1.676ms
/*
float result = 0.f;
//For every value in the filter around the pixel (c, r)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = min(max(tY + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(tX + filter_c, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
*/
outputChannel[imgIndex] = result;
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
int tX = blockIdx.x * blockDim.x + threadIdx.x;
int tY = blockIdx.y * blockDim.y + threadIdx.y;
if ( tX >= numCols || tY >= numRows ) {
return;
}
int index = tY * numCols + tX;
unsigned char red = inputImageRGBA[index].x;
unsigned char green = inputImageRGBA[index].y;
unsigned char blue = inputImageRGBA[index].z;
redChannel[index] = red;
greenChannel[index] = green;
blueChannel[index] = blue;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO: DONE
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO: DONE
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: DONE
// Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE, 1);
//TODO: DONE
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
// for naive implementation this is fine
// const dim3 gridSize( (numCols+BLOCK_SIZE-1) / BLOCK_SIZE, (numRows + BLOCK_SIZE-1) / BLOCK_SIZE, 1);
// shared memory implementation
// for a region of BLOCK_SIZE^2, only (BLOCK_SIZE-filterWidth+1)^2 pixels will be computed
// that means we need more blocks
const int calcBlock = BLOCK_SIZE - filterWidth +1;
//std::cout << "filterWidth " << filterWidth << "\n";
const dim3 gridSize((numCols + calcBlock-1) / calcBlock, (numRows + calcBlock-1) / calcBlock, 1);
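// Editorial illustration (not in the original): with BLOCK_SIZE = 32 and filterWidth = 9,
// calcBlock = 24, so a 1024x768 image needs ceil(1024/24) x ceil(768/24) = 43 x 32 blocks,
// versus 32 x 24 blocks for the naive one-output-pixel-per-thread mapping.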
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols,
d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: DONE
// Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols,
d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
|
08e94312a3d37f69b82de1f70d796e97e723d2a9.cu
|
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include "stdio.h"
const int BLOCK_SIZE = 32;
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
__shared__ unsigned char array[BLOCK_SIZE* BLOCK_SIZE];
__shared__ float sh_filter[BLOCK_SIZE * BLOCK_SIZE];
const int halfFW = filterWidth / 2;
const int calcBlock = BLOCK_SIZE - filterWidth +1;
// calculate threads position in the image
int c = blockIdx.x * calcBlock + threadIdx.x - halfFW;
int r = blockIdx.y * calcBlock + threadIdx.y - halfFW;
// calculate threads position in the block
int blockIndex = threadIdx.x + threadIdx.y * BLOCK_SIZE;
int imgIndex = r * numCols + c;
if(threadIdx.x < filterWidth && threadIdx.y < filterWidth){
int filter_index = threadIdx.y * filterWidth + threadIdx.x;
sh_filter[filter_index] = filter[filter_index];
}
if(r < 0 || r>=numRows || c < 0 || c>=numCols){
r = min(max(r, 0), static_cast<int>(numRows - 1));
c = min(max(c, 0), static_cast<int>(numCols - 1));
imgIndex = r * numCols + c;
array[blockIndex] = inputChannel[imgIndex];
return;
}else{
array[blockIndex] = inputChannel[imgIndex];
}
__syncthreads();
if(threadIdx.x < halfFW || threadIdx.x >= BLOCK_SIZE - halfFW ||
threadIdx.y < halfFW || threadIdx.y >= BLOCK_SIZE - halfFW ){
return;
}
float result = 0.f;
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
int image_r = threadIdx.y + filter_r;
int image_c = threadIdx.x + filter_c;
float image_value = static_cast<float>(array[image_r * BLOCK_SIZE + image_c]);
float filter_value = sh_filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
// Naive implementation :
// Each thread reads needed elements from global memory to local memory and computes
// Needs 1.676ms
/*
float result = 0.f;
//For every value in the filter around the pixel (c, r)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = min(max(tY + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(tX + filter_c, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
*/
outputChannel[imgIndex] = result;
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
int tX = blockIdx.x * blockDim.x + threadIdx.x;
int tY = blockIdx.y * blockDim.y + threadIdx.y;
if ( tX >= numCols || tY >= numRows ) {
return;
}
int index = tY * numCols + tX;
unsigned char red = inputImageRGBA[index].x;
unsigned char green = inputImageRGBA[index].y;
unsigned char blue = inputImageRGBA[index].z;
redChannel[index] = red;
greenChannel[index] = green;
blueChannel[index] = blue;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO: DONE
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO: DONE
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: DONE
// Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE, 1);
//TODO: DONE
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
// for naive implementation this is fine
// const dim3 gridSize( (numCols+BLOCK_SIZE-1) / BLOCK_SIZE, (numRows + BLOCK_SIZE-1) / BLOCK_SIZE, 1);
// shared memory implementation
// for a region of BLOCK_SIZE^2, only (BLOCK_SIZE-filterWidth+1)^2 pixels will be computed
// that means we need more blocks
const int calcBlock = BLOCK_SIZE - filterWidth +1;
//std::cout << "filterWidth " << filterWidth << "\n";
const dim3 gridSize((numCols + calcBlock-1) / calcBlock, (numRows + calcBlock-1) / calcBlock, 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols,
d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: DONE
// Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols,
d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
db7d70a145add89d20fe02479148f935715ce0d0.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zmgesellcmmv.cu normal z -> c, Fri Jan 30 19:00:29 2015
*/
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "common_magma.h"
#include <rocblas.h>
#define PRECISION_c
#define TEXTURE
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_1_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + idx + blocksize*k ];
int col =
dcolind[ offset + idx + blocksize*k ] ;
dot += val * dx[ col*num_vecs+idy ];
}
dy[ row+idy*num_rows ] = dot*alpha + beta*dy [ row+idy*num_rows ];
}
}
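// Editorial worked example (not in the original source): with blocksize = 2 and T = 1, a
// chunk holding row 0 = {a,b,c} (columns 0,2,5) and row 1 = {d} (column 1) is padded to
// the longer row and stored column-major inside the chunk:
//   dval    = [ a, d,  b, 0,  c, 0 ]      dcolind = [ 0, 1,  2, 0,  5, 0 ]
//   drowptr = [ 0, 6 ]                    max_    = (6 - 0)/2 = 3 iterations per thread
// so thread idx = 0 walks a, b, c while thread idx = 1 walks d plus two zero pads.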
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_4_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ] ;
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_8_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
const magmaFloatComplex * __restrict__ dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ] ;
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
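// Editorial note (not in the original source): for T = 8 the partial sums of one row sit
// at shared[ldz + blocksize*t], t = 0..7, and the branches above fold them as a tree:
//   idx < 4 : s[idx] += s[idx+4]
//   idx < 2 : s[idx] += s[idx+2]
//   idx == 0: dy[row+vec] = (s[0] + s[1])*alpha + beta*dy[row+vec]
// i.e. a shared-memory tree reduction whose stride is measured in units of blocksize.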
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_16_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_32_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 16 ){
shared[ldz]+=shared[ldz+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
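//
// Note on the _4/_8/_16/_32 variants above: each row of a slice is handled by
// T = 4, 8, 16 or 32 threads (threadIdx.y), so one thread block covers
// blocksize rows with blocksize*T threads per vector. Every thread accumulates
// a strided partial dot product, writes it to shared[], and the partials are
// folded by a tree reduction whose stride halves from blocksize*(T/2) down to
// blocksize*1 until the thread with idx == 0 holds the complete row result.
// For example, with blocksize = 8 and T = 4 a row has 4 partial sums that are
// combined in two reduction steps.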
/************************* same but using texture mem *************************/
// SELLP SpMV kernel 2D grid - for large number of vectors
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_1_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + idx + blocksize*k ];
int col =
num_vecs * dcolind[ offset + idx + blocksize*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idy );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
dy[row+num_rows*idy*2] =
dot1*alpha
+ beta*dy [row*num_vecs+idy*2];
dy[row+num_rows*idy*2+num_rows] =
dot2*alpha
+ beta*dy [row*num_vecs+idy*2+1];
}
#endif
}
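//
// Note on the *_tex kernels: dx is bound as a texture of int4 texels, so one
// tex1Dfetch returns 128 bits, i.e. two packed 8-byte words that the original
// double-precision zmgesellcmmv.cu reassembles with __hiloint2double into two
// vector entries per matrix element. The "__hiloint2float" spelling in these
// kernels is an artifact of the automated z -> c substitution (there is a
// __hiloint2double intrinsic but no float counterpart); since this file
// defines PRECISION_c, the PRECISION_d guard strips these kernel bodies at
// preprocessing time, so the name never reaches the compiler.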
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_4_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_8_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_16_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_32_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 16 ){
shared[ldz]+=shared[ldz+blocksize*16];
shared[ldz+sv]+=shared[ldz+sv+blocksize*16];
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
}
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
#endif
}
//***************** routines for beta = 0 ************************************//
// SELLP SpMV kernel 2D grid - for large number of vectors
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_1_3D_texb(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + idx + blocksize*k ];
int col =
num_vecs * dcolind[ offset + idx + blocksize*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idy );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
dy[row+num_rows*idy*2] =
dot1*alpha;
dy[row+num_rows*idy*2+num_rows] =
dot2*alpha;
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_4_3D_texb(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_8_3D_texb(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_16_3D_texb(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_32_3D_texb(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 16 ){
shared[ldz]+=shared[ldz+blocksize*16];
shared[ldz+sv]+=shared[ldz+sv+blocksize*16];
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
}
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
//*************************** end kernels using texture ********************//
/**
Purpose
-------
This routine computes Y = alpha * A^t * X + beta * Y on the GPU.
Input format is SELLP. Note that the input format for X is row-major
while the output format for Y is column-major.
Arguments
---------
@param[in]
transA magma_trans_t
transpose A?
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs magma_int_t
number of columns in X and Y
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
column indices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
row pointer of SELLP
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[in,out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmgesellpmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
// using a 3D thread grid for small num_vecs, a 2D grid otherwise
int texture=0, kepler=0, precision=0;
magma_int_t arch = magma_getdevice_arch();
if ( arch > 300 )
kepler = 1;
#if defined(PRECISION_d)
precision = 1;
#endif
#if defined(TEXTURE)
texture = 1;
#endif
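// Note: this generated c-precision file defines PRECISION_c, so precision
// stays 0 and the texture branch below is skipped at run time; the routine
// always falls through to the plain-memory kernels in the else branch. The
// texture path is only exercised by the double-precision (PRECISION_d)
// variant when TEXTURE is defined and the device reports arch > 300.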
if ( (texture==1) && (precision==1) && (kepler==1) ) {
// Create channel.
hipChannelFormatDesc channel_desc;
channel_desc = hipCreateChannelDesc(32, 32, 32, 32,
hipChannelFormatKindSigned);
// Create resource descriptor.
struct hipResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = hipResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)dx;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m * num_vecs * sizeof(float);
// Specify texture object parameters.
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModePoint;
texDesc.readMode = hipReadModeElementType;
// Create texture object.
hipTextureObject_t texdx = 0;
hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
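// Request 8-byte shared-memory banks so that the 8-byte magmaFloatComplex
// entries of the reduction buffer map one-to-one onto banks on Kepler-class
// hardware (a performance hint only; correctness does not depend on it).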
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
if ( num_vecs%2 ==1 ) { // only a multiple of 2 can be processed
printf("error: number of vectors has to be a multiple of 2.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
if ( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = (num_vecs/2) * blocksize*alignment;
// every thread handles two vectors
if ( num_threads > 1024 ) {
printf("error: too many threads requested.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
dim3 block( blocksize, alignment, num_vecs/2 );
int dimgrid1 = sqrt(slices);
int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
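// The slices are laid out on a near-square 2D grid so that no single grid
// dimension grows too large; the kernels recover the slice index as
// bdx = blockIdx.y * gridDim.x + blockIdx.x. For example, slices = 2048
// gives dimgrid1 = 45 and dimgrid2 = 46, i.e. 2070 blocks, and the surplus
// blocks exit early through the row < num_rows guard.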
int Ms = num_vecs * blocksize*alignment * sizeof( magmaFloatComplex );
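// Note: Ms provides num_vecs * blocksize * alignment complex entries rather
// than num_vecs/2 * ... because every z-thread keeps two accumulators,
// shared[ldz] and shared[ldz+sv], one for each vector of the pair it handles.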
if ( alignment == 1) {
dim3 block( blocksize, num_vecs/2, 1 );
if ( beta == MAGMA_C_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D_texb), dim3(grid), dim3(block), 0, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D_tex), dim3(grid), dim3(block), 0, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 4) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_C_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D_texb), dim3(grid), dim3(block), Ms, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D_tex), dim3(grid), dim3(block), Ms, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 8) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_C_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D_texb), dim3(grid), dim3(block), Ms, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D_tex), dim3(grid), dim3(block), Ms, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 16) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_C_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D_texb), dim3(grid), dim3(block), Ms, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D_tex), dim3(grid), dim3(block), Ms, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 32) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_C_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D_texb), dim3(grid), dim3(block), Ms, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D_tex), dim3(grid), dim3(block), Ms, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else {
printf("error: alignment %d not supported.\n", alignment);
return MAGMA_ERR_NOT_SUPPORTED;
}
} else {
if ( num_vecs%2 ==1 ) { // only a multiple of 2 can be processed
printf("error: number of vectors has to be a multiple of 2.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
if ( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = num_vecs * blocksize*alignment;
// every thread handles two vectors
if ( num_threads > 1024 ) {
printf("error: too many threads requested.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
int dimgrid1 = sqrt(slices);
int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_threads * sizeof( magmaFloatComplex );
if ( alignment == 1) {
dim3 block( blocksize, num_vecs, 1 );
hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D), dim3(grid), dim3(block), 0, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 4) {
dim3 block( blocksize, alignment, num_vecs );
hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D), dim3(grid), dim3(block), Ms, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 8) {
dim3 block( blocksize, alignment, num_vecs );
hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D), dim3(grid), dim3(block), Ms, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 16) {
dim3 block( blocksize, alignment, num_vecs );
hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D), dim3(grid), dim3(block), Ms, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 32) {
dim3 block( blocksize, alignment, num_vecs );
hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D), dim3(grid), dim3(block), Ms, queue ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else {
printf("error: alignment %d not supported.\n", alignment);
return MAGMA_ERR_NOT_SUPPORTED;
}
}
return MAGMA_SUCCESS;
}
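// ---------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the original
// MAGMA source): a minimal example of how a caller might invoke
// magma_cmgesellpmv once the matrix has been packed into SELLP on the device.
// The example_* name is hypothetical, the device arrays and the queue are
// assumed to be set up elsewhere, and blocksize/slices/alignment must match
// the values used when packing the matrix.
// ---------------------------------------------------------------------------
static magma_int_t
example_cmgesellpmv_call(
magma_int_t m, magma_int_t n, magma_int_t num_vecs,
magma_int_t blocksize, magma_int_t slices, magma_int_t alignment,
magmaFloatComplex_ptr dval, // SELLP values (device)
magmaIndex_ptr dcolind, // SELLP column indices (device)
magmaIndex_ptr drowptr, // SELLP slice pointers (device)
magmaFloatComplex_ptr dx, // num_vecs input vectors X, row-major (see Purpose)
magmaFloatComplex_ptr dy, // num_vecs output vectors Y, column-major
magma_queue_t queue )
{
// Perform the SELLP SpMV described in the Purpose block above for num_vecs
// vectors; beta = 0 selects the *_texb kernels whenever the texture path is
// active, otherwise the plain-memory kernels handle it.
magmaFloatComplex alpha = MAGMA_C_MAKE( 1.0, 0.0 );
magmaFloatComplex beta = MAGMA_C_MAKE( 0.0, 0.0 );
return magma_cmgesellpmv( MagmaNoTrans, m, n, num_vecs,
blocksize, slices, alignment,
alpha, dval, dcolind, drowptr,
dx, beta, dy, queue );
}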
|
db7d70a145add89d20fe02479148f935715ce0d0.cu
|
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zmgesellcmmv.cu normal z -> c, Fri Jan 30 19:00:29 2015
*/
#include "cuda_runtime.h"
#include <stdio.h>
#include "common_magma.h"
#include <cublas_v2.h>
#define PRECISION_c
#define TEXTURE
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_1_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + idx + blocksize*k ];
int col =
dcolind[ offset + idx + blocksize*k ] ;
dot += val * dx[ col*num_vecs+idy ];
}
dy[ row+idy*num_rows ] = dot*alpha + beta*dy [ row+idy*num_rows ];
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_4_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ] ;
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_8_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
const magmaFloatComplex * __restrict__ dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ] ;
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_16_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_32_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if( idx < 16 ){
shared[ldz]+=shared[ldz+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
/************************* same but using texture mem *************************/
// SELLP SpMV kernel 2D grid - for large number of vectors
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_1_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + idx + blocksize*k ];
int col =
num_vecs * dcolind[ offset + idx + blocksize*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idy );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
dy[row+num_rows*idy*2] =
dot1*alpha
+ beta*dy [row*num_vecs+idy*2];
dy[row+num_rows*idy*2+num_rows] =
dot2*alpha
+ beta*dy [row*num_vecs+idy*2+1];
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_4_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_8_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_16_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_32_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 16 ){
shared[ldz]+=shared[ldz+blocksize*16];
shared[ldz+sv]+=shared[ldz+sv+blocksize*16];
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
}
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
#endif
}
//***************** routines for beta = 0 ************************************//
// SELLP SpMV kernel 2D grid - for large number of vectors
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_1_3D_texb(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + idx + blocksize*k ];
int col =
num_vecs * dcolind[ offset + idx + blocksize*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idy );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
dy[row+num_rows*idy*2] =
dot1*alpha;
dy[row+num_rows*idy*2+num_rows] =
dot2*alpha;
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_4_3D_texb(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_8_3D_texb(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
__syncthreads();
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_16_3D_texb(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
__syncthreads();
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zmgesellptmv_kernel_32_3D_texb(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaFloatComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot1 = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex dot2 = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ] ;
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
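// pairwise tree reduction of the T=32 partial sums per row held in shared
// memory (and of the second set at offset sv for the second vector handled
// by this thread)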
if( idx < 16 ){
shared[ldz]+=shared[ldz+blocksize*16];
shared[ldz+sv]+=shared[ldz+sv+blocksize*16];
__syncthreads();
if( idx < 8 ){
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
}
if( idx < 4 ){
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if( idx < 2 ){
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if( idx == 0 ) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
}
}
}
#endif
}
//*************************** end kernels using texture ********************//
/**
Purpose
-------
This routine computes Y = alpha * A^t * X + beta * Y on the GPU.
Input format is SELLP. Note that the input format for X is row-major,
while the output format for Y is column-major.
Arguments
---------
@param[in]
transA magma_trans_t
transpose A?
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs magma_int_t
number of columns in X and Y
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
column indices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
row pointer of SELLP
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmgesellpmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
// using a 3D thread grid for small num_vecs, a 2D grid otherwise
int texture=0, kepler=0, precision=0;
magma_int_t arch = magma_getdevice_arch();
if ( arch > 300 )
kepler = 1;
#if defined(PRECISION_d)
precision = 1;
#endif
#if defined(TEXTURE)
texture = 1;
#endif
if ( (texture==1) && (precision==1) && (kepler==1) ) {
// Create channel.
cudaChannelFormatDesc channel_desc;
channel_desc = cudaCreateChannelDesc(32, 32, 32, 32,
cudaChannelFormatKindSigned);
// Create resource descriptor.
struct cudaResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = cudaResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)dx;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m * num_vecs * sizeof(float);
// Specify texture object parameters.
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
// Create texture object.
cudaTextureObject_t texdx = 0;
cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
if ( num_vecs%2 ==1 ) { // only multiples of 2 can be processed
printf("error: number of vectors has to be a multiple of 2.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
if ( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = (num_vecs/2) * blocksize*alignment;
// every thread handles two vectors
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
dim3 block( blocksize, alignment, num_vecs/2 );
int dimgrid1 = sqrt(slices);
int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_vecs * blocksize*alignment * sizeof( magmaFloatComplex );
if ( alignment == 1) {
dim3 block( blocksize, num_vecs/2, 1 );
if ( beta == MAGMA_C_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_1_3D_texb<<< grid, block, 0, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, dy );
else
zmgesellptmv_kernel_1_3D_tex<<< grid, block, 0, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 4) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_C_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_4_3D_texb<<< grid, block, Ms, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, dy );
else
zmgesellptmv_kernel_4_3D_tex<<< grid, block, Ms, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 8) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_C_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_8_3D_texb<<< grid, block, Ms, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, dy );
else
zmgesellptmv_kernel_8_3D_tex<<< grid, block, Ms, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 16) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_C_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_16_3D_texb<<< grid, block, Ms, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, dy );
else
zmgesellptmv_kernel_16_3D_tex<<< grid, block, Ms, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 32) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_C_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_32_3D_texb<<< grid, block, Ms, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, dy );
else
zmgesellptmv_kernel_32_3D_tex<<< grid, block, Ms, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else {
printf("error: alignment %d not supported.\n", alignment);
return MAGMA_ERR_NOT_SUPPORTED;
}
} else {
if ( num_vecs%2 ==1 ) { // only multiples of 2 can be processed
printf("error: number of vectors has to be a multiple of 2.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
if ( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = num_vecs * blocksize*alignment;
// every thread handles one vector
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int dimgrid1 = sqrt(slices);
int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_threads * sizeof( magmaFloatComplex );
if ( alignment == 1) {
dim3 block( blocksize, num_vecs, 1 );
zmgesellptmv_kernel_1_3D<<< grid, block, 0, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 4) {
dim3 block( blocksize, alignment, num_vecs );
zmgesellptmv_kernel_4_3D<<< grid, block, Ms, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 8) {
dim3 block( blocksize, alignment, num_vecs );
zmgesellptmv_kernel_8_3D<<< grid, block, Ms, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 16) {
dim3 block( blocksize, alignment, num_vecs );
zmgesellptmv_kernel_16_3D<<< grid, block, Ms, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 32) {
dim3 block( blocksize, alignment, num_vecs );
zmgesellptmv_kernel_32_3D<<< grid, block, Ms, queue >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else {
printf("error: alignment %d not supported.\n", alignment);
return MAGMA_ERR_NOT_SUPPORTED;
}
}
return MAGMA_SUCCESS;
}
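/*
Usage sketch (illustrative only, not part of the original source): dA_val,
dA_colind, dA_rowptr are assumed to be the device arrays of a matrix already
converted to SELLP, dX/dY the device blocks of vectors, and queue an existing
magma_queue_t.
magmaFloatComplex one = MAGMA_C_MAKE( 1.0, 0.0 );
magmaFloatComplex zero = MAGMA_C_MAKE( 0.0, 0.0 );
magma_cmgesellpmv( MagmaNoTrans, m, n, num_vecs,
blocksize, slices, alignment,
one, dA_val, dA_colind, dA_rowptr,
dX, zero, dY, queue );
*/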
|
9a6e28203ce1b43c488dabcbda4def4922fbaa61.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Author: brian ichter
Generate state space samples
*/
#include "sampler.cuh"
// TODO: add protection on initial and goal indexes
void createSamplesIID(int seed, float *samples, float *initial, float *goal, float *lo, float *hi)
{
std::srand(seed);
for (int d = 0; d < DIM; ++d) {
for (int i = 0; i < NUM; ++i) {
samples[i*DIM + d] = ((float)std::rand())/RAND_MAX*(hi[d] - lo[d]) + lo[d];
}
}
// replace goal and initial nodes
for (int d = 0; d < DIM; ++d) {
samples[d] = initial[d];
samples[(NUM-1)*DIM + d] = goal[d];
}
}
void createSamplesHalton(int skip, float *samples, float *initial, float *goal, float *lo, float *hi)
{
int numPrimes = 25;
if (skip + DIM > numPrimes) {
std::cout << "in sampler.cu: skip in creating halton seq too high" << std::endl;
return;
}
int bases[] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97};
for (int d = 0; d < DIM; ++d) {
for (int n = 0; n < NUM; ++n) {
samples[n*DIM + d] = localHaltonSingleNumber(n, bases[d + skip])*(hi[d] - lo[d]) + lo[d];
}
}
// replace goal and initial nodes
for (int d = 0; d < DIM; ++d) {
samples[d] = initial[d];
samples[(NUM-1)*DIM + d] = goal[d];
}
}
float localHaltonSingleNumber(int n, int b)
{
float hn = 0;
int n0 = n;
float f = 1/((float) b);
while (n0 > 0) {
float n1 = n0/b;
int r = n0 - n1*b;
hn += f*r;
f = f/b;
n0 = n1;
}
return hn;
}
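// Worked example (added for illustration): the radical inverse in base b = 2
// gives h(1) = 0.5, h(2) = 0.25, h(3) = 0.75, h(4) = 0.125, ... i.e. the
// base-b digits of n mirrored about the radix point.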
__global__
void sampleFree(float* obstacles, int obstaclesCount, float* samples, bool* isFreeSamples, float *debugOutput)
{
int node = blockIdx.x * blockDim.x + threadIdx.x;
if (node >= NUM)
return;
float nodeLoc[3];
for (int d = 0; d < 3; ++d) {
nodeLoc[d] = samples[node*DIM+d];
}
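// obstacle layout implied by the indexing below: each obstacle is an
// axis-aligned box stored as DIM lower-corner values followed by DIM
// upper-corner values; only the first 3 dimensions are tested here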
for (int obs_idx = 0; obs_idx < obstaclesCount; ++obs_idx) {
bool notFree = true;
for (int d = 0; d < 3; ++d) {
notFree = notFree &&
nodeLoc[d] > obstacles[obs_idx*2*DIM + d] &&
nodeLoc[d] < obstacles[obs_idx*2*DIM + DIM + d];
if (!notFree)
break;
}
if (notFree) {
isFreeSamples[node] = false;
return;
}
}
isFreeSamples[node] = true;
}
__global__
void fillSamples(float* samples, float* samplesAll, int* sampleFreeIdx, bool* isFreeSamples, float *debugOutput)
{
int node = blockIdx.x * blockDim.x + threadIdx.x;
if (node >= NUM)
return;
if (!isFreeSamples[node])
return;
for (int d = 0; d < DIM; ++d) {
samples[sampleFreeIdx[node]*DIM+d] = samplesAll[node*DIM+d];
}
}
__global__
void createSortHeuristic(float* samples, int initial_idx, float* heuristic, int samplesCount)
{
int node = blockIdx.x * blockDim.x + threadIdx.x;
if (node >= samplesCount)
return;
float heuristicValue = 0;
for (int d = 0; d < DIM; ++d) {
float dist = samples[node*DIM + d] - samples[initial_idx*DIM + d];
heuristicValue += dist*dist;
}
for (int d = 0; d < DIM; ++d) {
heuristic[node*DIM+d] = heuristicValue;
}
}
// returns false if not free, true if it is free (i.e., valid)
bool sampleFreePt(float* obstacles, int obstaclesCount, float* sample)
{
float nodeLoc[3];
for (int d = 0; d < 3; ++d) {
nodeLoc[d] = sample[d];
}
for (int obs_idx = 0; obs_idx < obstaclesCount; ++obs_idx) {
bool notFree = true;
for (int d = 0; d < 3; ++d) {
notFree = notFree &&
nodeLoc[d] > obstacles[obs_idx*2*DIM + d] &&
nodeLoc[d] < obstacles[obs_idx*2*DIM + DIM + d];
if (!notFree)
break;
}
if (notFree) {
return false;
}
}
return true;
}
|
9a6e28203ce1b43c488dabcbda4def4922fbaa61.cu
|
/*
Author: brian ichter
Generate state space samples
*/
#include "sampler.cuh"
// TODO: add protection on initial and goal indexes
void createSamplesIID(int seed, float *samples, float *initial, float *goal, float *lo, float *hi)
{
std::srand(seed);
for (int d = 0; d < DIM; ++d) {
for (int i = 0; i < NUM; ++i) {
samples[i*DIM + d] = ((float)std::rand())/RAND_MAX*(hi[d] - lo[d]) + lo[d];
}
}
// replace goal and initial nodes
for (int d = 0; d < DIM; ++d) {
samples[d] = initial[d];
samples[(NUM-1)*DIM + d] = goal[d];
}
}
void createSamplesHalton(int skip, float *samples, float *initial, float *goal, float *lo, float *hi)
{
int numPrimes = 25;
if (skip + DIM > numPrimes) {
std::cout << "in sampler.cu: skip in creating halton seq too high" << std::endl;
return;
}
int bases[] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97};
for (int d = 0; d < DIM; ++d) {
for (int n = 0; n < NUM; ++n) {
samples[n*DIM + d] = localHaltonSingleNumber(n, bases[d + skip])*(hi[d] - lo[d]) + lo[d];
}
}
// replace goal and initial nodes
for (int d = 0; d < DIM; ++d) {
samples[d] = initial[d];
samples[(NUM-1)*DIM + d] = goal[d];
}
}
float localHaltonSingleNumber(int n, int b)
{
float hn = 0;
int n0 = n;
float f = 1/((float) b);
while (n0 > 0) {
float n1 = n0/b;
int r = n0 - n1*b;
hn += f*r;
f = f/b;
n0 = n1;
}
return hn;
}
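// Worked example (added for illustration): the radical inverse in base b = 2
// gives h(1) = 0.5, h(2) = 0.25, h(3) = 0.75, h(4) = 0.125, ... i.e. the
// base-b digits of n mirrored about the radix point.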
__global__
void sampleFree(float* obstacles, int obstaclesCount, float* samples, bool* isFreeSamples, float *debugOutput)
{
int node = blockIdx.x * blockDim.x + threadIdx.x;
if (node >= NUM)
return;
float nodeLoc[3];
for (int d = 0; d < 3; ++d) {
nodeLoc[d] = samples[node*DIM+d];
}
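// obstacle layout implied by the indexing below: each obstacle is an
// axis-aligned box stored as DIM lower-corner values followed by DIM
// upper-corner values; only the first 3 dimensions are tested here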
for (int obs_idx = 0; obs_idx < obstaclesCount; ++obs_idx) {
bool notFree = true;
for (int d = 0; d < 3; ++d) {
notFree = notFree &&
nodeLoc[d] > obstacles[obs_idx*2*DIM + d] &&
nodeLoc[d] < obstacles[obs_idx*2*DIM + DIM + d];
if (!notFree)
break;
}
if (notFree) {
isFreeSamples[node] = false;
return;
}
}
isFreeSamples[node] = true;
}
__global__
void fillSamples(float* samples, float* samplesAll, int* sampleFreeIdx, bool* isFreeSamples, float *debugOutput)
{
int node = blockIdx.x * blockDim.x + threadIdx.x;
if (node >= NUM)
return;
if (!isFreeSamples[node])
return;
for (int d = 0; d < DIM; ++d) {
samples[sampleFreeIdx[node]*DIM+d] = samplesAll[node*DIM+d];
}
}
__global__
void createSortHeuristic(float* samples, int initial_idx, float* heuristic, int samplesCount)
{
int node = blockIdx.x * blockDim.x + threadIdx.x;
if (node >= samplesCount)
return;
float heuristicValue = 0;
for (int d = 0; d < DIM; ++d) {
float dist = samples[node*DIM + d] - samples[initial_idx*DIM + d];
heuristicValue += dist*dist;
}
for (int d = 0; d < DIM; ++d) {
heuristic[node*DIM+d] = heuristicValue;
}
}
// returns false if not free, true if it is free (i.e., valid)
bool sampleFreePt(float* obstacles, int obstaclesCount, float* sample)
{
float nodeLoc[3];
for (int d = 0; d < 3; ++d) {
nodeLoc[d] = sample[d];
}
for (int obs_idx = 0; obs_idx < obstaclesCount; ++obs_idx) {
bool notFree = true;
for (int d = 0; d < 3; ++d) {
notFree = notFree &&
nodeLoc[d] > obstacles[obs_idx*2*DIM + d] &&
nodeLoc[d] < obstacles[obs_idx*2*DIM + DIM + d];
if (!notFree)
break;
}
if (notFree) {
return false;
}
}
return true;
}
|
82cb0f911a21db2d90c61547f1cc404d26fa77e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>
// STUDENTS: be sure to set the single define at the top of this file,
// depending on which machines you are running on.
#include "im1.h"
#define MAX_SHARED_MEM 16384
//Constant buffer for the gaussian kernel
#define D_MAX 50 //10000Bytes < 64KB
__constant__ float d_const_Gaussian[D_MAX*D_MAX];
// handy error macro:
#define GPU_CHECKERROR( err ) (gpuCheckError( err, __FILE__, __LINE__ ))
inline static void gpuCheckError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
//Code to display device properties.
// http://gpucoder.livejournal.com/1064.html
void printDevProp(hipDeviceProp_t devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %lu\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %lu\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %lu\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %lu\n", devProp.totalConstMem);
printf("Texture alignment: %lu\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
//
// your __global__ kernel can go here, if you want:
//
__global__ void blurr_GPU(float * d_imageArray,float * d_imageArrayResult, float * d_dev_Gaussian, int w,int h, int r){
int d = 2*r + 1;
extern __shared__ float picBlock[];
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
if(x >= w || y >= h)
return;
int idx;
unsigned int idxN, idxS, idxW, idxE;
float tempR = 0.0;
float tempG = 0.0;
float tempB = 0.0;
float Gaus_val = 0.0;
int shDim_x = (blockDim.x + 2*r);
int shDim_y = (blockDim.y + 2*r);
int offset = shDim_x * shDim_y;
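// shared-memory layout: picBlock stores the R, G and B planes of the
// (blockDim.x+2r) x (blockDim.y+2r) tile back to back, at offsets 0,
// offset and 2*offset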
int i, j;
int iSh, jSh;
//Copy the image tile (plus a border of radius r) into shared memory
//Blocks that do not require a boundary check
if( (blockIdx.x*blockDim.x >= r ) && (blockIdx.y*blockDim.y >= r) &&
(((blockIdx.x+1)*blockDim.x +r) < w) && (((blockIdx.y+1)*blockDim.y +r) < h)){
//Collaborative loading into shared memory
for( i = y-r, iSh = threadIdx.y ; i< (blockDim.y*(blockIdx.y + 1) + r) ; i+=blockDim.y , iSh+=blockDim.y ){
for( j = x-r, jSh = threadIdx.x ; j < (blockDim.x*(blockIdx.x + 1) + r) ; j+=blockDim.x , jSh+=blockDim.x){
picBlock[(iSh*shDim_x+jSh)] = d_imageArray[(i*w+j)*3];
picBlock[(iSh*shDim_x+jSh)+offset] = d_imageArray[(i*w+j)*3+1];
picBlock[(iSh*shDim_x+jSh)+offset*2] = d_imageArray[(i*w+j)*3+2];
}
}
}
//These blocks may access picture elements that are out of bounds
else{
int xLim = blockDim.x*(blockIdx.x + 1) > w ? w : (blockDim.x*(blockIdx.x + 1) + r) ;
int yLim = blockDim.y*(blockIdx.y + 1) > h ? h : (blockDim.y*(blockIdx.y + 1) + r);
int xStep = blockDim.x*(blockIdx.x + 1) > w ? w%blockDim.x : blockDim.x ;
int yStep = blockDim.y*(blockIdx.y + 1) > h ? h%blockDim.y : blockDim.y ;
//Collaborative loading into shared memory
for( i = y-r, iSh = threadIdx.y ; i< yLim ; iSh+=yStep , i +=yStep ){
for( j = x-r, jSh = threadIdx.x ; j < xLim ; jSh+=xStep , j +=xStep){
idxN = i<0? 0 : (i>=h ? h-1 : i );
idxS = j<0? 0 : (j>=w ? w-1 : j );
picBlock[(iSh*shDim_x+jSh)] = d_imageArray[(idxN*w+idxS)*3];
picBlock[(iSh*shDim_x+jSh)+offset] = d_imageArray[(idxN*w+idxS)*3+1];
picBlock[(iSh*shDim_x+jSh)+offset*2] = d_imageArray[(idxN*w+idxS)*3+2];
}
}
}
__syncthreads(); //Make sure every thread has loaded all its portions.
/*
* All the subblocks are now in shared memory. Now we blur the image.
*/
for( i = 0; i <= r; i++){
//Kernel is symmetric along the x and y axes.
idxN = idxS = ((threadIdx.y+r+i)*shDim_x + (threadIdx.x+r));
idxW = idxE = ((threadIdx.y+r-i)*shDim_x + (threadIdx.x+r));
iSh = (i+r)*d+r;
//Loop Unrolling 2 times.
for( j = 0; j <= r-1 ; j+=2){
Gaus_val = d_const_Gaussian[ iSh ];
tempR += (picBlock[idxN]+picBlock[idxS] + picBlock[idxE]+picBlock[idxW])*Gaus_val;
tempG += (picBlock[idxN+offset]+picBlock[idxS+offset]+picBlock[idxE+offset]+picBlock[idxW+offset])*Gaus_val;
tempB += (picBlock[idxN+offset*2]+picBlock[idxS+offset*2]+picBlock[idxE+offset*2]+picBlock[idxW+offset*2])*Gaus_val;
idxS++; idxN--; idxE++; idxW--; iSh++;
Gaus_val = d_const_Gaussian[ iSh ];
tempR += (picBlock[idxN]+picBlock[idxS] + picBlock[idxE]+picBlock[idxW])*Gaus_val;
tempG += (picBlock[idxN+offset]+picBlock[idxS+offset]+picBlock[idxE+offset]+picBlock[idxW+offset])*Gaus_val;
tempB += (picBlock[idxN+offset*2]+picBlock[idxS+offset*2]+picBlock[idxE+offset*2]+picBlock[idxW+offset*2])*Gaus_val;
idxS++; idxN--; idxE++; idxW--; iSh++;
}
//Complete the unrolled portion
for( ; j <= r ; j++){
Gaus_val = d_const_Gaussian[ iSh ];
tempR += (picBlock[idxN]+picBlock[idxS] + picBlock[idxE]+picBlock[idxW])*Gaus_val;
tempG += (picBlock[idxN+offset]+picBlock[idxS+offset]+picBlock[idxE+offset]+picBlock[idxW+offset])*Gaus_val;
tempB += (picBlock[idxN+offset*2]+picBlock[idxS+offset*2]+picBlock[idxE+offset*2]+picBlock[idxW+offset*2])*Gaus_val;
idxS++; idxN--; idxE++; idxW--; iSh++;
}
}
//store the blurred image.
idx = ((y * w) + x)*3;
d_imageArrayResult[idx++] = tempR;
d_imageArrayResult[idx++] = tempG;
d_imageArrayResult[idx++] = tempB;
}
void blurr_CPU( float *h_imageArray, float *h_imageArrayResult, int w, int h, int r, float * h_Gaussian ){
// CPU reference implementation: convolve the R/G/B channels of every
// interior pixel with the (2r+1)x(2r+1) Gaussian kernel
int d = 2*r+1;
for (int y = (0+r); y < (h-r); ++y) {
for (int x = (0+r); x < (w-r); ++x) {
float tempR = 0.0f;
float tempG = 0.0f;
float tempB = 0.0f;
for(int i = -r; i <= r; i++){
for(int j = -r; j <= r ; j++){
unsigned int idx = (((y+i) * w) + (x+j)) * 3;
float Gaus_val = h_Gaussian[(i+r)*d+r];
tempR += h_imageArray[idx]*Gaus_val;
tempG += h_imageArray[idx+1]*Gaus_val;
tempB += h_imageArray[idx+2]*Gaus_val;
}
}
unsigned int idx = (((y) * w) + (x)) * 3;
h_imageArrayResult[idx] = tempR;
h_imageArrayResult[idx+1] = tempG;
h_imageArrayResult[idx+2] = tempB;
}
}
}
int main (int argc, char *argv[])
{
struct timeval t0, t1;
int w, h; // the width & height of the image, used frequently
int r = atoi( argv[2] ); //Pray that the input radius is a single digit
hipDeviceProp_t dev_prop_curr;
hipGetDeviceProperties( &dev_prop_curr , 0);
//printDevProp( dev_prop_curr );
/*
*Start by calculating the Gaussian Kernel
*/
int d = 2*r+1;
float h_Gaussian[d][d];
float sigma = ((float)r/3.0);
printf("sigma %10.4f\n",sigma);
float preFactor = 1/(2*M_PI*sigma*sigma);
float normalization = 0.0f;
/*
* Generate a Gaussian matrix
*/
/*
* non-normalized Gaussian
*/
for(int i = -r; i <= r; i++){
for(int j = -r; j <= r ; j++){
float tempGauss = preFactor* exp( -(i*i+j*j)/(2*sigma*sigma) );
h_Gaussian[i+r][j+r]= tempGauss;
normalization += tempGauss;
}
}
/*
* normalized Gaussian
*/
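// note (added for clarity): blurr_GPU accumulates four mirrored taps
// (+/-i, +/-j) per weight, so the i==0 row and the j==0 column would
// otherwise be counted twice; halving those weights below compensates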
for(int i = -r; i <= r; i++){
for(int j = -r; j <= r ; j++){
h_Gaussian[i+r][j+r] /= normalization;
if(i==0){
h_Gaussian[i+r][j+r]/=2.0;
}
if(j==0){
h_Gaussian[i+r][j+r]/=2.0;
}
//printf("%e\t",h_Gaussian[i+r][j+r]);
}
//printf("\n");
}
//
// GPU version: it will save whatever is in h_imageArrayResult
// to the file "hw1b.exr"
//
float *h_imageArray;
readOpenEXRFile (argv[1], &h_imageArray, w, h);
float * h_imageArrayResult = (float*)malloc(sizeof(float)*3*w*h);
gettimeofday(&t0,0);
//
// process it on the GPU: 1) copy it to device memory, 2) process
// it with a 2d grid of 2d blocks, with each thread assigned to a
// pixel. then 3) copy it back.
//
float * d_test;
float BLOCK_X = 16.0;
float BLOCK_Y = 16.0;
//For Nvidia cluster.
if( dev_prop_curr.maxThreadsPerBlock >= 1024 ){
BLOCK_X = 32.0;
BLOCK_Y = 32.0;
}
size_t sharedBlockSZ = 3*(BLOCK_X+2*r) * (BLOCK_Y+2*r) * sizeof(float); //Picture blocks
if(sharedBlockSZ > dev_prop_curr.sharedMemPerBlock){
printf("Shared Memory exceeded allocated size per block: %lu Max %lu\n",sharedBlockSZ, dev_prop_curr.sharedMemPerBlock);
return -1;
}
dim3 numThreads( BLOCK_X, BLOCK_Y,1);
dim3 numBlocks( ceil(w/BLOCK_X), ceil(h/BLOCK_Y),1);
float * d_imageArray;
float * d_imageArrayResult;
GPU_CHECKERROR( hipMalloc((void **)&d_imageArray, sizeof(float)*w*h*3) );
GPU_CHECKERROR( hipMalloc((void **)&d_imageArrayResult, sizeof(float)*w*h*3) );
GPU_CHECKERROR( hipMalloc((void **)&d_test, sizeof(float)*D_MAX*D_MAX) );
GPU_CHECKERROR( hipMemcpyToSymbol(
d_const_Gaussian,
&h_Gaussian[0][0],
sizeof(float)*d*d,
0,
hipMemcpyHostToDevice));
GPU_CHECKERROR( hipMemcpy( d_imageArray,
h_imageArray,
sizeof(float)*w*h*3,
hipMemcpyHostToDevice ) );
//
// Your memory copy, & kernel launch code goes here:
//
printf("Launching one kernel\n");
hipLaunchKernelGGL(( blurr_GPU), dim3(numBlocks), dim3(numThreads) , sharedBlockSZ, 0, d_imageArray,d_imageArrayResult,d_test,w,h,r);
GPU_CHECKERROR( hipGetLastError() );
//}
GPU_CHECKERROR( hipDeviceSynchronize() );
//
//Fetch the results
//
GPU_CHECKERROR( hipMemcpy( h_imageArrayResult,
d_imageArrayResult,
sizeof(float)*w*h*3,
hipMemcpyDeviceToHost ) );
gettimeofday(&t1,0);
float timdiff = (1000000.0*(t1.tv_sec - t0.tv_sec) + (t1.tv_usec - t0.tv_usec)) / 1000.0;
printf ("\ndone: time taken for parallel version is %10.1f ms\n", timdiff);
printf("writing output image hw1b.exr\n");
writeOpenEXRFile ("hw1b.exr", h_imageArrayResult, w, h);
free (h_imageArray);
free (h_imageArrayResult);
GPU_CHECKERROR( hipFree(d_imageArray) );
GPU_CHECKERROR( hipFree(d_imageArrayResult));
GPU_CHECKERROR( hipFree(d_test) );
printf("done.\n");
return 0;
}
|
82cb0f911a21db2d90c61547f1cc404d26fa77e5.cu
|
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>
// STUDENTS: be sure to set the single define at the top of this file,
// depending on which machines you are running on.
#include "im1.h"
#define MAX_SHARED_MEM 16384
//Constant buffer for the gaussian kernel
#define D_MAX 50 //10000Bytes < 64KB
__constant__ float d_const_Gaussian[D_MAX*D_MAX];
// handy error macro:
#define GPU_CHECKERROR( err ) (gpuCheckError( err, __FILE__, __LINE__ ))
inline static void gpuCheckError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
//Code to display device properties.
// http://gpucoder.livejournal.com/1064.html
void printDevProp(cudaDeviceProp devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %lu\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %lu\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %lu\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %lu\n", devProp.totalConstMem);
printf("Texture alignment: %lu\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
//
// your __global__ kernel can go here, if you want:
//
__global__ void blurr_GPU(float * d_imageArray,float * d_imageArrayResult, float * d_dev_Gaussian, int w,int h, int r){
int d = 2*r + 1;
extern __shared__ float picBlock[];
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
if(x >= w || y >= h)
return;
int idx;
unsigned int idxN, idxS, idxW, idxE;
float tempR = 0.0;
float tempG = 0.0;
float tempB = 0.0;
float Gaus_val = 0.0;
int shDim_x = (blockDim.x + 2*r);
int shDim_y = (blockDim.y + 2*r);
int offset = shDim_x * shDim_y;
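// shared-memory layout: picBlock stores the R, G and B planes of the
// (blockDim.x+2r) x (blockDim.y+2r) tile back to back, at offsets 0,
// offset and 2*offset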
int i, j;
int iSh, jSh;
//Copy the image tile (plus a border of radius r) into shared memory
//Blocks that do not require a boundary check
if( (blockIdx.x*blockDim.x >= r ) && (blockIdx.y*blockDim.y >= r) &&
(((blockIdx.x+1)*blockDim.x +r) < w) && (((blockIdx.y+1)*blockDim.y +r) < h)){
//Collaborative loading into shared memory
for( i = y-r, iSh = threadIdx.y ; i< (blockDim.y*(blockIdx.y + 1) + r) ; i+=blockDim.y , iSh+=blockDim.y ){
for( j = x-r, jSh = threadIdx.x ; j < (blockDim.x*(blockIdx.x + 1) + r) ; j+=blockDim.x , jSh+=blockDim.x){
picBlock[(iSh*shDim_x+jSh)] = d_imageArray[(i*w+j)*3];
picBlock[(iSh*shDim_x+jSh)+offset] = d_imageArray[(i*w+j)*3+1];
picBlock[(iSh*shDim_x+jSh)+offset*2] = d_imageArray[(i*w+j)*3+2];
}
}
}
//These blocks may access picture elements that are out of bounds
else{
int xLim = blockDim.x*(blockIdx.x + 1) > w ? w : (blockDim.x*(blockIdx.x + 1) + r) ;
int yLim = blockDim.y*(blockIdx.y + 1) > h ? h : (blockDim.y*(blockIdx.y + 1) + r);
int xStep = blockDim.x*(blockIdx.x + 1) > w ? w%blockDim.x : blockDim.x ;
int yStep = blockDim.y*(blockIdx.y + 1) > h ? h%blockDim.y : blockDim.y ;
//Collaborative loading into shared memory
for( i = y-r, iSh = threadIdx.y ; i< yLim ; iSh+=yStep , i +=yStep ){
for( j = x-r, jSh = threadIdx.x ; j < xLim ; jSh+=xStep , j +=xStep){
idxN = i<0? 0 : (i>=h ? h-1 : i );
idxS = j<0? 0 : (j>=w ? w-1 : j );
picBlock[(iSh*shDim_x+jSh)] = d_imageArray[(idxN*w+idxS)*3];
picBlock[(iSh*shDim_x+jSh)+offset] = d_imageArray[(idxN*w+idxS)*3+1];
picBlock[(iSh*shDim_x+jSh)+offset*2] = d_imageArray[(idxN*w+idxS)*3+2];
}
}
}
__syncthreads(); //Make sure every thread has loaded all its portions.
/*
* All the subblocks are now in shared memory. Now we blur the image.
*/
for( i = 0; i <= r; i++){
//Kernel is symmetric along the x and y axes.
idxN = idxS = ((threadIdx.y+r+i)*shDim_x + (threadIdx.x+r));
idxW = idxE = ((threadIdx.y+r-i)*shDim_x + (threadIdx.x+r));
iSh = (i+r)*d+r;
//Loop Unrolling 2 times.
for( j = 0; j <= r-1 ; j+=2){
Gaus_val = d_const_Gaussian[ iSh ];
tempR += (picBlock[idxN]+picBlock[idxS] + picBlock[idxE]+picBlock[idxW])*Gaus_val;
tempG += (picBlock[idxN+offset]+picBlock[idxS+offset]+picBlock[idxE+offset]+picBlock[idxW+offset])*Gaus_val;
tempB += (picBlock[idxN+offset*2]+picBlock[idxS+offset*2]+picBlock[idxE+offset*2]+picBlock[idxW+offset*2])*Gaus_val;
idxS++; idxN--; idxE++; idxW--; iSh++;
Gaus_val = d_const_Gaussian[ iSh ];
tempR += (picBlock[idxN]+picBlock[idxS] + picBlock[idxE]+picBlock[idxW])*Gaus_val;
tempG += (picBlock[idxN+offset]+picBlock[idxS+offset]+picBlock[idxE+offset]+picBlock[idxW+offset])*Gaus_val;
tempB += (picBlock[idxN+offset*2]+picBlock[idxS+offset*2]+picBlock[idxE+offset*2]+picBlock[idxW+offset*2])*Gaus_val;
idxS++; idxN--; idxE++; idxW--; iSh++;
}
//Complete the unrolled portion
for( ; j <= r ; j++){
Gaus_val = d_const_Gaussian[ iSh ];
tempR += (picBlock[idxN]+picBlock[idxS] + picBlock[idxE]+picBlock[idxW])*Gaus_val;
tempG += (picBlock[idxN+offset]+picBlock[idxS+offset]+picBlock[idxE+offset]+picBlock[idxW+offset])*Gaus_val;
tempB += (picBlock[idxN+offset*2]+picBlock[idxS+offset*2]+picBlock[idxE+offset*2]+picBlock[idxW+offset*2])*Gaus_val;
idxS++; idxN--; idxE++; idxW--; iSh++;
}
}
//store the blurred image.
idx = ((y * w) + x)*3;
d_imageArrayResult[idx++] = tempR;
d_imageArrayResult[idx++] = tempG;
d_imageArrayResult[idx++] = tempB;
}
void blurr_CPU( float *h_imageArray, float *h_imageArrayResult, int w, int h, int r, float * h_Gaussian ){
// CPU reference implementation: convolve the R/G/B channels of every
// interior pixel with the (2r+1)x(2r+1) Gaussian kernel
int d = 2*r+1;
for (int y = (0+r); y < (h-r); ++y) {
for (int x = (0+r); x < (w-r); ++x) {
float tempR = 0.0f;
float tempG = 0.0f;
float tempB = 0.0f;
for(int i = -r; i <= r; i++){
for(int j = -r; j <= r ; j++){
unsigned int idx = (((y+i) * w) + (x+j)) * 3;
float Gaus_val = h_Gaussian[(i+r)*d+r];
tempR += h_imageArray[idx]*Gaus_val;
tempG += h_imageArray[idx+1]*Gaus_val;
tempB += h_imageArray[idx+2]*Gaus_val;
}
}
unsigned int idx = (((y) * w) + (x)) * 3;
h_imageArrayResult[idx] = tempR;
h_imageArrayResult[idx+1] = tempG;
h_imageArrayResult[idx+2] = tempB;
}
}
}
int main (int argc, char *argv[])
{
struct timeval t0, t1;
int w, h; // the width & height of the image, used frequently
int r = atoi( argv[2] ); //Pray that the input radius is a single digit
cudaDeviceProp dev_prop_curr;
cudaGetDeviceProperties( &dev_prop_curr , 0);
//printDevProp( dev_prop_curr );
/*
*Start by calculating the Gaussian Kernel
*/
int d = 2*r+1;
float h_Gaussian[d][d];
float sigma = ((float)r/3.0);
printf("sigma %10.4f\n",sigma);
float preFactor = 1/(2*M_PI*sigma*sigma);
float normalization = 0.0f;
/*
* Generate a Gaussian matrix
*/
/*
* non-normalized Gaussian
*/
for(int i = -r; i <= r; i++){
for(int j = -r; j <= r ; j++){
float tempGauss = preFactor* exp( -(i*i+j*j)/(2*sigma*sigma) );
h_Gaussian[i+r][j+r]= tempGauss;
normalization += tempGauss;
}
}
/*
* normalized Gaussian
*/
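// note (added for clarity): blurr_GPU accumulates four mirrored taps
// (+/-i, +/-j) per weight, so the i==0 row and the j==0 column would
// otherwise be counted twice; halving those weights below compensates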
for(int i = -r; i <= r; i++){
for(int j = -r; j <= r ; j++){
h_Gaussian[i+r][j+r] /= normalization;
if(i==0){
h_Gaussian[i+r][j+r]/=2.0;
}
if(j==0){
h_Gaussian[i+r][j+r]/=2.0;
}
//printf("%e\t",h_Gaussian[i+r][j+r]);
}
//printf("\n");
}
//
// GPU version: it will save whatever is in h_imageArrayResult
// to the file "hw1b.exr"
//
float *h_imageArray;
readOpenEXRFile (argv[1], &h_imageArray, w, h);
float * h_imageArrayResult = (float*)malloc(sizeof(float)*3*w*h);
gettimeofday(&t0,0);
//
// process it on the GPU: 1) copy it to device memory, 2) process
// it with a 2d grid of 2d blocks, with each thread assigned to a
// pixel. then 3) copy it back.
//
float * d_test;
float BLOCK_X = 16.0;
float BLOCK_Y = 16.0;
//For Nvidia cluster.
if( dev_prop_curr.maxThreadsPerBlock >= 1024 ){
BLOCK_X = 32.0;
BLOCK_Y = 32.0;
}
size_t sharedBlockSZ = 3*(BLOCK_X+2*r) * (BLOCK_Y+2*r) * sizeof(float); //Picture blocks
if(sharedBlockSZ > dev_prop_curr.sharedMemPerBlock){
printf("Shared Memory exceeded allocated size per block: %lu Max %lu\n",sharedBlockSZ, dev_prop_curr.sharedMemPerBlock);
return -1;
}
dim3 numThreads( BLOCK_X, BLOCK_Y,1);
dim3 numBlocks( ceil(w/BLOCK_X), ceil(h/BLOCK_Y),1);
float * d_imageArray;
float * d_imageArrayResult;
GPU_CHECKERROR( cudaMalloc((void **)&d_imageArray, sizeof(float)*w*h*3) );
GPU_CHECKERROR( cudaMalloc((void **)&d_imageArrayResult, sizeof(float)*w*h*3) );
GPU_CHECKERROR( cudaMalloc((void **)&d_test, sizeof(float)*D_MAX*D_MAX) );
GPU_CHECKERROR( cudaMemcpyToSymbol(
d_const_Gaussian,
&h_Gaussian[0][0],
sizeof(float)*d*d,
0,
cudaMemcpyHostToDevice));
GPU_CHECKERROR( cudaMemcpy( d_imageArray,
h_imageArray,
sizeof(float)*w*h*3,
cudaMemcpyHostToDevice ) );
//
// Your memory copy, & kernel launch code goes here:
//
printf("Launching one kernel\n");
blurr_GPU<<< numBlocks, numThreads , sharedBlockSZ>>>( d_imageArray,d_imageArrayResult,d_test,w,h,r);
GPU_CHECKERROR( cudaGetLastError() );
//}
GPU_CHECKERROR( cudaDeviceSynchronize() );
//
//Fetch the results
//
GPU_CHECKERROR( cudaMemcpy( h_imageArrayResult,
d_imageArrayResult,
sizeof(float)*w*h*3,
cudaMemcpyDeviceToHost ) );
gettimeofday(&t1,0);
float timdiff = (1000000.0*(t1.tv_sec - t0.tv_sec) + (t1.tv_usec - t0.tv_usec)) / 1000.0;
printf ("\ndone: time taken for parallel version is %10.1f ms\n", timdiff);
printf("writing output image hw1b.exr\n");
writeOpenEXRFile ("hw1b.exr", h_imageArrayResult, w, h);
free (h_imageArray);
free (h_imageArrayResult);
GPU_CHECKERROR( cudaFree(d_imageArray) );
GPU_CHECKERROR( cudaFree(d_imageArrayResult));
GPU_CHECKERROR( cudaFree(d_test) );
printf("done.\n");
return 0;
}
|
97847cd93fe187aa570459ed3b65104349d88c9c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _CUGAUSSIANFILTERROWX_CU_
#define _CUGAUSSIANFILTERROWX_CU_
#include "hipacc_types.hpp"
#include "hipacc_math_functions.hpp"
texture<uchar, hipTextureType1D, hipReadModeElementType> _texinputX;
const textureReference *_texinputXRef;
extern "C" {
__global__ __launch_bounds__ (64*1) void cuGaussianFilterRowXKernel(float * __restrict__ iter, int iter_width, int iter_height, int iter_stride, int input_width, int input_height, int input_stride, int bh_start_left, int bh_start_right, int bh_start_bottom, int bh_fall_back) {
const int gid_x = blockDim.x * blockIdx.x + threadIdx.x;
const int gid_y = blockDim.y * blockIdx.y * 8 + threadIdx.y;
if (bh_fall_back)
goto BH_FB;
if (blockIdx.y >= bh_start_bottom)
goto BH_B;
if (blockIdx.x >= bh_start_right)
goto BH_R;
if (blockIdx.x < bh_start_left)
goto BH_L;
goto BH_NO;
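// note on the generated control flow (comment added for readability): each
// BH_* label below runs the same 5-tap horizontal Gaussian, specialized per
// image region -- BH_FB clamps x on both sides, BH_B clamps y at the bottom
// border, BH_R clamps x at the right border, and the remaining labels cover
// the left border and the clamp-free interior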
BH_FB:
{
if (gid_x < iter_width) {
if (gid_y < iter_height) {
float _tmp0 = 0.F;
{
int _gid_x1 = gid_x + -2;
int _gid_y1 = gid_y + 0;
if (_gid_x1 >= input_width)
_gid_x1 = input_width - 1;
if (_gid_x1 < 0)
_gid_x1 = 0;
_tmp0 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y1) * input_stride + _gid_x1);
}
{
int _gid_x2 = gid_x + -1;
int _gid_y2 = gid_y + 0;
if (_gid_x2 >= input_width)
_gid_x2 = input_width - 1;
if (_gid_x2 < 0)
_gid_x2 = 0;
_tmp0 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y2) * input_stride + _gid_x2);
}
{
int _gid_x3 = gid_x + 0;
int _gid_y3 = gid_y + 0;
if (_gid_x3 >= input_width)
_gid_x3 = input_width - 1;
if (_gid_x3 < 0)
_gid_x3 = 0;
_tmp0 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y3) * input_stride + _gid_x3);
}
{
int _gid_x4 = gid_x + 1;
int _gid_y4 = gid_y + 0;
if (_gid_x4 >= input_width)
_gid_x4 = input_width - 1;
if (_gid_x4 < 0)
_gid_x4 = 0;
_tmp0 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y4) * input_stride + _gid_x4);
}
{
int _gid_x5 = gid_x + 2;
int _gid_y5 = gid_y + 0;
if (_gid_x5 >= input_width)
_gid_x5 = input_width - 1;
if (_gid_x5 < 0)
_gid_x5 = 0;
_tmp0 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y5) * input_stride + _gid_x5);
}
iter[(gid_y) * iter_stride + gid_x] = (float)(_tmp0);
}
}
if (gid_x < iter_width) {
if (gid_y + 1 * (int)blockDim.y < iter_height) {
float _tmp6 = 0.F;
{
int _gid_x7 = gid_x + -2;
int _gid_y7 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x7 >= input_width)
_gid_x7 = input_width - 1;
if (_gid_x7 < 0)
_gid_x7 = 0;
_tmp6 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y7) * input_stride + _gid_x7);
}
{
int _gid_x8 = gid_x + -1;
int _gid_y8 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x8 >= input_width)
_gid_x8 = input_width - 1;
if (_gid_x8 < 0)
_gid_x8 = 0;
_tmp6 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y8) * input_stride + _gid_x8);
}
{
int _gid_x9 = gid_x + 0;
int _gid_y9 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x9 >= input_width)
_gid_x9 = input_width - 1;
if (_gid_x9 < 0)
_gid_x9 = 0;
_tmp6 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y9) * input_stride + _gid_x9);
}
{
int _gid_x10 = gid_x + 1;
int _gid_y10 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x10 >= input_width)
_gid_x10 = input_width - 1;
if (_gid_x10 < 0)
_gid_x10 = 0;
_tmp6 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y10) * input_stride + _gid_x10);
}
{
int _gid_x11 = gid_x + 2;
int _gid_y11 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x11 >= input_width)
_gid_x11 = input_width - 1;
if (_gid_x11 < 0)
_gid_x11 = 0;
_tmp6 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y11) * input_stride + _gid_x11);
}
iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp6);
}
}
if (gid_x < iter_width) {
if (gid_y + 2 * (int)blockDim.y < iter_height) {
float _tmp12 = 0.F;
{
int _gid_x13 = gid_x + -2;
int _gid_y13 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x13 >= input_width)
_gid_x13 = input_width - 1;
if (_gid_x13 < 0)
_gid_x13 = 0;
_tmp12 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y13) * input_stride + _gid_x13);
}
{
int _gid_x14 = gid_x + -1;
int _gid_y14 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x14 >= input_width)
_gid_x14 = input_width - 1;
if (_gid_x14 < 0)
_gid_x14 = 0;
_tmp12 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y14) * input_stride + _gid_x14);
}
{
int _gid_x15 = gid_x + 0;
int _gid_y15 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x15 >= input_width)
_gid_x15 = input_width - 1;
if (_gid_x15 < 0)
_gid_x15 = 0;
_tmp12 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y15) * input_stride + _gid_x15);
}
{
int _gid_x16 = gid_x + 1;
int _gid_y16 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x16 >= input_width)
_gid_x16 = input_width - 1;
if (_gid_x16 < 0)
_gid_x16 = 0;
_tmp12 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y16) * input_stride + _gid_x16);
}
{
int _gid_x17 = gid_x + 2;
int _gid_y17 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x17 >= input_width)
_gid_x17 = input_width - 1;
if (_gid_x17 < 0)
_gid_x17 = 0;
_tmp12 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y17) * input_stride + _gid_x17);
}
iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp12);
}
}
if (gid_x < iter_width) {
if (gid_y + 3 * (int)blockDim.y < iter_height) {
float _tmp18 = 0.F;
{
int _gid_x19 = gid_x + -2;
int _gid_y19 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x19 >= input_width)
_gid_x19 = input_width - 1;
if (_gid_x19 < 0)
_gid_x19 = 0;
_tmp18 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y19) * input_stride + _gid_x19);
}
{
int _gid_x20 = gid_x + -1;
int _gid_y20 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x20 >= input_width)
_gid_x20 = input_width - 1;
if (_gid_x20 < 0)
_gid_x20 = 0;
_tmp18 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y20) * input_stride + _gid_x20);
}
{
int _gid_x21 = gid_x + 0;
int _gid_y21 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x21 >= input_width)
_gid_x21 = input_width - 1;
if (_gid_x21 < 0)
_gid_x21 = 0;
_tmp18 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y21) * input_stride + _gid_x21);
}
{
int _gid_x22 = gid_x + 1;
int _gid_y22 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x22 >= input_width)
_gid_x22 = input_width - 1;
if (_gid_x22 < 0)
_gid_x22 = 0;
_tmp18 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y22) * input_stride + _gid_x22);
}
{
int _gid_x23 = gid_x + 2;
int _gid_y23 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x23 >= input_width)
_gid_x23 = input_width - 1;
if (_gid_x23 < 0)
_gid_x23 = 0;
_tmp18 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y23) * input_stride + _gid_x23);
}
iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp18);
}
}
if (gid_x < iter_width) {
if (gid_y + 4 * (int)blockDim.y < iter_height) {
float _tmp24 = 0.F;
{
int _gid_x25 = gid_x + -2;
int _gid_y25 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x25 >= input_width)
_gid_x25 = input_width - 1;
if (_gid_x25 < 0)
_gid_x25 = 0;
_tmp24 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y25) * input_stride + _gid_x25);
}
{
int _gid_x26 = gid_x + -1;
int _gid_y26 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x26 >= input_width)
_gid_x26 = input_width - 1;
if (_gid_x26 < 0)
_gid_x26 = 0;
_tmp24 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y26) * input_stride + _gid_x26);
}
{
int _gid_x27 = gid_x + 0;
int _gid_y27 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x27 >= input_width)
_gid_x27 = input_width - 1;
if (_gid_x27 < 0)
_gid_x27 = 0;
_tmp24 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y27) * input_stride + _gid_x27);
}
{
int _gid_x28 = gid_x + 1;
int _gid_y28 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x28 >= input_width)
_gid_x28 = input_width - 1;
if (_gid_x28 < 0)
_gid_x28 = 0;
_tmp24 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y28) * input_stride + _gid_x28);
}
{
int _gid_x29 = gid_x + 2;
int _gid_y29 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x29 >= input_width)
_gid_x29 = input_width - 1;
if (_gid_x29 < 0)
_gid_x29 = 0;
_tmp24 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y29) * input_stride + _gid_x29);
}
iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp24);
}
}
if (gid_x < iter_width) {
if (gid_y + 5 * (int)blockDim.y < iter_height) {
float _tmp30 = 0.F;
{
int _gid_x31 = gid_x + -2;
int _gid_y31 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x31 >= input_width)
_gid_x31 = input_width - 1;
if (_gid_x31 < 0)
_gid_x31 = 0;
_tmp30 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y31) * input_stride + _gid_x31);
}
{
int _gid_x32 = gid_x + -1;
int _gid_y32 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x32 >= input_width)
_gid_x32 = input_width - 1;
if (_gid_x32 < 0)
_gid_x32 = 0;
_tmp30 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y32) * input_stride + _gid_x32);
}
{
int _gid_x33 = gid_x + 0;
int _gid_y33 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x33 >= input_width)
_gid_x33 = input_width - 1;
if (_gid_x33 < 0)
_gid_x33 = 0;
_tmp30 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y33) * input_stride + _gid_x33);
}
{
int _gid_x34 = gid_x + 1;
int _gid_y34 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x34 >= input_width)
_gid_x34 = input_width - 1;
if (_gid_x34 < 0)
_gid_x34 = 0;
_tmp30 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y34) * input_stride + _gid_x34);
}
{
int _gid_x35 = gid_x + 2;
int _gid_y35 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x35 >= input_width)
_gid_x35 = input_width - 1;
if (_gid_x35 < 0)
_gid_x35 = 0;
_tmp30 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y35) * input_stride + _gid_x35);
}
iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp30);
}
}
if (gid_x < iter_width) {
if (gid_y + 6 * (int)blockDim.y < iter_height) {
float _tmp36 = 0.F;
{
int _gid_x37 = gid_x + -2;
int _gid_y37 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x37 >= input_width)
_gid_x37 = input_width - 1;
if (_gid_x37 < 0)
_gid_x37 = 0;
_tmp36 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y37) * input_stride + _gid_x37);
}
{
int _gid_x38 = gid_x + -1;
int _gid_y38 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x38 >= input_width)
_gid_x38 = input_width - 1;
if (_gid_x38 < 0)
_gid_x38 = 0;
_tmp36 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y38) * input_stride + _gid_x38);
}
{
int _gid_x39 = gid_x + 0;
int _gid_y39 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x39 >= input_width)
_gid_x39 = input_width - 1;
if (_gid_x39 < 0)
_gid_x39 = 0;
_tmp36 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y39) * input_stride + _gid_x39);
}
{
int _gid_x40 = gid_x + 1;
int _gid_y40 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x40 >= input_width)
_gid_x40 = input_width - 1;
if (_gid_x40 < 0)
_gid_x40 = 0;
_tmp36 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y40) * input_stride + _gid_x40);
}
{
int _gid_x41 = gid_x + 2;
int _gid_y41 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x41 >= input_width)
_gid_x41 = input_width - 1;
if (_gid_x41 < 0)
_gid_x41 = 0;
_tmp36 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y41) * input_stride + _gid_x41);
}
iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp36);
}
}
if (gid_x < iter_width) {
if (gid_y + 7 * (int)blockDim.y < iter_height) {
float _tmp42 = 0.F;
{
int _gid_x43 = gid_x + -2;
int _gid_y43 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x43 >= input_width)
_gid_x43 = input_width - 1;
if (_gid_x43 < 0)
_gid_x43 = 0;
_tmp42 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y43) * input_stride + _gid_x43);
}
{
int _gid_x44 = gid_x + -1;
int _gid_y44 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x44 >= input_width)
_gid_x44 = input_width - 1;
if (_gid_x44 < 0)
_gid_x44 = 0;
_tmp42 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y44) * input_stride + _gid_x44);
}
{
int _gid_x45 = gid_x + 0;
int _gid_y45 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x45 >= input_width)
_gid_x45 = input_width - 1;
if (_gid_x45 < 0)
_gid_x45 = 0;
_tmp42 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y45) * input_stride + _gid_x45);
}
{
int _gid_x46 = gid_x + 1;
int _gid_y46 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x46 >= input_width)
_gid_x46 = input_width - 1;
if (_gid_x46 < 0)
_gid_x46 = 0;
_tmp42 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y46) * input_stride + _gid_x46);
}
{
int _gid_x47 = gid_x + 2;
int _gid_y47 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x47 >= input_width)
_gid_x47 = input_width - 1;
if (_gid_x47 < 0)
_gid_x47 = 0;
_tmp42 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y47) * input_stride + _gid_x47);
}
iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp42);
}
}
}
goto BH_EXIT;
BH_B:
{
if (gid_y < iter_height) {
float _tmp48 = 0.F;
{
int _gid_x49 = gid_x + -2;
int _gid_y49 = gid_y + 0;
if (_gid_y49 >= input_height)
_gid_y49 = input_height - 1;
_tmp48 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y49) * input_stride + _gid_x49);
}
{
int _gid_x50 = gid_x + -1;
int _gid_y50 = gid_y + 0;
if (_gid_y50 >= input_height)
_gid_y50 = input_height - 1;
_tmp48 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y50) * input_stride + _gid_x50);
}
{
int _gid_x51 = gid_x + 0;
int _gid_y51 = gid_y + 0;
if (_gid_y51 >= input_height)
_gid_y51 = input_height - 1;
_tmp48 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y51) * input_stride + _gid_x51);
}
{
int _gid_x52 = gid_x + 1;
int _gid_y52 = gid_y + 0;
if (_gid_y52 >= input_height)
_gid_y52 = input_height - 1;
_tmp48 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y52) * input_stride + _gid_x52);
}
{
int _gid_x53 = gid_x + 2;
int _gid_y53 = gid_y + 0;
if (_gid_y53 >= input_height)
_gid_y53 = input_height - 1;
_tmp48 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y53) * input_stride + _gid_x53);
}
iter[(gid_y) * iter_stride + gid_x] = (float)(_tmp48);
}
if (gid_y + 1 * (int)blockDim.y < iter_height) {
float _tmp54 = 0.F;
{
int _gid_x55 = gid_x + -2;
int _gid_y55 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_y55 >= input_height)
_gid_y55 = input_height - 1;
_tmp54 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y55) * input_stride + _gid_x55);
}
{
int _gid_x56 = gid_x + -1;
int _gid_y56 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_y56 >= input_height)
_gid_y56 = input_height - 1;
_tmp54 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y56) * input_stride + _gid_x56);
}
{
int _gid_x57 = gid_x + 0;
int _gid_y57 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_y57 >= input_height)
_gid_y57 = input_height - 1;
_tmp54 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y57) * input_stride + _gid_x57);
}
{
int _gid_x58 = gid_x + 1;
int _gid_y58 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_y58 >= input_height)
_gid_y58 = input_height - 1;
_tmp54 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y58) * input_stride + _gid_x58);
}
{
int _gid_x59 = gid_x + 2;
int _gid_y59 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_y59 >= input_height)
_gid_y59 = input_height - 1;
_tmp54 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y59) * input_stride + _gid_x59);
}
iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp54);
}
if (gid_y + 2 * (int)blockDim.y < iter_height) {
float _tmp60 = 0.F;
{
int _gid_x61 = gid_x + -2;
int _gid_y61 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_y61 >= input_height)
_gid_y61 = input_height - 1;
_tmp60 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y61) * input_stride + _gid_x61);
}
{
int _gid_x62 = gid_x + -1;
int _gid_y62 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_y62 >= input_height)
_gid_y62 = input_height - 1;
_tmp60 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y62) * input_stride + _gid_x62);
}
{
int _gid_x63 = gid_x + 0;
int _gid_y63 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_y63 >= input_height)
_gid_y63 = input_height - 1;
_tmp60 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y63) * input_stride + _gid_x63);
}
{
int _gid_x64 = gid_x + 1;
int _gid_y64 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_y64 >= input_height)
_gid_y64 = input_height - 1;
_tmp60 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y64) * input_stride + _gid_x64);
}
{
int _gid_x65 = gid_x + 2;
int _gid_y65 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_y65 >= input_height)
_gid_y65 = input_height - 1;
_tmp60 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y65) * input_stride + _gid_x65);
}
iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp60);
}
if (gid_y + 3 * (int)blockDim.y < iter_height) {
float _tmp66 = 0.F;
{
int _gid_x67 = gid_x + -2;
int _gid_y67 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_y67 >= input_height)
_gid_y67 = input_height - 1;
_tmp66 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y67) * input_stride + _gid_x67);
}
{
int _gid_x68 = gid_x + -1;
int _gid_y68 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_y68 >= input_height)
_gid_y68 = input_height - 1;
_tmp66 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y68) * input_stride + _gid_x68);
}
{
int _gid_x69 = gid_x + 0;
int _gid_y69 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_y69 >= input_height)
_gid_y69 = input_height - 1;
_tmp66 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y69) * input_stride + _gid_x69);
}
{
int _gid_x70 = gid_x + 1;
int _gid_y70 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_y70 >= input_height)
_gid_y70 = input_height - 1;
_tmp66 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y70) * input_stride + _gid_x70);
}
{
int _gid_x71 = gid_x + 2;
int _gid_y71 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_y71 >= input_height)
_gid_y71 = input_height - 1;
_tmp66 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y71) * input_stride + _gid_x71);
}
iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp66);
}
if (gid_y + 4 * (int)blockDim.y < iter_height) {
float _tmp72 = 0.F;
{
int _gid_x73 = gid_x + -2;
int _gid_y73 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_y73 >= input_height)
_gid_y73 = input_height - 1;
_tmp72 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y73) * input_stride + _gid_x73);
}
{
int _gid_x74 = gid_x + -1;
int _gid_y74 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_y74 >= input_height)
_gid_y74 = input_height - 1;
_tmp72 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y74) * input_stride + _gid_x74);
}
{
int _gid_x75 = gid_x + 0;
int _gid_y75 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_y75 >= input_height)
_gid_y75 = input_height - 1;
_tmp72 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y75) * input_stride + _gid_x75);
}
{
int _gid_x76 = gid_x + 1;
int _gid_y76 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_y76 >= input_height)
_gid_y76 = input_height - 1;
_tmp72 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y76) * input_stride + _gid_x76);
}
{
int _gid_x77 = gid_x + 2;
int _gid_y77 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_y77 >= input_height)
_gid_y77 = input_height - 1;
_tmp72 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y77) * input_stride + _gid_x77);
}
iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp72);
}
if (gid_y + 5 * (int)blockDim.y < iter_height) {
float _tmp78 = 0.F;
{
int _gid_x79 = gid_x + -2;
int _gid_y79 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_y79 >= input_height)
_gid_y79 = input_height - 1;
_tmp78 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y79) * input_stride + _gid_x79);
}
{
int _gid_x80 = gid_x + -1;
int _gid_y80 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_y80 >= input_height)
_gid_y80 = input_height - 1;
_tmp78 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y80) * input_stride + _gid_x80);
}
{
int _gid_x81 = gid_x + 0;
int _gid_y81 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_y81 >= input_height)
_gid_y81 = input_height - 1;
_tmp78 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y81) * input_stride + _gid_x81);
}
{
int _gid_x82 = gid_x + 1;
int _gid_y82 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_y82 >= input_height)
_gid_y82 = input_height - 1;
_tmp78 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y82) * input_stride + _gid_x82);
}
{
int _gid_x83 = gid_x + 2;
int _gid_y83 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_y83 >= input_height)
_gid_y83 = input_height - 1;
_tmp78 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y83) * input_stride + _gid_x83);
}
iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp78);
}
if (gid_y + 6 * (int)blockDim.y < iter_height) {
float _tmp84 = 0.F;
{
int _gid_x85 = gid_x + -2;
int _gid_y85 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_y85 >= input_height)
_gid_y85 = input_height - 1;
_tmp84 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y85) * input_stride + _gid_x85);
}
{
int _gid_x86 = gid_x + -1;
int _gid_y86 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_y86 >= input_height)
_gid_y86 = input_height - 1;
_tmp84 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y86) * input_stride + _gid_x86);
}
{
int _gid_x87 = gid_x + 0;
int _gid_y87 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_y87 >= input_height)
_gid_y87 = input_height - 1;
_tmp84 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y87) * input_stride + _gid_x87);
}
{
int _gid_x88 = gid_x + 1;
int _gid_y88 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_y88 >= input_height)
_gid_y88 = input_height - 1;
_tmp84 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y88) * input_stride + _gid_x88);
}
{
int _gid_x89 = gid_x + 2;
int _gid_y89 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_y89 >= input_height)
_gid_y89 = input_height - 1;
_tmp84 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y89) * input_stride + _gid_x89);
}
iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp84);
}
if (gid_y + 7 * (int)blockDim.y < iter_height) {
float _tmp90 = 0.F;
{
int _gid_x91 = gid_x + -2;
int _gid_y91 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_y91 >= input_height)
_gid_y91 = input_height - 1;
_tmp90 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y91) * input_stride + _gid_x91);
}
{
int _gid_x92 = gid_x + -1;
int _gid_y92 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_y92 >= input_height)
_gid_y92 = input_height - 1;
_tmp90 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y92) * input_stride + _gid_x92);
}
{
int _gid_x93 = gid_x + 0;
int _gid_y93 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_y93 >= input_height)
_gid_y93 = input_height - 1;
_tmp90 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y93) * input_stride + _gid_x93);
}
{
int _gid_x94 = gid_x + 1;
int _gid_y94 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_y94 >= input_height)
_gid_y94 = input_height - 1;
_tmp90 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y94) * input_stride + _gid_x94);
}
{
int _gid_x95 = gid_x + 2;
int _gid_y95 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_y95 >= input_height)
_gid_y95 = input_height - 1;
_tmp90 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y95) * input_stride + _gid_x95);
}
iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp90);
}
}
goto BH_EXIT;
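// BH_R: right-border blocks — clamp column indices that run past input_width (clamp-to-edge).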
BH_R:
{
if (gid_x < iter_width) {
if (gid_y < iter_height) {
float _tmp96 = 0.F;
{
int _gid_x97 = gid_x + -2;
int _gid_y97 = gid_y + 0;
if (_gid_x97 >= input_width)
_gid_x97 = input_width - 1;
_tmp96 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y97) * input_stride + _gid_x97);
}
{
int _gid_x98 = gid_x + -1;
int _gid_y98 = gid_y + 0;
if (_gid_x98 >= input_width)
_gid_x98 = input_width - 1;
_tmp96 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y98) * input_stride + _gid_x98);
}
{
int _gid_x99 = gid_x + 0;
int _gid_y99 = gid_y + 0;
if (_gid_x99 >= input_width)
_gid_x99 = input_width - 1;
_tmp96 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y99) * input_stride + _gid_x99);
}
{
int _gid_x100 = gid_x + 1;
int _gid_y100 = gid_y + 0;
if (_gid_x100 >= input_width)
_gid_x100 = input_width - 1;
_tmp96 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y100) * input_stride + _gid_x100);
}
{
int _gid_x101 = gid_x + 2;
int _gid_y101 = gid_y + 0;
if (_gid_x101 >= input_width)
_gid_x101 = input_width - 1;
_tmp96 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y101) * input_stride + _gid_x101);
}
iter[(gid_y) * iter_stride + gid_x] = (float)(_tmp96);
}
}
if (gid_x < iter_width) {
if (gid_y + 1 * (int)blockDim.y < iter_height) {
float _tmp102 = 0.F;
{
int _gid_x103 = gid_x + -2;
int _gid_y103 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x103 >= input_width)
_gid_x103 = input_width - 1;
_tmp102 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y103) * input_stride + _gid_x103);
}
{
int _gid_x104 = gid_x + -1;
int _gid_y104 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x104 >= input_width)
_gid_x104 = input_width - 1;
_tmp102 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y104) * input_stride + _gid_x104);
}
{
int _gid_x105 = gid_x + 0;
int _gid_y105 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x105 >= input_width)
_gid_x105 = input_width - 1;
_tmp102 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y105) * input_stride + _gid_x105);
}
{
int _gid_x106 = gid_x + 1;
int _gid_y106 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x106 >= input_width)
_gid_x106 = input_width - 1;
_tmp102 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y106) * input_stride + _gid_x106);
}
{
int _gid_x107 = gid_x + 2;
int _gid_y107 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x107 >= input_width)
_gid_x107 = input_width - 1;
_tmp102 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y107) * input_stride + _gid_x107);
}
iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp102);
}
}
if (gid_x < iter_width) {
if (gid_y + 2 * (int)blockDim.y < iter_height) {
float _tmp108 = 0.F;
{
int _gid_x109 = gid_x + -2;
int _gid_y109 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x109 >= input_width)
_gid_x109 = input_width - 1;
_tmp108 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y109) * input_stride + _gid_x109);
}
{
int _gid_x110 = gid_x + -1;
int _gid_y110 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x110 >= input_width)
_gid_x110 = input_width - 1;
_tmp108 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y110) * input_stride + _gid_x110);
}
{
int _gid_x111 = gid_x + 0;
int _gid_y111 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x111 >= input_width)
_gid_x111 = input_width - 1;
_tmp108 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y111) * input_stride + _gid_x111);
}
{
int _gid_x112 = gid_x + 1;
int _gid_y112 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x112 >= input_width)
_gid_x112 = input_width - 1;
_tmp108 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y112) * input_stride + _gid_x112);
}
{
int _gid_x113 = gid_x + 2;
int _gid_y113 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x113 >= input_width)
_gid_x113 = input_width - 1;
_tmp108 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y113) * input_stride + _gid_x113);
}
iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp108);
}
}
if (gid_x < iter_width) {
if (gid_y + 3 * (int)blockDim.y < iter_height) {
float _tmp114 = 0.F;
{
int _gid_x115 = gid_x + -2;
int _gid_y115 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x115 >= input_width)
_gid_x115 = input_width - 1;
_tmp114 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y115) * input_stride + _gid_x115);
}
{
int _gid_x116 = gid_x + -1;
int _gid_y116 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x116 >= input_width)
_gid_x116 = input_width - 1;
_tmp114 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y116) * input_stride + _gid_x116);
}
{
int _gid_x117 = gid_x + 0;
int _gid_y117 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x117 >= input_width)
_gid_x117 = input_width - 1;
_tmp114 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y117) * input_stride + _gid_x117);
}
{
int _gid_x118 = gid_x + 1;
int _gid_y118 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x118 >= input_width)
_gid_x118 = input_width - 1;
_tmp114 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y118) * input_stride + _gid_x118);
}
{
int _gid_x119 = gid_x + 2;
int _gid_y119 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x119 >= input_width)
_gid_x119 = input_width - 1;
_tmp114 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y119) * input_stride + _gid_x119);
}
iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp114);
}
}
if (gid_x < iter_width) {
if (gid_y + 4 * (int)blockDim.y < iter_height) {
float _tmp120 = 0.F;
{
int _gid_x121 = gid_x + -2;
int _gid_y121 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x121 >= input_width)
_gid_x121 = input_width - 1;
_tmp120 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y121) * input_stride + _gid_x121);
}
{
int _gid_x122 = gid_x + -1;
int _gid_y122 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x122 >= input_width)
_gid_x122 = input_width - 1;
_tmp120 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y122) * input_stride + _gid_x122);
}
{
int _gid_x123 = gid_x + 0;
int _gid_y123 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x123 >= input_width)
_gid_x123 = input_width - 1;
_tmp120 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y123) * input_stride + _gid_x123);
}
{
int _gid_x124 = gid_x + 1;
int _gid_y124 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x124 >= input_width)
_gid_x124 = input_width - 1;
_tmp120 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y124) * input_stride + _gid_x124);
}
{
int _gid_x125 = gid_x + 2;
int _gid_y125 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x125 >= input_width)
_gid_x125 = input_width - 1;
_tmp120 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y125) * input_stride + _gid_x125);
}
iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp120);
}
}
if (gid_x < iter_width) {
if (gid_y + 5 * (int)blockDim.y < iter_height) {
float _tmp126 = 0.F;
{
int _gid_x127 = gid_x + -2;
int _gid_y127 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x127 >= input_width)
_gid_x127 = input_width - 1;
_tmp126 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y127) * input_stride + _gid_x127);
}
{
int _gid_x128 = gid_x + -1;
int _gid_y128 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x128 >= input_width)
_gid_x128 = input_width - 1;
_tmp126 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y128) * input_stride + _gid_x128);
}
{
int _gid_x129 = gid_x + 0;
int _gid_y129 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x129 >= input_width)
_gid_x129 = input_width - 1;
_tmp126 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y129) * input_stride + _gid_x129);
}
{
int _gid_x130 = gid_x + 1;
int _gid_y130 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x130 >= input_width)
_gid_x130 = input_width - 1;
_tmp126 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y130) * input_stride + _gid_x130);
}
{
int _gid_x131 = gid_x + 2;
int _gid_y131 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x131 >= input_width)
_gid_x131 = input_width - 1;
_tmp126 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y131) * input_stride + _gid_x131);
}
iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp126);
}
}
if (gid_x < iter_width) {
if (gid_y + 6 * (int)blockDim.y < iter_height) {
float _tmp132 = 0.F;
{
int _gid_x133 = gid_x + -2;
int _gid_y133 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x133 >= input_width)
_gid_x133 = input_width - 1;
_tmp132 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y133) * input_stride + _gid_x133);
}
{
int _gid_x134 = gid_x + -1;
int _gid_y134 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x134 >= input_width)
_gid_x134 = input_width - 1;
_tmp132 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y134) * input_stride + _gid_x134);
}
{
int _gid_x135 = gid_x + 0;
int _gid_y135 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x135 >= input_width)
_gid_x135 = input_width - 1;
_tmp132 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y135) * input_stride + _gid_x135);
}
{
int _gid_x136 = gid_x + 1;
int _gid_y136 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x136 >= input_width)
_gid_x136 = input_width - 1;
_tmp132 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y136) * input_stride + _gid_x136);
}
{
int _gid_x137 = gid_x + 2;
int _gid_y137 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x137 >= input_width)
_gid_x137 = input_width - 1;
_tmp132 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y137) * input_stride + _gid_x137);
}
iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp132);
}
}
if (gid_x < iter_width) {
if (gid_y + 7 * (int)blockDim.y < iter_height) {
float _tmp138 = 0.F;
{
int _gid_x139 = gid_x + -2;
int _gid_y139 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x139 >= input_width)
_gid_x139 = input_width - 1;
_tmp138 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y139) * input_stride + _gid_x139);
}
{
int _gid_x140 = gid_x + -1;
int _gid_y140 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x140 >= input_width)
_gid_x140 = input_width - 1;
_tmp138 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y140) * input_stride + _gid_x140);
}
{
int _gid_x141 = gid_x + 0;
int _gid_y141 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x141 >= input_width)
_gid_x141 = input_width - 1;
_tmp138 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y141) * input_stride + _gid_x141);
}
{
int _gid_x142 = gid_x + 1;
int _gid_y142 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x142 >= input_width)
_gid_x142 = input_width - 1;
_tmp138 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y142) * input_stride + _gid_x142);
}
{
int _gid_x143 = gid_x + 2;
int _gid_y143 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x143 >= input_width)
_gid_x143 = input_width - 1;
_tmp138 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y143) * input_stride + _gid_x143);
}
iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp138);
}
}
}
goto BH_EXIT;
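// BH_L: left-border blocks — clamp negative column indices to 0 (clamp-to-edge).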
BH_L:
{
if (gid_y < iter_height) {
float _tmp144 = 0.F;
{
int _gid_x145 = gid_x + -2;
int _gid_y145 = gid_y + 0;
if (_gid_x145 < 0)
_gid_x145 = 0;
_tmp144 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y145) * input_stride + _gid_x145);
}
{
int _gid_x146 = gid_x + -1;
int _gid_y146 = gid_y + 0;
if (_gid_x146 < 0)
_gid_x146 = 0;
_tmp144 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y146) * input_stride + _gid_x146);
}
{
int _gid_x147 = gid_x + 0;
int _gid_y147 = gid_y + 0;
if (_gid_x147 < 0)
_gid_x147 = 0;
_tmp144 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y147) * input_stride + _gid_x147);
}
{
int _gid_x148 = gid_x + 1;
int _gid_y148 = gid_y + 0;
if (_gid_x148 < 0)
_gid_x148 = 0;
_tmp144 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y148) * input_stride + _gid_x148);
}
{
int _gid_x149 = gid_x + 2;
int _gid_y149 = gid_y + 0;
if (_gid_x149 < 0)
_gid_x149 = 0;
_tmp144 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y149) * input_stride + _gid_x149);
}
iter[(gid_y) * iter_stride + gid_x] = (float)(_tmp144);
}
if (gid_y + 1 * (int)blockDim.y < iter_height) {
float _tmp150 = 0.F;
{
int _gid_x151 = gid_x + -2;
int _gid_y151 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x151 < 0)
_gid_x151 = 0;
_tmp150 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y151) * input_stride + _gid_x151);
}
{
int _gid_x152 = gid_x + -1;
int _gid_y152 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x152 < 0)
_gid_x152 = 0;
_tmp150 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y152) * input_stride + _gid_x152);
}
{
int _gid_x153 = gid_x + 0;
int _gid_y153 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x153 < 0)
_gid_x153 = 0;
_tmp150 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y153) * input_stride + _gid_x153);
}
{
int _gid_x154 = gid_x + 1;
int _gid_y154 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x154 < 0)
_gid_x154 = 0;
_tmp150 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y154) * input_stride + _gid_x154);
}
{
int _gid_x155 = gid_x + 2;
int _gid_y155 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x155 < 0)
_gid_x155 = 0;
_tmp150 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y155) * input_stride + _gid_x155);
}
iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp150);
}
if (gid_y + 2 * (int)blockDim.y < iter_height) {
float _tmp156 = 0.F;
{
int _gid_x157 = gid_x + -2;
int _gid_y157 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x157 < 0)
_gid_x157 = 0;
_tmp156 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y157) * input_stride + _gid_x157);
}
{
int _gid_x158 = gid_x + -1;
int _gid_y158 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x158 < 0)
_gid_x158 = 0;
_tmp156 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y158) * input_stride + _gid_x158);
}
{
int _gid_x159 = gid_x + 0;
int _gid_y159 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x159 < 0)
_gid_x159 = 0;
_tmp156 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y159) * input_stride + _gid_x159);
}
{
int _gid_x160 = gid_x + 1;
int _gid_y160 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x160 < 0)
_gid_x160 = 0;
_tmp156 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y160) * input_stride + _gid_x160);
}
{
int _gid_x161 = gid_x + 2;
int _gid_y161 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x161 < 0)
_gid_x161 = 0;
_tmp156 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y161) * input_stride + _gid_x161);
}
iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp156);
}
if (gid_y + 3 * (int)blockDim.y < iter_height) {
float _tmp162 = 0.F;
{
int _gid_x163 = gid_x + -2;
int _gid_y163 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x163 < 0)
_gid_x163 = 0;
_tmp162 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y163) * input_stride + _gid_x163);
}
{
int _gid_x164 = gid_x + -1;
int _gid_y164 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x164 < 0)
_gid_x164 = 0;
_tmp162 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y164) * input_stride + _gid_x164);
}
{
int _gid_x165 = gid_x + 0;
int _gid_y165 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x165 < 0)
_gid_x165 = 0;
_tmp162 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y165) * input_stride + _gid_x165);
}
{
int _gid_x166 = gid_x + 1;
int _gid_y166 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x166 < 0)
_gid_x166 = 0;
_tmp162 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y166) * input_stride + _gid_x166);
}
{
int _gid_x167 = gid_x + 2;
int _gid_y167 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x167 < 0)
_gid_x167 = 0;
_tmp162 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y167) * input_stride + _gid_x167);
}
iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp162);
}
if (gid_y + 4 * (int)blockDim.y < iter_height) {
float _tmp168 = 0.F;
{
int _gid_x169 = gid_x + -2;
int _gid_y169 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x169 < 0)
_gid_x169 = 0;
_tmp168 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y169) * input_stride + _gid_x169);
}
{
int _gid_x170 = gid_x + -1;
int _gid_y170 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x170 < 0)
_gid_x170 = 0;
_tmp168 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y170) * input_stride + _gid_x170);
}
{
int _gid_x171 = gid_x + 0;
int _gid_y171 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x171 < 0)
_gid_x171 = 0;
_tmp168 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y171) * input_stride + _gid_x171);
}
{
int _gid_x172 = gid_x + 1;
int _gid_y172 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x172 < 0)
_gid_x172 = 0;
_tmp168 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y172) * input_stride + _gid_x172);
}
{
int _gid_x173 = gid_x + 2;
int _gid_y173 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x173 < 0)
_gid_x173 = 0;
_tmp168 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y173) * input_stride + _gid_x173);
}
iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp168);
}
if (gid_y + 5 * (int)blockDim.y < iter_height) {
float _tmp174 = 0.F;
{
int _gid_x175 = gid_x + -2;
int _gid_y175 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x175 < 0)
_gid_x175 = 0;
_tmp174 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y175) * input_stride + _gid_x175);
}
{
int _gid_x176 = gid_x + -1;
int _gid_y176 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x176 < 0)
_gid_x176 = 0;
_tmp174 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y176) * input_stride + _gid_x176);
}
{
int _gid_x177 = gid_x + 0;
int _gid_y177 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x177 < 0)
_gid_x177 = 0;
_tmp174 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y177) * input_stride + _gid_x177);
}
{
int _gid_x178 = gid_x + 1;
int _gid_y178 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x178 < 0)
_gid_x178 = 0;
_tmp174 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y178) * input_stride + _gid_x178);
}
{
int _gid_x179 = gid_x + 2;
int _gid_y179 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x179 < 0)
_gid_x179 = 0;
_tmp174 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y179) * input_stride + _gid_x179);
}
iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp174);
}
if (gid_y + 6 * (int)blockDim.y < iter_height) {
float _tmp180 = 0.F;
{
int _gid_x181 = gid_x + -2;
int _gid_y181 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x181 < 0)
_gid_x181 = 0;
_tmp180 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y181) * input_stride + _gid_x181);
}
{
int _gid_x182 = gid_x + -1;
int _gid_y182 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x182 < 0)
_gid_x182 = 0;
_tmp180 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y182) * input_stride + _gid_x182);
}
{
int _gid_x183 = gid_x + 0;
int _gid_y183 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x183 < 0)
_gid_x183 = 0;
_tmp180 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y183) * input_stride + _gid_x183);
}
{
int _gid_x184 = gid_x + 1;
int _gid_y184 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x184 < 0)
_gid_x184 = 0;
_tmp180 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y184) * input_stride + _gid_x184);
}
{
int _gid_x185 = gid_x + 2;
int _gid_y185 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x185 < 0)
_gid_x185 = 0;
_tmp180 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y185) * input_stride + _gid_x185);
}
iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp180);
}
if (gid_y + 7 * (int)blockDim.y < iter_height) {
float _tmp186 = 0.F;
{
int _gid_x187 = gid_x + -2;
int _gid_y187 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x187 < 0)
_gid_x187 = 0;
_tmp186 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y187) * input_stride + _gid_x187);
}
{
int _gid_x188 = gid_x + -1;
int _gid_y188 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x188 < 0)
_gid_x188 = 0;
_tmp186 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y188) * input_stride + _gid_x188);
}
{
int _gid_x189 = gid_x + 0;
int _gid_y189 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x189 < 0)
_gid_x189 = 0;
_tmp186 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y189) * input_stride + _gid_x189);
}
{
int _gid_x190 = gid_x + 1;
int _gid_y190 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x190 < 0)
_gid_x190 = 0;
_tmp186 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y190) * input_stride + _gid_x190);
}
{
int _gid_x191 = gid_x + 2;
int _gid_y191 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x191 < 0)
_gid_x191 = 0;
_tmp186 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y191) * input_stride + _gid_x191);
}
iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp186);
}
}
goto BH_EXIT;
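// BH_NO: interior blocks — no boundary checks needed, fetch the five taps directly.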
BH_NO:
{
{
float _tmp192 = 0.F;
{
_tmp192 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 0) * input_stride + gid_x + -2);
}
{
_tmp192 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 0) * input_stride + gid_x + -1);
}
{
_tmp192 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 0) * input_stride + gid_x + 0);
}
{
_tmp192 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 0) * input_stride + gid_x + 1);
}
{
_tmp192 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y) * iter_stride + gid_x] = (float)(_tmp192);
}
{
float _tmp193 = 0.F;
{
_tmp193 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 1 * (int)blockDim.y + 0) * input_stride + gid_x + -2);
}
{
_tmp193 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 1 * (int)blockDim.y + 0) * input_stride + gid_x + -1);
}
{
_tmp193 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 1 * (int)blockDim.y + 0) * input_stride + gid_x + 0);
}
{
_tmp193 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 1 * (int)blockDim.y + 0) * input_stride + gid_x + 1);
}
{
_tmp193 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 1 * (int)blockDim.y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp193);
}
{
float _tmp194 = 0.F;
{
_tmp194 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 2 * (int)blockDim.y + 0) * input_stride + gid_x + -2);
}
{
_tmp194 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 2 * (int)blockDim.y + 0) * input_stride + gid_x + -1);
}
{
_tmp194 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 2 * (int)blockDim.y + 0) * input_stride + gid_x + 0);
}
{
_tmp194 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 2 * (int)blockDim.y + 0) * input_stride + gid_x + 1);
}
{
_tmp194 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 2 * (int)blockDim.y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp194);
}
{
float _tmp195 = 0.F;
{
_tmp195 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 3 * (int)blockDim.y + 0) * input_stride + gid_x + -2);
}
{
_tmp195 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 3 * (int)blockDim.y + 0) * input_stride + gid_x + -1);
}
{
_tmp195 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 3 * (int)blockDim.y + 0) * input_stride + gid_x + 0);
}
{
_tmp195 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 3 * (int)blockDim.y + 0) * input_stride + gid_x + 1);
}
{
_tmp195 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 3 * (int)blockDim.y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp195);
}
{
float _tmp196 = 0.F;
{
_tmp196 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 4 * (int)blockDim.y + 0) * input_stride + gid_x + -2);
}
{
_tmp196 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 4 * (int)blockDim.y + 0) * input_stride + gid_x + -1);
}
{
_tmp196 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 4 * (int)blockDim.y + 0) * input_stride + gid_x + 0);
}
{
_tmp196 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 4 * (int)blockDim.y + 0) * input_stride + gid_x + 1);
}
{
_tmp196 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 4 * (int)blockDim.y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp196);
}
{
float _tmp197 = 0.F;
{
_tmp197 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 5 * (int)blockDim.y + 0) * input_stride + gid_x + -2);
}
{
_tmp197 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 5 * (int)blockDim.y + 0) * input_stride + gid_x + -1);
}
{
_tmp197 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 5 * (int)blockDim.y + 0) * input_stride + gid_x + 0);
}
{
_tmp197 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 5 * (int)blockDim.y + 0) * input_stride + gid_x + 1);
}
{
_tmp197 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 5 * (int)blockDim.y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp197);
}
{
float _tmp198 = 0.F;
{
_tmp198 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 6 * (int)blockDim.y + 0) * input_stride + gid_x + -2);
}
{
_tmp198 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 6 * (int)blockDim.y + 0) * input_stride + gid_x + -1);
}
{
_tmp198 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 6 * (int)blockDim.y + 0) * input_stride + gid_x + 0);
}
{
_tmp198 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 6 * (int)blockDim.y + 0) * input_stride + gid_x + 1);
}
{
_tmp198 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 6 * (int)blockDim.y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp198);
}
{
float _tmp199 = 0.F;
{
_tmp199 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 7 * (int)blockDim.y + 0) * input_stride + gid_x + -2);
}
{
_tmp199 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 7 * (int)blockDim.y + 0) * input_stride + gid_x + -1);
}
{
_tmp199 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 7 * (int)blockDim.y + 0) * input_stride + gid_x + 0);
}
{
_tmp199 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 7 * (int)blockDim.y + 0) * input_stride + gid_x + 1);
}
{
_tmp199 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 7 * (int)blockDim.y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp199);
}
}
goto BH_EXIT;
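// BH_EXIT: common exit point shared by all border-handling variants.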
BH_EXIT:
;
}
}
#endif //_CUGAUSSIANFILTERROWX_CU_
|
97847cd93fe187aa570459ed3b65104349d88c9c.cu
|
#ifndef _CUGAUSSIANFILTERROWX_CU_
#define _CUGAUSSIANFILTERROWX_CU_
#include "hipacc_types.hpp"
#include "hipacc_math_functions.hpp"
texture<uchar, cudaTextureType1D, cudaReadModeElementType> _texinputX;
const textureReference *_texinputXRef;
extern "C" {
__global__ __launch_bounds__ (64*1) void cuGaussianFilterRowXKernel(float * __restrict__ iter, int iter_width, int iter_height, int iter_stride, int input_width, int input_height, int input_stride, int bh_start_left, int bh_start_right, int bh_start_bottom, int bh_fall_back) {
const int gid_x = blockDim.x * blockIdx.x + threadIdx.x;
const int gid_y = blockDim.y * blockIdx.y * 8 + threadIdx.y;
if (bh_fall_back)
goto BH_FB;
if (blockIdx.y >= bh_start_bottom)
goto BH_B;
if (blockIdx.x >= bh_start_right)
goto BH_R;
if (blockIdx.x < bh_start_left)
goto BH_L;
goto BH_NO;
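// BH_FB: fall-back path — clamp the column index on both sides (left and right) before every fetch.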
BH_FB:
{
if (gid_x < iter_width) {
if (gid_y < iter_height) {
float _tmp0 = 0.F;
{
int _gid_x1 = gid_x + -2;
int _gid_y1 = gid_y + 0;
if (_gid_x1 >= input_width)
_gid_x1 = input_width - 1;
if (_gid_x1 < 0)
_gid_x1 = 0;
_tmp0 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y1) * input_stride + _gid_x1);
}
{
int _gid_x2 = gid_x + -1;
int _gid_y2 = gid_y + 0;
if (_gid_x2 >= input_width)
_gid_x2 = input_width - 1;
if (_gid_x2 < 0)
_gid_x2 = 0;
_tmp0 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y2) * input_stride + _gid_x2);
}
{
int _gid_x3 = gid_x + 0;
int _gid_y3 = gid_y + 0;
if (_gid_x3 >= input_width)
_gid_x3 = input_width - 1;
if (_gid_x3 < 0)
_gid_x3 = 0;
_tmp0 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y3) * input_stride + _gid_x3);
}
{
int _gid_x4 = gid_x + 1;
int _gid_y4 = gid_y + 0;
if (_gid_x4 >= input_width)
_gid_x4 = input_width - 1;
if (_gid_x4 < 0)
_gid_x4 = 0;
_tmp0 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y4) * input_stride + _gid_x4);
}
{
int _gid_x5 = gid_x + 2;
int _gid_y5 = gid_y + 0;
if (_gid_x5 >= input_width)
_gid_x5 = input_width - 1;
if (_gid_x5 < 0)
_gid_x5 = 0;
_tmp0 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y5) * input_stride + _gid_x5);
}
iter[(gid_y) * iter_stride + gid_x] = (float)(_tmp0);
}
}
if (gid_x < iter_width) {
if (gid_y + 1 * (int)blockDim.y < iter_height) {
float _tmp6 = 0.F;
{
int _gid_x7 = gid_x + -2;
int _gid_y7 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x7 >= input_width)
_gid_x7 = input_width - 1;
if (_gid_x7 < 0)
_gid_x7 = 0;
_tmp6 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y7) * input_stride + _gid_x7);
}
{
int _gid_x8 = gid_x + -1;
int _gid_y8 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x8 >= input_width)
_gid_x8 = input_width - 1;
if (_gid_x8 < 0)
_gid_x8 = 0;
_tmp6 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y8) * input_stride + _gid_x8);
}
{
int _gid_x9 = gid_x + 0;
int _gid_y9 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x9 >= input_width)
_gid_x9 = input_width - 1;
if (_gid_x9 < 0)
_gid_x9 = 0;
_tmp6 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y9) * input_stride + _gid_x9);
}
{
int _gid_x10 = gid_x + 1;
int _gid_y10 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x10 >= input_width)
_gid_x10 = input_width - 1;
if (_gid_x10 < 0)
_gid_x10 = 0;
_tmp6 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y10) * input_stride + _gid_x10);
}
{
int _gid_x11 = gid_x + 2;
int _gid_y11 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x11 >= input_width)
_gid_x11 = input_width - 1;
if (_gid_x11 < 0)
_gid_x11 = 0;
_tmp6 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y11) * input_stride + _gid_x11);
}
iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp6);
}
}
if (gid_x < iter_width) {
if (gid_y + 2 * (int)blockDim.y < iter_height) {
float _tmp12 = 0.F;
{
int _gid_x13 = gid_x + -2;
int _gid_y13 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x13 >= input_width)
_gid_x13 = input_width - 1;
if (_gid_x13 < 0)
_gid_x13 = 0;
_tmp12 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y13) * input_stride + _gid_x13);
}
{
int _gid_x14 = gid_x + -1;
int _gid_y14 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x14 >= input_width)
_gid_x14 = input_width - 1;
if (_gid_x14 < 0)
_gid_x14 = 0;
_tmp12 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y14) * input_stride + _gid_x14);
}
{
int _gid_x15 = gid_x + 0;
int _gid_y15 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x15 >= input_width)
_gid_x15 = input_width - 1;
if (_gid_x15 < 0)
_gid_x15 = 0;
_tmp12 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y15) * input_stride + _gid_x15);
}
{
int _gid_x16 = gid_x + 1;
int _gid_y16 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x16 >= input_width)
_gid_x16 = input_width - 1;
if (_gid_x16 < 0)
_gid_x16 = 0;
_tmp12 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y16) * input_stride + _gid_x16);
}
{
int _gid_x17 = gid_x + 2;
int _gid_y17 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x17 >= input_width)
_gid_x17 = input_width - 1;
if (_gid_x17 < 0)
_gid_x17 = 0;
_tmp12 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y17) * input_stride + _gid_x17);
}
iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp12);
}
}
if (gid_x < iter_width) {
if (gid_y + 3 * (int)blockDim.y < iter_height) {
float _tmp18 = 0.F;
{
int _gid_x19 = gid_x + -2;
int _gid_y19 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x19 >= input_width)
_gid_x19 = input_width - 1;
if (_gid_x19 < 0)
_gid_x19 = 0;
_tmp18 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y19) * input_stride + _gid_x19);
}
{
int _gid_x20 = gid_x + -1;
int _gid_y20 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x20 >= input_width)
_gid_x20 = input_width - 1;
if (_gid_x20 < 0)
_gid_x20 = 0;
_tmp18 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y20) * input_stride + _gid_x20);
}
{
int _gid_x21 = gid_x + 0;
int _gid_y21 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x21 >= input_width)
_gid_x21 = input_width - 1;
if (_gid_x21 < 0)
_gid_x21 = 0;
_tmp18 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y21) * input_stride + _gid_x21);
}
{
int _gid_x22 = gid_x + 1;
int _gid_y22 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x22 >= input_width)
_gid_x22 = input_width - 1;
if (_gid_x22 < 0)
_gid_x22 = 0;
_tmp18 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y22) * input_stride + _gid_x22);
}
{
int _gid_x23 = gid_x + 2;
int _gid_y23 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x23 >= input_width)
_gid_x23 = input_width - 1;
if (_gid_x23 < 0)
_gid_x23 = 0;
_tmp18 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y23) * input_stride + _gid_x23);
}
iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp18);
}
}
if (gid_x < iter_width) {
if (gid_y + 4 * (int)blockDim.y < iter_height) {
float _tmp24 = 0.F;
{
int _gid_x25 = gid_x + -2;
int _gid_y25 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x25 >= input_width)
_gid_x25 = input_width - 1;
if (_gid_x25 < 0)
_gid_x25 = 0;
_tmp24 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y25) * input_stride + _gid_x25);
}
{
int _gid_x26 = gid_x + -1;
int _gid_y26 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x26 >= input_width)
_gid_x26 = input_width - 1;
if (_gid_x26 < 0)
_gid_x26 = 0;
_tmp24 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y26) * input_stride + _gid_x26);
}
{
int _gid_x27 = gid_x + 0;
int _gid_y27 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x27 >= input_width)
_gid_x27 = input_width - 1;
if (_gid_x27 < 0)
_gid_x27 = 0;
_tmp24 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y27) * input_stride + _gid_x27);
}
{
int _gid_x28 = gid_x + 1;
int _gid_y28 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x28 >= input_width)
_gid_x28 = input_width - 1;
if (_gid_x28 < 0)
_gid_x28 = 0;
_tmp24 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y28) * input_stride + _gid_x28);
}
{
int _gid_x29 = gid_x + 2;
int _gid_y29 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x29 >= input_width)
_gid_x29 = input_width - 1;
if (_gid_x29 < 0)
_gid_x29 = 0;
_tmp24 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y29) * input_stride + _gid_x29);
}
iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp24);
}
}
if (gid_x < iter_width) {
if (gid_y + 5 * (int)blockDim.y < iter_height) {
float _tmp30 = 0.F;
{
int _gid_x31 = gid_x + -2;
int _gid_y31 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x31 >= input_width)
_gid_x31 = input_width - 1;
if (_gid_x31 < 0)
_gid_x31 = 0;
_tmp30 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y31) * input_stride + _gid_x31);
}
{
int _gid_x32 = gid_x + -1;
int _gid_y32 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x32 >= input_width)
_gid_x32 = input_width - 1;
if (_gid_x32 < 0)
_gid_x32 = 0;
_tmp30 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y32) * input_stride + _gid_x32);
}
{
int _gid_x33 = gid_x + 0;
int _gid_y33 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x33 >= input_width)
_gid_x33 = input_width - 1;
if (_gid_x33 < 0)
_gid_x33 = 0;
_tmp30 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y33) * input_stride + _gid_x33);
}
{
int _gid_x34 = gid_x + 1;
int _gid_y34 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x34 >= input_width)
_gid_x34 = input_width - 1;
if (_gid_x34 < 0)
_gid_x34 = 0;
_tmp30 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y34) * input_stride + _gid_x34);
}
{
int _gid_x35 = gid_x + 2;
int _gid_y35 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x35 >= input_width)
_gid_x35 = input_width - 1;
if (_gid_x35 < 0)
_gid_x35 = 0;
_tmp30 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y35) * input_stride + _gid_x35);
}
iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp30);
}
}
if (gid_x < iter_width) {
if (gid_y + 6 * (int)blockDim.y < iter_height) {
float _tmp36 = 0.F;
{
int _gid_x37 = gid_x + -2;
int _gid_y37 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x37 >= input_width)
_gid_x37 = input_width - 1;
if (_gid_x37 < 0)
_gid_x37 = 0;
_tmp36 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y37) * input_stride + _gid_x37);
}
{
int _gid_x38 = gid_x + -1;
int _gid_y38 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x38 >= input_width)
_gid_x38 = input_width - 1;
if (_gid_x38 < 0)
_gid_x38 = 0;
_tmp36 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y38) * input_stride + _gid_x38);
}
{
int _gid_x39 = gid_x + 0;
int _gid_y39 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x39 >= input_width)
_gid_x39 = input_width - 1;
if (_gid_x39 < 0)
_gid_x39 = 0;
_tmp36 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y39) * input_stride + _gid_x39);
}
{
int _gid_x40 = gid_x + 1;
int _gid_y40 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x40 >= input_width)
_gid_x40 = input_width - 1;
if (_gid_x40 < 0)
_gid_x40 = 0;
_tmp36 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y40) * input_stride + _gid_x40);
}
{
int _gid_x41 = gid_x + 2;
int _gid_y41 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x41 >= input_width)
_gid_x41 = input_width - 1;
if (_gid_x41 < 0)
_gid_x41 = 0;
_tmp36 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y41) * input_stride + _gid_x41);
}
iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp36);
}
}
if (gid_x < iter_width) {
if (gid_y + 7 * (int)blockDim.y < iter_height) {
float _tmp42 = 0.F;
{
int _gid_x43 = gid_x + -2;
int _gid_y43 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x43 >= input_width)
_gid_x43 = input_width - 1;
if (_gid_x43 < 0)
_gid_x43 = 0;
_tmp42 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y43) * input_stride + _gid_x43);
}
{
int _gid_x44 = gid_x + -1;
int _gid_y44 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x44 >= input_width)
_gid_x44 = input_width - 1;
if (_gid_x44 < 0)
_gid_x44 = 0;
_tmp42 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y44) * input_stride + _gid_x44);
}
{
int _gid_x45 = gid_x + 0;
int _gid_y45 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x45 >= input_width)
_gid_x45 = input_width - 1;
if (_gid_x45 < 0)
_gid_x45 = 0;
_tmp42 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y45) * input_stride + _gid_x45);
}
{
int _gid_x46 = gid_x + 1;
int _gid_y46 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x46 >= input_width)
_gid_x46 = input_width - 1;
if (_gid_x46 < 0)
_gid_x46 = 0;
_tmp42 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y46) * input_stride + _gid_x46);
}
{
int _gid_x47 = gid_x + 2;
int _gid_y47 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x47 >= input_width)
_gid_x47 = input_width - 1;
if (_gid_x47 < 0)
_gid_x47 = 0;
_tmp42 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y47) * input_stride + _gid_x47);
}
iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp42);
}
}
}
goto BH_EXIT;
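// BH_B: bottom-border blocks — clamp the row index to input_height - 1.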
BH_B:
{
if (gid_y < iter_height) {
float _tmp48 = 0.F;
{
int _gid_x49 = gid_x + -2;
int _gid_y49 = gid_y + 0;
if (_gid_y49 >= input_height)
_gid_y49 = input_height - 1;
_tmp48 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y49) * input_stride + _gid_x49);
}
{
int _gid_x50 = gid_x + -1;
int _gid_y50 = gid_y + 0;
if (_gid_y50 >= input_height)
_gid_y50 = input_height - 1;
_tmp48 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y50) * input_stride + _gid_x50);
}
{
int _gid_x51 = gid_x + 0;
int _gid_y51 = gid_y + 0;
if (_gid_y51 >= input_height)
_gid_y51 = input_height - 1;
_tmp48 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y51) * input_stride + _gid_x51);
}
{
int _gid_x52 = gid_x + 1;
int _gid_y52 = gid_y + 0;
if (_gid_y52 >= input_height)
_gid_y52 = input_height - 1;
_tmp48 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y52) * input_stride + _gid_x52);
}
{
int _gid_x53 = gid_x + 2;
int _gid_y53 = gid_y + 0;
if (_gid_y53 >= input_height)
_gid_y53 = input_height - 1;
_tmp48 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y53) * input_stride + _gid_x53);
}
iter[(gid_y) * iter_stride + gid_x] = (float)(_tmp48);
}
if (gid_y + 1 * (int)blockDim.y < iter_height) {
float _tmp54 = 0.F;
{
int _gid_x55 = gid_x + -2;
int _gid_y55 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_y55 >= input_height)
_gid_y55 = input_height - 1;
_tmp54 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y55) * input_stride + _gid_x55);
}
{
int _gid_x56 = gid_x + -1;
int _gid_y56 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_y56 >= input_height)
_gid_y56 = input_height - 1;
_tmp54 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y56) * input_stride + _gid_x56);
}
{
int _gid_x57 = gid_x + 0;
int _gid_y57 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_y57 >= input_height)
_gid_y57 = input_height - 1;
_tmp54 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y57) * input_stride + _gid_x57);
}
{
int _gid_x58 = gid_x + 1;
int _gid_y58 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_y58 >= input_height)
_gid_y58 = input_height - 1;
_tmp54 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y58) * input_stride + _gid_x58);
}
{
int _gid_x59 = gid_x + 2;
int _gid_y59 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_y59 >= input_height)
_gid_y59 = input_height - 1;
_tmp54 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y59) * input_stride + _gid_x59);
}
iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp54);
}
if (gid_y + 2 * (int)blockDim.y < iter_height) {
float _tmp60 = 0.F;
{
int _gid_x61 = gid_x + -2;
int _gid_y61 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_y61 >= input_height)
_gid_y61 = input_height - 1;
_tmp60 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y61) * input_stride + _gid_x61);
}
{
int _gid_x62 = gid_x + -1;
int _gid_y62 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_y62 >= input_height)
_gid_y62 = input_height - 1;
_tmp60 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y62) * input_stride + _gid_x62);
}
{
int _gid_x63 = gid_x + 0;
int _gid_y63 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_y63 >= input_height)
_gid_y63 = input_height - 1;
_tmp60 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y63) * input_stride + _gid_x63);
}
{
int _gid_x64 = gid_x + 1;
int _gid_y64 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_y64 >= input_height)
_gid_y64 = input_height - 1;
_tmp60 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y64) * input_stride + _gid_x64);
}
{
int _gid_x65 = gid_x + 2;
int _gid_y65 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_y65 >= input_height)
_gid_y65 = input_height - 1;
_tmp60 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y65) * input_stride + _gid_x65);
}
iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp60);
}
if (gid_y + 3 * (int)blockDim.y < iter_height) {
float _tmp66 = 0.F;
{
int _gid_x67 = gid_x + -2;
int _gid_y67 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_y67 >= input_height)
_gid_y67 = input_height - 1;
_tmp66 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y67) * input_stride + _gid_x67);
}
{
int _gid_x68 = gid_x + -1;
int _gid_y68 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_y68 >= input_height)
_gid_y68 = input_height - 1;
_tmp66 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y68) * input_stride + _gid_x68);
}
{
int _gid_x69 = gid_x + 0;
int _gid_y69 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_y69 >= input_height)
_gid_y69 = input_height - 1;
_tmp66 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y69) * input_stride + _gid_x69);
}
{
int _gid_x70 = gid_x + 1;
int _gid_y70 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_y70 >= input_height)
_gid_y70 = input_height - 1;
_tmp66 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y70) * input_stride + _gid_x70);
}
{
int _gid_x71 = gid_x + 2;
int _gid_y71 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_y71 >= input_height)
_gid_y71 = input_height - 1;
_tmp66 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y71) * input_stride + _gid_x71);
}
iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp66);
}
if (gid_y + 4 * (int)blockDim.y < iter_height) {
float _tmp72 = 0.F;
{
int _gid_x73 = gid_x + -2;
int _gid_y73 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_y73 >= input_height)
_gid_y73 = input_height - 1;
_tmp72 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y73) * input_stride + _gid_x73);
}
{
int _gid_x74 = gid_x + -1;
int _gid_y74 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_y74 >= input_height)
_gid_y74 = input_height - 1;
_tmp72 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y74) * input_stride + _gid_x74);
}
{
int _gid_x75 = gid_x + 0;
int _gid_y75 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_y75 >= input_height)
_gid_y75 = input_height - 1;
_tmp72 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y75) * input_stride + _gid_x75);
}
{
int _gid_x76 = gid_x + 1;
int _gid_y76 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_y76 >= input_height)
_gid_y76 = input_height - 1;
_tmp72 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y76) * input_stride + _gid_x76);
}
{
int _gid_x77 = gid_x + 2;
int _gid_y77 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_y77 >= input_height)
_gid_y77 = input_height - 1;
_tmp72 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y77) * input_stride + _gid_x77);
}
iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp72);
}
if (gid_y + 5 * (int)blockDim.y < iter_height) {
float _tmp78 = 0.F;
{
int _gid_x79 = gid_x + -2;
int _gid_y79 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_y79 >= input_height)
_gid_y79 = input_height - 1;
_tmp78 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y79) * input_stride + _gid_x79);
}
{
int _gid_x80 = gid_x + -1;
int _gid_y80 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_y80 >= input_height)
_gid_y80 = input_height - 1;
_tmp78 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y80) * input_stride + _gid_x80);
}
{
int _gid_x81 = gid_x + 0;
int _gid_y81 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_y81 >= input_height)
_gid_y81 = input_height - 1;
_tmp78 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y81) * input_stride + _gid_x81);
}
{
int _gid_x82 = gid_x + 1;
int _gid_y82 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_y82 >= input_height)
_gid_y82 = input_height - 1;
_tmp78 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y82) * input_stride + _gid_x82);
}
{
int _gid_x83 = gid_x + 2;
int _gid_y83 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_y83 >= input_height)
_gid_y83 = input_height - 1;
_tmp78 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y83) * input_stride + _gid_x83);
}
iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp78);
}
if (gid_y + 6 * (int)blockDim.y < iter_height) {
float _tmp84 = 0.F;
{
int _gid_x85 = gid_x + -2;
int _gid_y85 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_y85 >= input_height)
_gid_y85 = input_height - 1;
_tmp84 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y85) * input_stride + _gid_x85);
}
{
int _gid_x86 = gid_x + -1;
int _gid_y86 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_y86 >= input_height)
_gid_y86 = input_height - 1;
_tmp84 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y86) * input_stride + _gid_x86);
}
{
int _gid_x87 = gid_x + 0;
int _gid_y87 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_y87 >= input_height)
_gid_y87 = input_height - 1;
_tmp84 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y87) * input_stride + _gid_x87);
}
{
int _gid_x88 = gid_x + 1;
int _gid_y88 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_y88 >= input_height)
_gid_y88 = input_height - 1;
_tmp84 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y88) * input_stride + _gid_x88);
}
{
int _gid_x89 = gid_x + 2;
int _gid_y89 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_y89 >= input_height)
_gid_y89 = input_height - 1;
_tmp84 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y89) * input_stride + _gid_x89);
}
iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp84);
}
if (gid_y + 7 * (int)blockDim.y < iter_height) {
float _tmp90 = 0.F;
{
int _gid_x91 = gid_x + -2;
int _gid_y91 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_y91 >= input_height)
_gid_y91 = input_height - 1;
_tmp90 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y91) * input_stride + _gid_x91);
}
{
int _gid_x92 = gid_x + -1;
int _gid_y92 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_y92 >= input_height)
_gid_y92 = input_height - 1;
_tmp90 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y92) * input_stride + _gid_x92);
}
{
int _gid_x93 = gid_x + 0;
int _gid_y93 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_y93 >= input_height)
_gid_y93 = input_height - 1;
_tmp90 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y93) * input_stride + _gid_x93);
}
{
int _gid_x94 = gid_x + 1;
int _gid_y94 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_y94 >= input_height)
_gid_y94 = input_height - 1;
_tmp90 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y94) * input_stride + _gid_x94);
}
{
int _gid_x95 = gid_x + 2;
int _gid_y95 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_y95 >= input_height)
_gid_y95 = input_height - 1;
_tmp90 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y95) * input_stride + _gid_x95);
}
iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp90);
}
}
goto BH_EXIT;
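// BH_R: right-border blocks — clamp column indices that run past input_width.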
BH_R:
{
if (gid_x < iter_width) {
if (gid_y < iter_height) {
float _tmp96 = 0.F;
{
int _gid_x97 = gid_x + -2;
int _gid_y97 = gid_y + 0;
if (_gid_x97 >= input_width)
_gid_x97 = input_width - 1;
_tmp96 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y97) * input_stride + _gid_x97);
}
{
int _gid_x98 = gid_x + -1;
int _gid_y98 = gid_y + 0;
if (_gid_x98 >= input_width)
_gid_x98 = input_width - 1;
_tmp96 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y98) * input_stride + _gid_x98);
}
{
int _gid_x99 = gid_x + 0;
int _gid_y99 = gid_y + 0;
if (_gid_x99 >= input_width)
_gid_x99 = input_width - 1;
_tmp96 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y99) * input_stride + _gid_x99);
}
{
int _gid_x100 = gid_x + 1;
int _gid_y100 = gid_y + 0;
if (_gid_x100 >= input_width)
_gid_x100 = input_width - 1;
_tmp96 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y100) * input_stride + _gid_x100);
}
{
int _gid_x101 = gid_x + 2;
int _gid_y101 = gid_y + 0;
if (_gid_x101 >= input_width)
_gid_x101 = input_width - 1;
_tmp96 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y101) * input_stride + _gid_x101);
}
iter[(gid_y) * iter_stride + gid_x] = (float)(_tmp96);
}
}
if (gid_x < iter_width) {
if (gid_y + 1 * (int)blockDim.y < iter_height) {
float _tmp102 = 0.F;
{
int _gid_x103 = gid_x + -2;
int _gid_y103 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x103 >= input_width)
_gid_x103 = input_width - 1;
_tmp102 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y103) * input_stride + _gid_x103);
}
{
int _gid_x104 = gid_x + -1;
int _gid_y104 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x104 >= input_width)
_gid_x104 = input_width - 1;
_tmp102 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y104) * input_stride + _gid_x104);
}
{
int _gid_x105 = gid_x + 0;
int _gid_y105 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x105 >= input_width)
_gid_x105 = input_width - 1;
_tmp102 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y105) * input_stride + _gid_x105);
}
{
int _gid_x106 = gid_x + 1;
int _gid_y106 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x106 >= input_width)
_gid_x106 = input_width - 1;
_tmp102 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y106) * input_stride + _gid_x106);
}
{
int _gid_x107 = gid_x + 2;
int _gid_y107 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x107 >= input_width)
_gid_x107 = input_width - 1;
_tmp102 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y107) * input_stride + _gid_x107);
}
iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp102);
}
}
if (gid_x < iter_width) {
if (gid_y + 2 * (int)blockDim.y < iter_height) {
float _tmp108 = 0.F;
{
int _gid_x109 = gid_x + -2;
int _gid_y109 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x109 >= input_width)
_gid_x109 = input_width - 1;
_tmp108 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y109) * input_stride + _gid_x109);
}
{
int _gid_x110 = gid_x + -1;
int _gid_y110 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x110 >= input_width)
_gid_x110 = input_width - 1;
_tmp108 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y110) * input_stride + _gid_x110);
}
{
int _gid_x111 = gid_x + 0;
int _gid_y111 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x111 >= input_width)
_gid_x111 = input_width - 1;
_tmp108 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y111) * input_stride + _gid_x111);
}
{
int _gid_x112 = gid_x + 1;
int _gid_y112 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x112 >= input_width)
_gid_x112 = input_width - 1;
_tmp108 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y112) * input_stride + _gid_x112);
}
{
int _gid_x113 = gid_x + 2;
int _gid_y113 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x113 >= input_width)
_gid_x113 = input_width - 1;
_tmp108 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y113) * input_stride + _gid_x113);
}
iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp108);
}
}
if (gid_x < iter_width) {
if (gid_y + 3 * (int)blockDim.y < iter_height) {
float _tmp114 = 0.F;
{
int _gid_x115 = gid_x + -2;
int _gid_y115 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x115 >= input_width)
_gid_x115 = input_width - 1;
_tmp114 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y115) * input_stride + _gid_x115);
}
{
int _gid_x116 = gid_x + -1;
int _gid_y116 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x116 >= input_width)
_gid_x116 = input_width - 1;
_tmp114 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y116) * input_stride + _gid_x116);
}
{
int _gid_x117 = gid_x + 0;
int _gid_y117 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x117 >= input_width)
_gid_x117 = input_width - 1;
_tmp114 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y117) * input_stride + _gid_x117);
}
{
int _gid_x118 = gid_x + 1;
int _gid_y118 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x118 >= input_width)
_gid_x118 = input_width - 1;
_tmp114 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y118) * input_stride + _gid_x118);
}
{
int _gid_x119 = gid_x + 2;
int _gid_y119 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x119 >= input_width)
_gid_x119 = input_width - 1;
_tmp114 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y119) * input_stride + _gid_x119);
}
iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp114);
}
}
if (gid_x < iter_width) {
if (gid_y + 4 * (int)blockDim.y < iter_height) {
float _tmp120 = 0.F;
{
int _gid_x121 = gid_x + -2;
int _gid_y121 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x121 >= input_width)
_gid_x121 = input_width - 1;
_tmp120 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y121) * input_stride + _gid_x121);
}
{
int _gid_x122 = gid_x + -1;
int _gid_y122 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x122 >= input_width)
_gid_x122 = input_width - 1;
_tmp120 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y122) * input_stride + _gid_x122);
}
{
int _gid_x123 = gid_x + 0;
int _gid_y123 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x123 >= input_width)
_gid_x123 = input_width - 1;
_tmp120 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y123) * input_stride + _gid_x123);
}
{
int _gid_x124 = gid_x + 1;
int _gid_y124 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x124 >= input_width)
_gid_x124 = input_width - 1;
_tmp120 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y124) * input_stride + _gid_x124);
}
{
int _gid_x125 = gid_x + 2;
int _gid_y125 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x125 >= input_width)
_gid_x125 = input_width - 1;
_tmp120 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y125) * input_stride + _gid_x125);
}
iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp120);
}
}
if (gid_x < iter_width) {
if (gid_y + 5 * (int)blockDim.y < iter_height) {
float _tmp126 = 0.F;
{
int _gid_x127 = gid_x + -2;
int _gid_y127 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x127 >= input_width)
_gid_x127 = input_width - 1;
_tmp126 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y127) * input_stride + _gid_x127);
}
{
int _gid_x128 = gid_x + -1;
int _gid_y128 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x128 >= input_width)
_gid_x128 = input_width - 1;
_tmp126 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y128) * input_stride + _gid_x128);
}
{
int _gid_x129 = gid_x + 0;
int _gid_y129 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x129 >= input_width)
_gid_x129 = input_width - 1;
_tmp126 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y129) * input_stride + _gid_x129);
}
{
int _gid_x130 = gid_x + 1;
int _gid_y130 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x130 >= input_width)
_gid_x130 = input_width - 1;
_tmp126 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y130) * input_stride + _gid_x130);
}
{
int _gid_x131 = gid_x + 2;
int _gid_y131 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x131 >= input_width)
_gid_x131 = input_width - 1;
_tmp126 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y131) * input_stride + _gid_x131);
}
iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp126);
}
}
if (gid_x < iter_width) {
if (gid_y + 6 * (int)blockDim.y < iter_height) {
float _tmp132 = 0.F;
{
int _gid_x133 = gid_x + -2;
int _gid_y133 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x133 >= input_width)
_gid_x133 = input_width - 1;
_tmp132 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y133) * input_stride + _gid_x133);
}
{
int _gid_x134 = gid_x + -1;
int _gid_y134 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x134 >= input_width)
_gid_x134 = input_width - 1;
_tmp132 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y134) * input_stride + _gid_x134);
}
{
int _gid_x135 = gid_x + 0;
int _gid_y135 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x135 >= input_width)
_gid_x135 = input_width - 1;
_tmp132 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y135) * input_stride + _gid_x135);
}
{
int _gid_x136 = gid_x + 1;
int _gid_y136 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x136 >= input_width)
_gid_x136 = input_width - 1;
_tmp132 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y136) * input_stride + _gid_x136);
}
{
int _gid_x137 = gid_x + 2;
int _gid_y137 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x137 >= input_width)
_gid_x137 = input_width - 1;
_tmp132 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y137) * input_stride + _gid_x137);
}
iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp132);
}
}
if (gid_x < iter_width) {
if (gid_y + 7 * (int)blockDim.y < iter_height) {
float _tmp138 = 0.F;
{
int _gid_x139 = gid_x + -2;
int _gid_y139 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x139 >= input_width)
_gid_x139 = input_width - 1;
_tmp138 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y139) * input_stride + _gid_x139);
}
{
int _gid_x140 = gid_x + -1;
int _gid_y140 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x140 >= input_width)
_gid_x140 = input_width - 1;
_tmp138 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y140) * input_stride + _gid_x140);
}
{
int _gid_x141 = gid_x + 0;
int _gid_y141 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x141 >= input_width)
_gid_x141 = input_width - 1;
_tmp138 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y141) * input_stride + _gid_x141);
}
{
int _gid_x142 = gid_x + 1;
int _gid_y142 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x142 >= input_width)
_gid_x142 = input_width - 1;
_tmp138 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y142) * input_stride + _gid_x142);
}
{
int _gid_x143 = gid_x + 2;
int _gid_y143 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x143 >= input_width)
_gid_x143 = input_width - 1;
_tmp138 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y143) * input_stride + _gid_x143);
}
iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp138);
}
}
}
goto BH_EXIT;
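// BH_L: left image border. x indices that fall left of column 0 are clamped to
// column 0 (clamp-to-edge), mirroring the right-border clamping handled above.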
BH_L:
{
if (gid_y < iter_height) {
float _tmp144 = 0.F;
{
int _gid_x145 = gid_x + -2;
int _gid_y145 = gid_y + 0;
if (_gid_x145 < 0)
_gid_x145 = 0;
_tmp144 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y145) * input_stride + _gid_x145);
}
{
int _gid_x146 = gid_x + -1;
int _gid_y146 = gid_y + 0;
if (_gid_x146 < 0)
_gid_x146 = 0;
_tmp144 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y146) * input_stride + _gid_x146);
}
{
int _gid_x147 = gid_x + 0;
int _gid_y147 = gid_y + 0;
if (_gid_x147 < 0)
_gid_x147 = 0;
_tmp144 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y147) * input_stride + _gid_x147);
}
{
int _gid_x148 = gid_x + 1;
int _gid_y148 = gid_y + 0;
if (_gid_x148 < 0)
_gid_x148 = 0;
_tmp144 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y148) * input_stride + _gid_x148);
}
{
int _gid_x149 = gid_x + 2;
int _gid_y149 = gid_y + 0;
if (_gid_x149 < 0)
_gid_x149 = 0;
_tmp144 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y149) * input_stride + _gid_x149);
}
iter[(gid_y) * iter_stride + gid_x] = (float)(_tmp144);
}
if (gid_y + 1 * (int)blockDim.y < iter_height) {
float _tmp150 = 0.F;
{
int _gid_x151 = gid_x + -2;
int _gid_y151 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x151 < 0)
_gid_x151 = 0;
_tmp150 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y151) * input_stride + _gid_x151);
}
{
int _gid_x152 = gid_x + -1;
int _gid_y152 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x152 < 0)
_gid_x152 = 0;
_tmp150 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y152) * input_stride + _gid_x152);
}
{
int _gid_x153 = gid_x + 0;
int _gid_y153 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x153 < 0)
_gid_x153 = 0;
_tmp150 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y153) * input_stride + _gid_x153);
}
{
int _gid_x154 = gid_x + 1;
int _gid_y154 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x154 < 0)
_gid_x154 = 0;
_tmp150 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y154) * input_stride + _gid_x154);
}
{
int _gid_x155 = gid_x + 2;
int _gid_y155 = gid_y + 1 * (int)blockDim.y + 0;
if (_gid_x155 < 0)
_gid_x155 = 0;
_tmp150 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y155) * input_stride + _gid_x155);
}
iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp150);
}
if (gid_y + 2 * (int)blockDim.y < iter_height) {
float _tmp156 = 0.F;
{
int _gid_x157 = gid_x + -2;
int _gid_y157 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x157 < 0)
_gid_x157 = 0;
_tmp156 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y157) * input_stride + _gid_x157);
}
{
int _gid_x158 = gid_x + -1;
int _gid_y158 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x158 < 0)
_gid_x158 = 0;
_tmp156 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y158) * input_stride + _gid_x158);
}
{
int _gid_x159 = gid_x + 0;
int _gid_y159 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x159 < 0)
_gid_x159 = 0;
_tmp156 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y159) * input_stride + _gid_x159);
}
{
int _gid_x160 = gid_x + 1;
int _gid_y160 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x160 < 0)
_gid_x160 = 0;
_tmp156 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y160) * input_stride + _gid_x160);
}
{
int _gid_x161 = gid_x + 2;
int _gid_y161 = gid_y + 2 * (int)blockDim.y + 0;
if (_gid_x161 < 0)
_gid_x161 = 0;
_tmp156 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y161) * input_stride + _gid_x161);
}
iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp156);
}
if (gid_y + 3 * (int)blockDim.y < iter_height) {
float _tmp162 = 0.F;
{
int _gid_x163 = gid_x + -2;
int _gid_y163 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x163 < 0)
_gid_x163 = 0;
_tmp162 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y163) * input_stride + _gid_x163);
}
{
int _gid_x164 = gid_x + -1;
int _gid_y164 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x164 < 0)
_gid_x164 = 0;
_tmp162 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y164) * input_stride + _gid_x164);
}
{
int _gid_x165 = gid_x + 0;
int _gid_y165 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x165 < 0)
_gid_x165 = 0;
_tmp162 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y165) * input_stride + _gid_x165);
}
{
int _gid_x166 = gid_x + 1;
int _gid_y166 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x166 < 0)
_gid_x166 = 0;
_tmp162 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y166) * input_stride + _gid_x166);
}
{
int _gid_x167 = gid_x + 2;
int _gid_y167 = gid_y + 3 * (int)blockDim.y + 0;
if (_gid_x167 < 0)
_gid_x167 = 0;
_tmp162 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y167) * input_stride + _gid_x167);
}
iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp162);
}
if (gid_y + 4 * (int)blockDim.y < iter_height) {
float _tmp168 = 0.F;
{
int _gid_x169 = gid_x + -2;
int _gid_y169 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x169 < 0)
_gid_x169 = 0;
_tmp168 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y169) * input_stride + _gid_x169);
}
{
int _gid_x170 = gid_x + -1;
int _gid_y170 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x170 < 0)
_gid_x170 = 0;
_tmp168 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y170) * input_stride + _gid_x170);
}
{
int _gid_x171 = gid_x + 0;
int _gid_y171 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x171 < 0)
_gid_x171 = 0;
_tmp168 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y171) * input_stride + _gid_x171);
}
{
int _gid_x172 = gid_x + 1;
int _gid_y172 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x172 < 0)
_gid_x172 = 0;
_tmp168 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y172) * input_stride + _gid_x172);
}
{
int _gid_x173 = gid_x + 2;
int _gid_y173 = gid_y + 4 * (int)blockDim.y + 0;
if (_gid_x173 < 0)
_gid_x173 = 0;
_tmp168 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y173) * input_stride + _gid_x173);
}
iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp168);
}
if (gid_y + 5 * (int)blockDim.y < iter_height) {
float _tmp174 = 0.F;
{
int _gid_x175 = gid_x + -2;
int _gid_y175 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x175 < 0)
_gid_x175 = 0;
_tmp174 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y175) * input_stride + _gid_x175);
}
{
int _gid_x176 = gid_x + -1;
int _gid_y176 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x176 < 0)
_gid_x176 = 0;
_tmp174 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y176) * input_stride + _gid_x176);
}
{
int _gid_x177 = gid_x + 0;
int _gid_y177 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x177 < 0)
_gid_x177 = 0;
_tmp174 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y177) * input_stride + _gid_x177);
}
{
int _gid_x178 = gid_x + 1;
int _gid_y178 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x178 < 0)
_gid_x178 = 0;
_tmp174 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y178) * input_stride + _gid_x178);
}
{
int _gid_x179 = gid_x + 2;
int _gid_y179 = gid_y + 5 * (int)blockDim.y + 0;
if (_gid_x179 < 0)
_gid_x179 = 0;
_tmp174 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y179) * input_stride + _gid_x179);
}
iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp174);
}
if (gid_y + 6 * (int)blockDim.y < iter_height) {
float _tmp180 = 0.F;
{
int _gid_x181 = gid_x + -2;
int _gid_y181 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x181 < 0)
_gid_x181 = 0;
_tmp180 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y181) * input_stride + _gid_x181);
}
{
int _gid_x182 = gid_x + -1;
int _gid_y182 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x182 < 0)
_gid_x182 = 0;
_tmp180 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y182) * input_stride + _gid_x182);
}
{
int _gid_x183 = gid_x + 0;
int _gid_y183 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x183 < 0)
_gid_x183 = 0;
_tmp180 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y183) * input_stride + _gid_x183);
}
{
int _gid_x184 = gid_x + 1;
int _gid_y184 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x184 < 0)
_gid_x184 = 0;
_tmp180 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y184) * input_stride + _gid_x184);
}
{
int _gid_x185 = gid_x + 2;
int _gid_y185 = gid_y + 6 * (int)blockDim.y + 0;
if (_gid_x185 < 0)
_gid_x185 = 0;
_tmp180 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y185) * input_stride + _gid_x185);
}
iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp180);
}
if (gid_y + 7 * (int)blockDim.y < iter_height) {
float _tmp186 = 0.F;
{
int _gid_x187 = gid_x + -2;
int _gid_y187 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x187 < 0)
_gid_x187 = 0;
_tmp186 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y187) * input_stride + _gid_x187);
}
{
int _gid_x188 = gid_x + -1;
int _gid_y188 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x188 < 0)
_gid_x188 = 0;
_tmp186 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y188) * input_stride + _gid_x188);
}
{
int _gid_x189 = gid_x + 0;
int _gid_y189 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x189 < 0)
_gid_x189 = 0;
_tmp186 += 0.369545996F * tex1Dfetch(_texinputX, (_gid_y189) * input_stride + _gid_x189);
}
{
int _gid_x190 = gid_x + 1;
int _gid_y190 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x190 < 0)
_gid_x190 = 0;
_tmp186 += 0.244460002F * tex1Dfetch(_texinputX, (_gid_y190) * input_stride + _gid_x190);
}
{
int _gid_x191 = gid_x + 2;
int _gid_y191 = gid_y + 7 * (int)blockDim.y + 0;
if (_gid_x191 < 0)
_gid_x191 = 0;
_tmp186 += 0.0707660019F * tex1Dfetch(_texinputX, (_gid_y191) * input_stride + _gid_x191);
}
iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp186);
}
}
goto BH_EXIT;
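// BH_NO: interior blocks. Every tap reads a valid pixel, so the 5-point row
// convolution runs without any border or bounds checks.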
BH_NO:
{
{
float _tmp192 = 0.F;
{
_tmp192 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 0) * input_stride + gid_x + -2);
}
{
_tmp192 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 0) * input_stride + gid_x + -1);
}
{
_tmp192 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 0) * input_stride + gid_x + 0);
}
{
_tmp192 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 0) * input_stride + gid_x + 1);
}
{
_tmp192 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y) * iter_stride + gid_x] = (float)(_tmp192);
}
{
float _tmp193 = 0.F;
{
_tmp193 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 1 * (int)blockDim.y + 0) * input_stride + gid_x + -2);
}
{
_tmp193 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 1 * (int)blockDim.y + 0) * input_stride + gid_x + -1);
}
{
_tmp193 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 1 * (int)blockDim.y + 0) * input_stride + gid_x + 0);
}
{
_tmp193 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 1 * (int)blockDim.y + 0) * input_stride + gid_x + 1);
}
{
_tmp193 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 1 * (int)blockDim.y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y + 1 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp193);
}
{
float _tmp194 = 0.F;
{
_tmp194 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 2 * (int)blockDim.y + 0) * input_stride + gid_x + -2);
}
{
_tmp194 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 2 * (int)blockDim.y + 0) * input_stride + gid_x + -1);
}
{
_tmp194 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 2 * (int)blockDim.y + 0) * input_stride + gid_x + 0);
}
{
_tmp194 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 2 * (int)blockDim.y + 0) * input_stride + gid_x + 1);
}
{
_tmp194 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 2 * (int)blockDim.y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y + 2 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp194);
}
{
float _tmp195 = 0.F;
{
_tmp195 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 3 * (int)blockDim.y + 0) * input_stride + gid_x + -2);
}
{
_tmp195 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 3 * (int)blockDim.y + 0) * input_stride + gid_x + -1);
}
{
_tmp195 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 3 * (int)blockDim.y + 0) * input_stride + gid_x + 0);
}
{
_tmp195 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 3 * (int)blockDim.y + 0) * input_stride + gid_x + 1);
}
{
_tmp195 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 3 * (int)blockDim.y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y + 3 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp195);
}
{
float _tmp196 = 0.F;
{
_tmp196 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 4 * (int)blockDim.y + 0) * input_stride + gid_x + -2);
}
{
_tmp196 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 4 * (int)blockDim.y + 0) * input_stride + gid_x + -1);
}
{
_tmp196 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 4 * (int)blockDim.y + 0) * input_stride + gid_x + 0);
}
{
_tmp196 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 4 * (int)blockDim.y + 0) * input_stride + gid_x + 1);
}
{
_tmp196 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 4 * (int)blockDim.y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y + 4 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp196);
}
{
float _tmp197 = 0.F;
{
_tmp197 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 5 * (int)blockDim.y + 0) * input_stride + gid_x + -2);
}
{
_tmp197 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 5 * (int)blockDim.y + 0) * input_stride + gid_x + -1);
}
{
_tmp197 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 5 * (int)blockDim.y + 0) * input_stride + gid_x + 0);
}
{
_tmp197 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 5 * (int)blockDim.y + 0) * input_stride + gid_x + 1);
}
{
_tmp197 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 5 * (int)blockDim.y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y + 5 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp197);
}
{
float _tmp198 = 0.F;
{
_tmp198 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 6 * (int)blockDim.y + 0) * input_stride + gid_x + -2);
}
{
_tmp198 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 6 * (int)blockDim.y + 0) * input_stride + gid_x + -1);
}
{
_tmp198 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 6 * (int)blockDim.y + 0) * input_stride + gid_x + 0);
}
{
_tmp198 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 6 * (int)blockDim.y + 0) * input_stride + gid_x + 1);
}
{
_tmp198 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 6 * (int)blockDim.y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y + 6 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp198);
}
{
float _tmp199 = 0.F;
{
_tmp199 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 7 * (int)blockDim.y + 0) * input_stride + gid_x + -2);
}
{
_tmp199 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 7 * (int)blockDim.y + 0) * input_stride + gid_x + -1);
}
{
_tmp199 += 0.369545996F * tex1Dfetch(_texinputX, (gid_y + 7 * (int)blockDim.y + 0) * input_stride + gid_x + 0);
}
{
_tmp199 += 0.244460002F * tex1Dfetch(_texinputX, (gid_y + 7 * (int)blockDim.y + 0) * input_stride + gid_x + 1);
}
{
_tmp199 += 0.0707660019F * tex1Dfetch(_texinputX, (gid_y + 7 * (int)blockDim.y + 0) * input_stride + gid_x + 2);
}
iter[(gid_y + 7 * (int)blockDim.y) * iter_stride + gid_x] = (float)(_tmp199);
}
}
goto BH_EXIT;
BH_EXIT:
;
}
}
#endif //_CUGAUSSIANFILTERROWX_CU_
|
ffeaf2561e07336e635de30ce19a80943b0b1d29.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include "kmeansCPU.cu"
// includes, project
#include <hip/hip_runtime.h>
#define CUDA_SAFE_CALL_NO_SYNC(call) do { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
#define CUDA_SAFE_CALL(call) do { \
CUDA_SAFE_CALL_NO_SYNC(call); \
hipError_t err = hipDeviceSynchronize(); \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
////////////////////////////////////////////////////////////////////////
typedef struct CoreLocat{
int label;
float *center;
}Core_t;
typedef struct ICD_t{
float *col;
}ICD_table;
typedef struct RID_t{
float *col;
}RID_table;
typedef struct SetData{
int *Group;
int label;
}Set;
typedef struct ClusterSet_t{
Set *GroupA;
}ClusterSet;
typedef struct RandCluster_t{
float *vector;
int label;
}RandCluster;
//-------------------------------------------------------------------------------------------
//Read data from file
//Initialize the centers of the clusters
Core_t Getcentroid(int dimen, RandCluster *h_sample, Core_t *core, int k, int data){
for(int i = 0; i < k; i++)
{
int randNum = rand()%(data);
core[i].center = h_sample[randNum].vector;
core[i].label = i;
}
return *core;
}
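// dimcalculate(): fills ICD[i][j] with the L1 (Manhattan) distance between
// centroids i and j; these inter-centroid distances feed the pruning test in
// KmeanKernel.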
float dimcalculate(Core_t *core,float **ICD, int k, int dimen){
for(int i = 0; i < k; i++){
for(int j=0 ; j<k; j++){
for(int d=0; d<dimen; d++){
ICD[i][j] += fabsf((float)core[i].center[d] - (float)core[j].center[d]);
}
}
}
return **ICD;
}
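// sorting(): sorts every ICD row into ascending order in place and records, in
// the corresponding RID row, the original centroid index of each sorted entry,
// so RID[i].col[j] names the j-th closest centroid to centroid i.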
RID_table sorting(ICD_table *ICD, RID_table *RID, int s){
int i, j,m;
float temp;
float array[s][s];
for(i = 0; i < s; i++ )
for(j = 0; j < s; j++)
array[i][j] = ICD[i].col[j];
for(i = 0; i < s; i++ )
for(j = 0; j < s ; j++)
for(m = j+1; m < s; m++){
if(ICD[i].col[j] > ICD[i].col[m]){
temp = ICD[i].col[j];
ICD[i].col[j] = ICD[i].col[m];
ICD[i].col[m] = temp;
}
}
for(i = 0; i < s; i++ )
for(j = 0; j < s; j++)
for(m = 0; m < s; m++){
if(array[i][m] == ICD[i].col[j])
RID[i].col[j] = m;
}
return *RID;
}
__device__
float dist(int index, float* d_point, float *core, int dimen ){
float result = 0;
for(int d=0; d<dimen; d++)
result += fabsf(core[d] - d_point[index*dimen+d]);
return result;
}
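// Assignment step: one thread per data point. Starting from the point's current
// centroid, candidates are taken from that centroid's RID row (centroid indices
// ordered by increasing inter-centroid distance), and the loop breaks as soon as
// the value read from the ICD row exceeds twice the point's distance to its
// current centroid (a triangle-inequality style early exit). d_M[i] stores the
// point's distance to its chosen centroid divided by Data, and label[i] the new
// assignment.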
__global__
void KmeanKernel(float* d_M, float* d_temp,Core_t *d_core, float *d_point,ICD_table *ICD, RID_table *RID, int k, int dimen,int* label, int Data){
int i = threadIdx.x + blockDim.x * blockIdx.x;
int oldCnt;
float oldDist;
int newCnt ;
int curCnt;
float curDist;
float newDist = 0;
int count = 0;
extern __shared__ int smem[];
if(i<Data){
smem[0] = 0;
for(int in = 0; in < Data; in++)
{
smem[in] = count;
count++;
};
__syncthreads();
oldCnt = label[i];
oldDist = dist(smem[i],d_point, d_core[oldCnt].center, dimen);
newCnt = oldCnt;
newDist = dist(smem[i], d_point,d_core[newCnt].center, dimen);
for(int j = 2; j < k; j++){
curCnt = RID[oldCnt].col[j];
if(ICD[oldCnt].col[curCnt] > 2*oldDist) break;
curDist = dist(smem[i],d_point, d_core[curCnt].center, dimen);
if(curDist < newDist){
newDist = curDist;
newCnt = curCnt;
}
}
d_M[i] = dist(smem[i], d_point, d_core[newCnt].center, dimen)/Data;
label[i] = newCnt;
}
__syncthreads();
}
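// Update step: one thread per cluster. Each thread counts the points assigned to
// it, accumulates their coordinates into d_output, and divides by the count to
// form the new centroid; a cluster that ends up empty copies the coordinates of
// data point tid instead.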
__global__
void Kmean( float *d_output, int k, int Data, float *d_point, int dimen, int*index, int *count){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < k){
for(int a = 0; a<Data; a++){
if(index[a] == tid)
atomicAdd(&count[tid],1);
}
for(int i = 0; i<Data; i++)
if(index[i] == tid){
for(int d =0; d<dimen; d++)
d_output[tid*dimen+d] += d_point[i*dimen+d] ;
}
for(int d = 0; d<dimen; d++){
if(count[tid] == 0)
d_output[tid*dimen+d] = d_point[tid*dimen+d];
else
d_output[tid*dimen+d] = d_output[tid*dimen+d]/count[tid] ;
}
}
}
int main(int argc, char **argv){
hipEvent_t ICD_start, ICD_stop;
hipEventCreate(&ICD_start);
hipEventCreate(&ICD_stop);
hipEvent_t RID_start, RID_stop;
hipEventCreate(&RID_start);
hipEventCreate(&RID_stop);
hipEvent_t Kmean_start, Kmean_stop;
hipEventCreate(&Kmean_start);
hipEventCreate(&Kmean_stop);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEvent_t Kmean_Algor_start, Kmean_Algor_stop;
hipEventCreate(&Kmean_Algor_start);
hipEventCreate(&Kmean_Algor_stop);
srand(time(NULL));
if (argc < 2) {
fprintf(stderr, "Usage: %s dimension numData Cluster\n", argv[0]);
exit(1);
}
int k = atoi(argv[1]); //Cluster
printf("Value K: %d\n", k);
//---------------start reading data-------------------------
int sample;
int numberOfData = 0;
FILE *file = fopen("test.txt", "r");
if(file == NULL){
printf("Error opening file!\n");
exit(1);
}
int ch;
int numberofLines =0;
while((ch = fgetc(file))!=EOF)
if(ch == '\n') numberofLines++;
fclose(file);
file = fopen("test.txt", "r");
while(fscanf(file, "%d ", &sample) != EOF){
numberOfData++;
}
fclose(file);
//Read the data from file
int Data = numberofLines;
int n = numberOfData;
int m = numberofLines;
int dimen = (n/m);
RandCluster *Points = new RandCluster[ Data];
for(int i = 0; i < Data; i++)
Points[i].vector = new float[dimen];
float *FixedData;
FixedData = (float*)malloc(sizeof(float)*Data*dimen);
file = fopen("test.txt","r");
float **temp;
temp = (float**)malloc(Data*sizeof(float*));
for(int i=0; i<Data; i++)
temp[i] = (float*)malloc(sizeof(float)*dimen);
while(!feof(file)){
for(int i = 0; i < Data ; i++){
for(int j = 0; j < dimen; j++){
fscanf(file,"%f ",& temp[i][j]);
Points[i].vector[j] = temp[i][j];
FixedData[j+i*dimen] = Points[i].vector[j];
Points[i].label = 0;
}
}
}
fclose(file);
//(----------------Read data ends------------------
Core_t *h_core;
float **ICD; // k x k table of inter-centroid distances
ICD = (float**)malloc(k*sizeof(float*));
for(int i = 0; i<k; i++)
ICD[i] = (float*)malloc(sizeof(float)*k);
for(int i = 0; i < k; i++)
for(int j = 0; j < k; j++)
ICD[i][j] = 0;
//initialize function set
h_core = (Core_t*)malloc(sizeof(Core_t)*k);
for(int i = 0; i < k ; i++)
h_core[i].center = (float*)malloc(sizeof(float)*dimen);
ICD_table *ICD_row = (ICD_table*)malloc(sizeof( ICD_table)*k);
for(int i = 0; i < k ; i++)
ICD_row[i].col = new float[k];
RID_table *RID_row = new RID_table[k];
for(int i = 0; i < k ; i++)
RID_row[i].col = new float[k];
for(int i=0; i<k; i++)
for(int j=0; j<k; j++){
ICD_row[i].col[j] = 0;
}
//-----------------Algorithm Two-----------------------------------------------
//---Initialize parts for algotrihtm two-----------------------------------------
float* d_output;
int *d_index;
int *h_count = (int*)malloc(sizeof(int)*k);
float *d_input;
float *h_output = new float[Data*dimen];
int *count;
int *zero = new int[k];
for(int i = 0; i < k; i++) zero[i] = 0;
int threadsPerblock = 32;
int numBlocks = (Data + threadsPerblock - 1) / threadsPerblock;
float *d_point;
int *initial = new int[Data];
int *d_Fixedlabel;
float *h_temp = new float[dimen];
float *d_temp;
float *T_inti = new float[dimen];
for(int i = 0; i < dimen; i++) T_inti[i] = 0;
int *compareLabel = new int[Data];
for(int i = 0; i < Data; i++) compareLabel[i] = 0;
int mm = 0;
for(int i = 0; i < k; i++){
for(int j = 0; j < Data/k; j++){
initial[j+i*Data/k] = mm;
}
mm++;
}
float *h_M =( float*)malloc(sizeof(float)*Data);
float *d_M ;
float *initial_M = new float[Data];
for(int i = 0; i < Data; i++)
initial_M[i] = 0;
float *h_sum_M = (float*)malloc(sizeof(float));
int *Fixedlabel = new int[Data];
//--------------------------device core define-------------------------------
Core_t *d_core;
Core_t *core = (Core_t*)malloc(sizeof(Core_t)*k);
float *d_center;
//device matrix ICD and RID----
ICD_table *d_DDC;
ICD_table *DDC = (ICD_table*)malloc(sizeof(ICD_table)*k);
float *ICD_Value;
RID_table *d_RRD;
RID_table *RRD = (RID_table*)malloc(sizeof(RID_table)*k);
float *RRD_Value;
//------------algorithm two---------------------------------------------------
Getcentroid(dimen, Points ,h_core, k, Data);
int Jump = 0;
hipEventRecord(start,0);
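// Main loop: rebuild the ICD/RID tables on the host, copy centroids and tables
// to the device, run the assignment kernel (KmeanKernel) and then the centroid
// update kernel (Kmean), and stop once the label vector has stayed unchanged for
// more than 100 consecutive iterations. Note that the device buffers below are
// reallocated on every pass without being freed.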
do{
hipEventRecord(ICD_start, 0);
dimcalculate(h_core, ICD, k, dimen);
hipEventRecord(ICD_stop, 0);
hipEventRecord(RID_start, 0);
sorting(ICD_row, RID_row, k);
hipEventRecord(RID_stop, 0);
for(int d = 0; d < k; d++){
CUDA_SAFE_CALL( hipMalloc( (void**) &d_core, k*sizeof(Core_t)));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_center, dimen*sizeof(float)));
CUDA_SAFE_CALL( hipMemcpy( d_center, h_core[d].center, dimen*sizeof(float), hipMemcpyHostToDevice) );
core[d].label = h_core[d].label;
core[d].center = d_center;
CUDA_SAFE_CALL( hipMemcpy( d_core, core, k*sizeof(Core_t), hipMemcpyHostToDevice) );
}
for(int d = 0; d < k; d++){
CUDA_SAFE_CALL( hipMalloc( (void**) &d_DDC, k*sizeof(ICD_table)));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_RRD, k*sizeof(RID_table)));
CUDA_SAFE_CALL( hipMalloc( (void**) &ICD_Value, k*sizeof(float)));
CUDA_SAFE_CALL( hipMalloc( (void**) &RRD_Value, k*sizeof(float)));
CUDA_SAFE_CALL( hipMemcpy( ICD_Value, ICD_row[d].col, k*sizeof(float), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy( RRD_Value, RID_row[d].col, k*sizeof(float), hipMemcpyHostToDevice) );
DDC[d].col = ICD_Value;
RRD[d].col = RRD_Value;
CUDA_SAFE_CALL( hipMemcpy( d_DDC, DDC, k*sizeof(ICD_table), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy( d_RRD, RRD, k*sizeof(RID_table), hipMemcpyHostToDevice) );
}
//---------------------------------------------------------------------------
CUDA_SAFE_CALL( hipMalloc( (void**) &d_M, Data*sizeof(float)));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_Fixedlabel, Data*sizeof(int)));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_temp, dimen*sizeof(float)));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_point, Data*dimen*sizeof(float)));
CUDA_SAFE_CALL( hipMemcpy( d_Fixedlabel, initial, Data*sizeof(int), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy( d_point, FixedData, dimen*Data*sizeof(float), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy( d_temp, T_inti, dimen*sizeof(float), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy( d_M, initial_M, Data*sizeof(float), hipMemcpyHostToDevice) );
hipEventRecord(Kmean_Algor_start, 0);
hipLaunchKernelGGL(( KmeanKernel), dim3(numBlocks), dim3(threadsPerblock), Data*sizeof(int), 0, d_M,d_temp,d_core, d_point, d_DDC, d_RRD, k, dimen, d_Fixedlabel, Data);
hipEventRecord(Kmean_Algor_stop, 0);
CUDA_SAFE_CALL( hipMemcpy( Fixedlabel, d_Fixedlabel, Data*sizeof(int), hipMemcpyDeviceToHost) );
CUDA_SAFE_CALL( hipMemcpy( h_M, d_M, Data*sizeof(float), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL( hipMalloc( (void**) &count, k*sizeof(int)));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_input, Data*dimen*sizeof(float)));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_output, k*dimen*sizeof(float)));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_index, Data*sizeof(int)));
CUDA_SAFE_CALL( hipMemcpy( d_index, Fixedlabel, Data*sizeof(int), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy( d_input, FixedData, Data*dimen*sizeof(float), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy( count, zero, k*sizeof(int), hipMemcpyHostToDevice) );
float sum_M = 0;
for(int i = 0; i < Data; i++){
sum_M += h_M[i];
}
hipEventRecord(Kmean_start, 0);
hipLaunchKernelGGL(( Kmean), dim3(numBlocks), dim3(threadsPerblock), 0, 0, d_output, k, Data, d_input, dimen, d_index, count);
hipEventRecord(Kmean_stop, 0);
CUDA_SAFE_CALL( hipMemcpy( h_output, d_output, k*dimen*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_SAFE_CALL( hipMemcpy( h_count, count, k*sizeof(int), hipMemcpyDeviceToHost) );
for(int i =0; i < k; i++)
for(int j = 0; j < dimen; j++)
h_core[i].center[j] = h_output[i*dimen+j];
if(memcmp(compareLabel, Fixedlabel, Data*sizeof(int)) == 0) Jump++;
else Jump = 0;
if(Jump > 100) break;
memcpy(compareLabel, Fixedlabel, Data*sizeof(int));
for(int i = 0; i < k; i ++){
if(h_count[i] == 0){
Getcentroid(dimen, Points ,h_core, k, Data);
}
}
} while(1);
hipEventRecord(stop,0);
hipEventSynchronize(start);
hipEventSynchronize(ICD_start);
hipEventSynchronize(RID_start);
hipEventSynchronize(Kmean_start);
hipEventSynchronize(Kmean_Algor_start);
float Total_time;
float ICD_time;
float RID_time;
float Kmean_Algor_time;
float Kmean_time;
hipEventElapsedTime(&Total_time, start, stop);
hipEventElapsedTime(&ICD_time, ICD_start, ICD_stop);
hipEventElapsedTime(&RID_time, RID_start, RID_stop);
hipEventElapsedTime(&Kmean_Algor_time, Kmean_Algor_start, Kmean_Algor_stop);
hipEventElapsedTime(&Kmean_time, Kmean_start, Kmean_stop);
//for(int i = 0; i < k ; i++)
// printf("%d\t", h_count[i]);
printf("\n");
for(int i = 0; i< k; i++){
printf("Cluster center : [%d] ", i);
for(int j = 0; j < dimen; j++){
printf("%f\t", h_output[i*dimen+j]);
h_core[i].center[j] = h_output[i*dimen+j];
}
printf("\n");
}
printf("Total Processing time: %f\n", Total_time);
printf("ICD table processing time (one iteration): %f\n", ICD_time);
printf("RID table processing time (one iteration): %f\n", RID_time);
printf("Kmean Algorithm processing time (one iteration): %f\n", Kmean_Algor_time);
printf("Kmean calculation processing time (one iteration): %f\n", Kmean_time);
}
|
ffeaf2561e07336e635de30ce19a80943b0b1d29.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include "kmeansCPU.cu"
// includes, project
#include <cuda.h>
#define CUDA_SAFE_CALL_NO_SYNC(call) do { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
#define CUDA_SAFE_CALL(call) do { \
CUDA_SAFE_CALL_NO_SYNC(call); \
cudaError err = cudaThreadSynchronize(); \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
////////////////////////////////////////////////////////////////////////
typedef struct CoreLocat{
int label;
float *center;
}Core_t;
typedef struct ICD_t{
float *col;
}ICD_table;
typedef struct RID_t{
float *col;
}RID_table;
typedef struct SetData{
int *Group;
int label;
}Set;
typedef struct ClusterSet_t{
Set *GroupA;
}ClusterSet;
typedef struct RandCluster_t{
float *vector;
int label;
}RandCluster;
//-------------------------------------------------------------------------------------------
//Read data from file
//Initialize the centers of the clusters
Core_t Getcentroid(int dimen, RandCluster *h_sample, Core_t *core, int k, int data){
for(int i = 0; i < k; i++)
{
int randNum = rand()%(data);
core[i].center = h_sample[randNum].vector;
core[i].label = i;
}
return *core;
}
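// dimcalculate(): fills ICD[i][j] with the L1 (Manhattan) distance between
// centroids i and j; these inter-centroid distances feed the pruning test in
// KmeanKernel.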
float dimcalculate(Core_t *core,float **ICD, int k, int dimen){
for(int i = 0; i < k; i++){
for(int j=0 ; j<k; j++){
for(int d=0; d<dimen; d++){
ICD[i][j] += fabsf((float)core[i].center[d] - (float)core[j].center[d]);
}
}
}
return **ICD;
}
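// sorting(): sorts every ICD row into ascending order in place and records, in
// the corresponding RID row, the original centroid index of each sorted entry,
// so RID[i].col[j] names the j-th closest centroid to centroid i.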
RID_table sorting(ICD_table *ICD, RID_table *RID, int s){
int i, j,m;
float temp;
float array[s][s];
for(i = 0; i < s; i++ )
for(j = 0; j < s; j++)
array[i][j] = ICD[i].col[j];
for(i = 0; i < s; i++ )
for(j = 0; j < s ; j++)
for(m = j+1; m < s; m++){
if(ICD[i].col[j] > ICD[i].col[m]){
temp = ICD[i].col[j];
ICD[i].col[j] = ICD[i].col[m];
ICD[i].col[m] = temp;
}
}
for(i = 0; i < s; i++ )
for(j = 0; j < s; j++)
for(m = 0; m < s; m++){
if(array[i][m] == ICD[i].col[j])
RID[i].col[j] = m;
}
return *RID;
}
__device__
float dist(int index, float* d_point, float *core, int dimen ){
float result = 0;
for(int d=0; d<dimen; d++)
result += fabsf(core[d] - d_point[index*dimen+d]);
return result;
}
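// Assignment step: one thread per data point. Starting from the point's current
// centroid, candidates are taken from that centroid's RID row (centroid indices
// ordered by increasing inter-centroid distance), and the loop breaks as soon as
// the value read from the ICD row exceeds twice the point's distance to its
// current centroid (a triangle-inequality style early exit). d_M[i] stores the
// point's distance to its chosen centroid divided by Data, and label[i] the new
// assignment.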
__global__
void KmeanKernel(float* d_M, float* d_temp,Core_t *d_core, float *d_point,ICD_table *ICD, RID_table *RID, int k, int dimen,int* label, int Data){
int i = threadIdx.x + blockDim.x * blockIdx.x;
int oldCnt;
float oldDist;
int newCnt ;
int curCnt;
float curDist;
float newDist = 0;
int count = 0;
extern __shared__ int smem[];
if(i<Data){
smem[0] = 0;
for(int in = 0; in < Data; in++)
{
smem[in] = count;
count++;
};
__syncthreads();
oldCnt = label[i];
oldDist = dist(smem[i],d_point, d_core[oldCnt].center, dimen);
newCnt = oldCnt;
newDist = dist(smem[i], d_point,d_core[newCnt].center, dimen);
for(int j = 2; j < k; j++){
curCnt = RID[oldCnt].col[j];
if(ICD[oldCnt].col[curCnt] > 2*oldDist) break;
curDist = dist(smem[i],d_point, d_core[curCnt].center, dimen);
if(curDist < newDist){
newDist = curDist;
newCnt = curCnt;
}
}
d_M[i] = dist(smem[i], d_point, d_core[newCnt].center, dimen)/Data;
label[i] = newCnt;
}
__syncthreads();
}
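// Update step: one thread per cluster. Each thread counts the points assigned to
// it, accumulates their coordinates into d_output, and divides by the count to
// form the new centroid; a cluster that ends up empty copies the coordinates of
// data point tid instead.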
__global__
void Kmean( float *d_output, int k, int Data, float *d_point, int dimen, int*index, int *count){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < k){
for(int a = 0; a<Data; a++){
if(index[a] == tid)
atomicAdd(&count[tid],1);
}
for(int i = 0; i<Data; i++)
if(index[i] == tid){
for(int d =0; d<dimen; d++)
d_output[tid*dimen+d] += d_point[i*dimen+d] ;
}
for(int d = 0; d<dimen; d++){
if(count[tid] == 0)
d_output[tid*dimen+d] = d_point[tid*dimen+d];
else
d_output[tid*dimen+d] = d_output[tid*dimen+d]/count[tid] ;
}
}
}
int main(int argc, char **argv){
cudaEvent_t ICD_start, ICD_stop;
cudaEventCreate(&ICD_start);
cudaEventCreate(&ICD_stop);
cudaEvent_t RID_start, RID_stop;
cudaEventCreate(&RID_start);
cudaEventCreate(&RID_stop);
cudaEvent_t Kmean_start, Kmean_stop;
cudaEventCreate(&Kmean_start);
cudaEventCreate(&Kmean_stop);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEvent_t Kmean_Algor_start, Kmean_Algor_stop;
cudaEventCreate(&Kmean_Algor_start);
cudaEventCreate(&Kmean_Algor_stop);
srand(time(NULL));
if (argc < 2) {
fprintf(stderr, "Usage: %s dimension numData Cluster\n", argv[0]);
exit(1);
}
int k = atoi(argv[1]); //Cluster
printf("Value K: %d\n", k);
//---------------start reading data-------------------------
int sample;
int numberOfData = 0;
FILE *file = fopen("test.txt", "r");
if(file == NULL){
printf("Error opening file!\n");
exit(1);
}
int ch;
int numberofLines =0;
while((ch = fgetc(file))!=EOF)
if(ch == '\n') numberofLines++;
fclose(file);
file = fopen("test.txt", "r");
while(fscanf(file, "%d ", &sample) != EOF){
numberOfData++;
}
fclose(file);
//Read the data from file
int Data = numberofLines;
int n = numberOfData;
int m = numberofLines;
int dimen = (n/m);
RandCluster *Points = new RandCluster[ Data];
for(int i = 0; i < Data; i++)
Points[i].vector = new float[dimen];
float *FixedData;
FixedData = (float*)malloc(sizeof(float)*Data*dimen);
file = fopen("test.txt","r");
float **temp;
temp = (float**)malloc(Data*sizeof(float*));
for(int i=0; i<Data; i++)
temp[i] = (float*)malloc(sizeof(float)*dimen);
while(!feof(file)){
for(int i = 0; i < Data ; i++){
for(int j = 0; j < dimen; j++){
fscanf(file,"%f ",& temp[i][j]);
Points[i].vector[j] = temp[i][j];
FixedData[j+i*dimen] = Points[i].vector[j];
Points[i].label = 0;
}
}
}
fclose(file);
//(----------------Read data ends------------------
Core_t *h_core;
float **ICD; // k x k table of inter-centroid distances
ICD = (float**)malloc(k*sizeof(float*));
for(int i = 0; i<k; i++)
ICD[i] = (float*)malloc(sizeof(float)*k);
for(int i = 0; i < k; i++)
for(int j = 0; j < k; j++)
ICD[i][j] = 0;
//initialize function set
h_core = (Core_t*)malloc(sizeof(Core_t)*k);
for(int i = 0; i < k ; i++)
h_core[i].center = (float*)malloc(sizeof(float)*dimen);
ICD_table *ICD_row = (ICD_table*)malloc(sizeof( ICD_table)*k);
for(int i = 0; i < k ; i++)
ICD_row[i].col = new float[k];
RID_table *RID_row = new RID_table[k];
for(int i = 0; i < k ; i++)
RID_row[i].col = new float[k];
for(int i=0; i<k; i++)
for(int j=0; j<k; j++){
ICD_row[i].col[j] = 0;
}
//-----------------Algorithm Two-----------------------------------------------
//---Initialize parts for algotrihtm two-----------------------------------------
float* d_output;
int *d_index;
int *h_count = (int*)malloc(sizeof(int)*k);
float *d_input;
float *h_output = new float[Data*dimen];
int *count;
int *zero = new int[k];
for(int i = 0; i < k; i++) zero[i] = 0;
int threadsPerblock = 32;
int numBlocks = (Data + threadsPerblock - 1) / threadsPerblock;
float *d_point;
int *initial = new int[Data];
int *d_Fixedlabel;
float *h_temp = new float[dimen];
float *d_temp;
float *T_inti = new float[dimen];
for(int i = 0; i < dimen; i++) T_inti[i] = 0;
int *compareLabel = new int[Data];
for(int i = 0; i < Data; i++) compareLabel[i] = 0;
int mm = 0;
for(int i = 0; i < k; i++){
for(int j = 0; j < Data/k; j++){
initial[j+i*Data/k] = mm;
}
mm++;
}
float *h_M =( float*)malloc(sizeof(float)*Data);
float *d_M ;
float *initial_M = new float[Data];
for(int i = 0; i < Data; i++)
initial_M[i] = 0;
float *h_sum_M = (float*)malloc(sizeof(float));
int *Fixedlabel = new int[Data];
//--------------------------device core define-------------------------------
Core_t *d_core;
Core_t *core = (Core_t*)malloc(sizeof(Core_t)*k);
float *d_center;
//device matrix ICD and RID----
ICD_table *d_DDC;
ICD_table *DDC = (ICD_table*)malloc(sizeof(ICD_table)*k);
float *ICD_Value;
RID_table *d_RRD;
RID_table *RRD = (RID_table*)malloc(sizeof(RID_table)*k);
float *RRD_Value;
//------------algorithm two---------------------------------------------------
Getcentroid(dimen, Points ,h_core, k, Data);
int Jump = 0;
cudaEventRecord(start,0);
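// Main loop: rebuild the ICD/RID tables on the host, copy centroids and tables
// to the device, run the assignment kernel (KmeanKernel) and then the centroid
// update kernel (Kmean), and stop once the label vector has stayed unchanged for
// more than 100 consecutive iterations. Note that the device buffers below are
// reallocated on every pass without being freed.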
do{
cudaEventRecord(ICD_start, 0);
dimcalculate(h_core, ICD, k, dimen);
cudaEventRecord(ICD_stop, 0);
cudaEventRecord(RID_start, 0);
sorting(ICD_row, RID_row, k);
cudaEventRecord(RID_stop, 0);
for(int d = 0; d < k; d++){
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_core, k*sizeof(Core_t)));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_center, dimen*sizeof(float)));
CUDA_SAFE_CALL( cudaMemcpy( d_center, h_core[d].center, dimen*sizeof(float), cudaMemcpyHostToDevice) );
core[d].label = h_core[d].label;
core[d].center = d_center;
CUDA_SAFE_CALL( cudaMemcpy( d_core, core, k*sizeof(Core_t), cudaMemcpyHostToDevice) );
}
for(int d = 0; d < k; d++){
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_DDC, k*sizeof(ICD_table)));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_RRD, k*sizeof(RID_table)));
CUDA_SAFE_CALL( cudaMalloc( (void**) &ICD_Value, k*sizeof(float)));
CUDA_SAFE_CALL( cudaMalloc( (void**) &RRD_Value, k*sizeof(float)));
CUDA_SAFE_CALL( cudaMemcpy( ICD_Value, ICD_row[d].col, k*sizeof(float), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy( RRD_Value, RID_row[d].col, k*sizeof(float), cudaMemcpyHostToDevice) );
DDC[d].col = ICD_Value;
RRD[d].col = RRD_Value;
CUDA_SAFE_CALL( cudaMemcpy( d_DDC, DDC, k*sizeof(ICD_table), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy( d_RRD, RRD, k*sizeof(RID_table), cudaMemcpyHostToDevice) );
}
//---------------------------------------------------------------------------
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_M, Data*sizeof(float)));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_Fixedlabel, Data*sizeof(int)));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_temp, dimen*sizeof(float)));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_point, Data*dimen*sizeof(float)));
CUDA_SAFE_CALL( cudaMemcpy( d_Fixedlabel, initial, Data*sizeof(int), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy( d_point, FixedData, dimen*Data*sizeof(float), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy( d_temp, T_inti, dimen*sizeof(float), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy( d_M, initial_M, Data*sizeof(float), cudaMemcpyHostToDevice) );
cudaEventRecord(Kmean_Algor_start, 0);
KmeanKernel<<<numBlocks, threadsPerblock, Data*sizeof(int)>>>(d_M,d_temp,d_core, d_point, d_DDC, d_RRD, k, dimen, d_Fixedlabel, Data);
cudaEventRecord(Kmean_Algor_stop, 0);
CUDA_SAFE_CALL( cudaMemcpy( Fixedlabel, d_Fixedlabel, Data*sizeof(int), cudaMemcpyDeviceToHost) );
CUDA_SAFE_CALL( cudaMemcpy( h_M, d_M, Data*sizeof(float), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL( cudaMalloc( (void**) &count, k*sizeof(int)));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_input, Data*dimen*sizeof(float)));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_output, k*dimen*sizeof(float)));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_index, Data*sizeof(int)));
CUDA_SAFE_CALL( cudaMemcpy( d_index, Fixedlabel, Data*sizeof(int), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy( d_input, FixedData, Data*dimen*sizeof(float), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy( count, zero, k*sizeof(int), cudaMemcpyHostToDevice) );
float sum_M = 0;
for(int i = 0; i < Data; i++){
sum_M += h_M[i];
}
cudaEventRecord(Kmean_start, 0);
Kmean<<<numBlocks, threadsPerblock>>>( d_output, k, Data, d_input, dimen, d_index, count);
cudaEventRecord(Kmean_stop, 0);
CUDA_SAFE_CALL( cudaMemcpy( h_output, d_output, k*dimen*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_SAFE_CALL( cudaMemcpy( h_count, count, k*sizeof(int), cudaMemcpyDeviceToHost) );
for(int i =0; i < k; i++)
for(int j = 0; j < dimen; j++)
h_core[i].center[j] = h_output[i*dimen+j];
if(memcmp(compareLabel, Fixedlabel, Data*sizeof(int)) == 0) Jump++;
else Jump = 0;
if(Jump > 100) break;
memcpy(compareLabel, Fixedlabel, Data*sizeof(int));
for(int i = 0; i < k; i ++){
if(h_count[i] == 0){
Getcentroid(dimen, Points ,h_core, k, Data);
}
}
} while(1);
cudaEventRecord(stop,0);
cudaEventSynchronize(start);
cudaEventSynchronize(ICD_start);
cudaEventSynchronize(RID_start);
cudaEventSynchronize(Kmean_start);
cudaEventSynchronize(Kmean_Algor_start);
float Total_time;
float ICD_time;
float RID_time;
float Kmean_Algor_time;
float Kmean_time;
cudaEventElapsedTime(&Total_time, start, stop);
cudaEventElapsedTime(&ICD_time, ICD_start, ICD_stop);
cudaEventElapsedTime(&RID_time, RID_start, RID_stop);
cudaEventElapsedTime(&Kmean_Algor_time, Kmean_Algor_start, Kmean_Algor_stop);
cudaEventElapsedTime(&Kmean_time, Kmean_start, Kmean_stop);
//for(int i = 0; i < k ; i++)
// printf("%d\t", h_count[i]);
printf("\n");
for(int i = 0; i< k; i++){
printf("Cluster center : [%d] ", i);
for(int j = 0; j < dimen; j++){
printf("%f\t", h_output[i*dimen+j]);
h_core[i].center[j] = h_output[i*dimen+j];
}
printf("\n");
}
printf("Total Processing time: %f\n", Total_time);
printf("ICD table processing time (one iteration): %f\n", ICD_time);
printf("RID table processing time (one iteration): %f\n", RID_time);
printf("Kmean Algorithm processing time (one iteration): %f\n", Kmean_Algor_time);
printf("Kmean calculation processing time (one iteration): %f\n", Kmean_time);
}
|
62befceaec8fbfca8b8b39400945ef42067df2e3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zhemv_mgpu.cu, normal z -> s, Sun Nov 20 20:20:31 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_s.h"
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
__global__ void
ssymv_kernel_L_mgpu(
int n,
float const * __restrict__ A, int lda,
float const * __restrict__ x, int incx,
float * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset )
{
#if (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// GPUs are renumbered so that GPU 0 starts with block 0, GPU 1 starts with block 1, etc.
if ( blk < my_gpu_id ) {
return;
}
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
float psum, psum_t;
float total = MAGMA_S_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ float sA [quarter_NB_X][NB_X + 2]; // TODO +3 used in ssymv (single GPU); why?
__shared__ float sx_blk[NB_X]; // for x[ blk ]
__shared__ float sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
float rA[4];
float psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
// GPUs are renumbered so that GPU 0 has block 0, which is partial of offset.
if ( (partial && tx >= partial) ||
(blk == 0 /*&& my_gpu_id == 0*/ && tx < block_offset) ) {
sx_blk[tx] = MAGMA_S_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_S_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_S_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_S_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=my_gpu_id; jj < blk; jj += ngpu) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
// only the first block column (jj=0, on GPU 0) deals with offset
if ( ty == 0 ) {
if ( jj == 0 && tx < block_offset ) {
sx_jj[tx] = MAGMA_S_ZERO;
}
else {
sx_jj[tx] = x[jj*NB_X*incx];
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_S_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
// end ssymv_kernel_L_mgpu
/***************************************************************************//**
Lower case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ]
[ (A31*x1 + A32*x2 + A33*x3) ]
Note beta*y is not included here; see magmablas_ssymv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * x x x x ] blk=0 * data for non-transposed row w_blk = A_{blk,1:blk} * x_{1:blk}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ * x x ] blk=2 blanks are not set
[ * ] blk=3
[ * ] blk=4
[ ] blk=0 (blank)
work[gpu=1] = [ * x x x ] blk=1
[ * ] blk=2
[ * x ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries left of the diagonal blocks are not accessed.
Blank rows, where a GPU has no data to contribute, are explicitly set to zero in y.
[ * + x + x + x ]
y[gpu=0] = [ * ]
[ * + x ]
[ * ]
[ 0 ] (explicitly set to 0)
y[gpu=1] = [ * + x + x ]
[ * ]
[ * ]
*******************************************************************************/
__global__ void
ssymv_kernel_L_mgpu_sum(
int n,
float alpha,
int lda,
float * __restrict__ y, int incy,
float const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
float Ax = MAGMA_S_ZERO;
// GPUs are renumbered so that GPU 0 starts with block 0,
// GPU 1 starts with block 1, etc.,
// therefore only blk >= my_gpu_id have non-zero data.
if ( blk >= my_gpu_id ) {
work += ind;
// if this GPU owns block-column blk, all blocks j=[blk, ..., blocks) contain data;
// else only block j=blk contains data.
int last = blocks-1;
if ( blk % ngpu != my_gpu_id ) {
last = blk;
}
for (int j = blk; j <= last; ++j) {
Ax += work[j*lda];
}
}
y[ind * incy] = alpha*Ax; // see magmablas_ssymv_mgpu_sync for beta*y
}
}
// end ssymv_kernel_L_mgpu_sum
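// A minimal serial sketch (not part of MAGMA) of what the kernel above computes
// on each GPU, written against a hypothetical host-side copy of work; shown only
// to clarify the ownership ("last") logic. The helper name and host buffers are
// assumptions for illustration.
#if 0
static void ssymv_sum_reference(
    int n, float alpha, int lda, float *y, int incy,
    const float *work, int my_gpu_id, int ngpu, int block_offset, int blocks )
{
    for (int blk = 0; blk < blocks; ++blk) {
        for (int tx = 0; tx < NB_X; ++tx) {
            int ind = blk*NB_X + tx;
            if ( ind < block_offset || ind >= n + block_offset )
                continue;                        // don't write outside the vector
            float Ax = MAGMA_S_ZERO;
            if ( blk >= my_gpu_id ) {
                // the owner of block column blk has data in blocks j = blk, ..., blocks-1;
                // a non-owner only has the transposed contribution at j = blk.
                int last = (blk % ngpu == my_gpu_id) ? blocks-1 : blk;
                for (int j = blk; j <= last; ++j)
                    Ax += work[ind + j*lda];
            }
            y[ind*incy] = alpha*Ax;              // beta*y is applied later on the CPU
        }
    }
}
#endif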
/***************************************************************************//**
Purpose
-------
magmablas_ssymv_mgpu performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced. **Not currently supported.**
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha REAL.
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_lA Array of pointers, dimension (ngpu), to block-column distributed
matrix A, with block size nb.
d_lA[dev] is a REAL array on GPU dev, of
dimension (LDDA, nlocal), where
\n
{ floor(n/nb/ngpu)*nb + nb if dev < floor(n/nb) % ngpu,
nlocal = { floor(n/nb/ngpu)*nb + n%nb if dev == floor(n/nb) % ngpu,
{ floor(n/nb/ngpu)*nb otherwise.
\n
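As an illustrative example (values chosen here for concreteness): with
n=300, nb=64, ngpu=2 there are floor(n/nb)=4 full blocks plus a partial
block of n%nb=44 rows, distributed round-robin, so nlocal = 2*64 + 44 = 172
on dev 0 (blocks 0, 2, 4) and nlocal = 2*64 = 128 on dev 1 (blocks 1, 3).
\n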
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
offset INTEGER.
Row & column offset to start of matrix A within the distributed d_lA
structure. Note that N is the size of this multiply, excluding the
offset, so the size of the original parent matrix is N+offset.
Also, x and y do not have an offset.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n + offset ).
It is recommended that ldda be a multiple of 16. Otherwise,
performance would be degraded because the memory accesses
would not be fully coalesced.
@param[in]
x REAL array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
y REAL array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param
hwork (workspace) REAL array on the CPU, of dimension (lhwork).
@param[in]
lhwork INTEGER.
The dimension of the array hwork. lhwork >= ngpu*nb.
@param
dwork (workspaces) Array of pointers, dimension (ngpu), to workspace on each GPU.
dwork[dev] is a REAL array on GPU dev, of dimension (ldwork).
@param[in]
ldwork INTEGER.
The dimension of each array dwork[dev].
ldwork >= ldda*( ceil((n + offset % nb) / nb) + 1 ).
@param[in]
ngpu INTEGER.
The number of GPUs to use.
@param[in]
nb INTEGER.
The block size used for distributing d_lA. Must be 64.
@param[in]
queues magma_queue_t array of dimension (ngpu).
queues[dev] is an execution queue on GPU dev.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_ssymv_mgpu(
magma_uplo_t uplo,
magma_int_t n,
float alpha,
magmaFloat_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset,
float const *x, magma_int_t incx,
float beta, // unused, see magmablas_ssymv_mgpu_sync
float *y, magma_int_t incy, // unused
float *hwork, magma_int_t lhwork,
magmaFloat_ptr dwork[], magma_int_t ldwork,
magma_int_t ngpu,
magma_int_t nb,
magma_queue_t queues[] )
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
bool upper = (uplo == MagmaUpper);
magma_int_t offset_block_id = offset / NB_X;
magma_int_t offset_gpu_id = offset_block_id % ngpu;
magma_int_t block_offset = offset % NB_X;
magma_int_t blocks = magma_ceildiv( n + block_offset, NB_X );
magma_int_t ldwmin = ldda*(blocks + 1);
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( (! upper) && (uplo != MagmaLower) ) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
} else if ( ldwork < ldwmin ) {
info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
magma_int_t dev;
for (dev=0; dev < ngpu; dev++) {
magma_setdevice( dev );
// blocks before the offset block
magma_int_t num_blocks_skipped = offset_block_id / ngpu;
if ( dev < offset_gpu_id ) {
num_blocks_skipped += 1;
}
// shift dA to first block >= offset block that is owned by this GPU
float const *dA_dev = d_lA[dev] + offset_block_id*NB_X + num_blocks_skipped*NB_X*ldda;
// first column of dwork is to broadcast x to all GPUs.
// remaining blocks number of columns is for partial sums from
// each block, as in single GPU version.
float *dx_dev = dwork[dev];
float *dwork_dev = dwork[dev] + ldda;
// renumber GPUs starting from the offset block
magma_int_t new_gpu_id = (dev + ngpu - offset_gpu_id) % ngpu;
dim3 grid( blocks, 1 );
// copy x to each GPU
magma_ssetvector_async( n, x, incx, dx_dev + block_offset, 1, queues[dev] );
// perform work = A*x, partial row sums
dim3 threads( NB_X, NB_Y );
// perform w = sum( work ), larger partial row sums
dim3 threads_sum( NB_X, 1 );
if ( upper ) {
hipLaunchKernelGGL(( ssymv_kernel_U_mgpu), dim3(grid), dim3(threads), 0, queues[dev]->cuda_stream() ,
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
hipLaunchKernelGGL(( ssymv_kernel_U_mgpu_sum), dim3(grid), dim3(threads_sum), 0, queues[dev]->cuda_stream() ,
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
else {
hipLaunchKernelGGL(( ssymv_kernel_L_mgpu), dim3(grid), dim3(threads), 0, queues[dev]->cuda_stream() ,
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
hipLaunchKernelGGL(( ssymv_kernel_L_mgpu_sum), dim3(grid), dim3(threads_sum), 0, queues[dev]->cuda_stream() ,
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
}
// 2nd loop in case hwork is not pinned, causing this to be sync instead of async.
for (dev=0; dev < ngpu; dev++) {
// copy w to CPU
magma_setdevice( dev );
float *dx_dev = dwork[dev];
magma_sgetvector_async( n, dx_dev + block_offset, 1, &hwork[dev*n], 1, queues[dev] );
}
// see magmablas_ssymv_mgpu_sync for final row sums
magma_setdevice( orig_dev );
return info;
}
/***************************************************************************//**
Synchronizes and accumulates the final ssymv result.
For convenience, the parameters are identical to magmablas_ssymv_mgpu
(though some are unused here).
@see magmablas_ssymv_mgpu
@ingroup magma_hemv
*******************************************************************************/
extern "C" magma_int_t
magmablas_ssymv_mgpu_sync(
magma_uplo_t uplo, // unused, see magmablas_ssymv_mgpu
magma_int_t n,
float alpha, // unused
magmaFloat_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset, // unused
float const *x, magma_int_t incx, // unused
float beta,
float *y, magma_int_t incy, // unused
float *hwork, magma_int_t lhwork,
magmaFloat_ptr dwork[], magma_int_t ldwork, // unused
magma_int_t ngpu,
magma_int_t nb, // unused
magma_queue_t queues[] )
{
const float c_one = MAGMA_S_ONE;
const magma_int_t ione = 1;
magma_device_t dev;
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
//if ( (! upper) && (uplo != MagmaLower) ) { // unused
// info = -1;
//} else
if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
//} else if ( ldwork < ldwmin ) { // unused
// info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
// scale y = beta*y
blasf77_sscal( &n, &beta, y, &incy );
// sum reduce, y += sum( hwork )
for (dev=0; dev < ngpu; ++dev) {
magma_setdevice( dev );
magma_queue_sync( queues[dev] );
blasf77_saxpy( &n, &c_one, &hwork[dev*n], &ione, y, &ione );
}
magma_setdevice( orig_dev );
return info;
}
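// Illustrative usage sketch (not part of the original file): the two entry
// points above are intended to be called as a pair -- magmablas_ssymv_mgpu
// launches the per-GPU work asynchronously, and magmablas_ssymv_mgpu_sync
// waits for it and accumulates the result into y. All buffers, queues, and
// sizes below are assumed to have been set up by the caller.
#if 0
magma_int_t info;
info = magmablas_ssymv_mgpu( MagmaLower, n, alpha, d_lA, ldda, offset,
                             x, incx, beta, y, incy,
                             hwork, lhwork, dwork, ldwork, ngpu, nb, queues );
if ( info == 0 ) {
    info = magmablas_ssymv_mgpu_sync( MagmaLower, n, alpha, d_lA, ldda, offset,
                                      x, incx, beta, y, incy,
                                      hwork, lhwork, dwork, ldwork, ngpu, nb, queues );
}
#endif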
|
62befceaec8fbfca8b8b39400945ef42067df2e3.cu
|
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zhemv_mgpu.cu, normal z -> s, Sun Nov 20 20:20:31 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_s.h"
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
__global__ void
ssymv_kernel_L_mgpu(
int n,
float const * __restrict__ A, int lda,
float const * __restrict__ x, int incx,
float * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset )
{
#if (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// GPUs are renumbered so that GPU 0 starts with block 0, GPU 1 starts with block 1, etc.
if ( blk < my_gpu_id ) {
return;
}
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
float psum, psum_t;
float total = MAGMA_S_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ float sA [quarter_NB_X][NB_X + 2]; // TODO +3 used in ssymv (single GPU); why?
__shared__ float sx_blk[NB_X]; // for x[ blk ]
__shared__ float sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
float rA[4];
float psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
// GPUs are renumbered so that GPU 0 has block 0, which is partial of offset.
if ( (partial && tx >= partial) ||
(blk == 0 /*&& my_gpu_id == 0*/ && tx < block_offset) ) {
sx_blk[tx] = MAGMA_S_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_S_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_S_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_S_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_S_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=my_gpu_id; jj < blk; jj += ngpu) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
// only the first block column (jj=0, on GPU 0) deals with offset
if ( ty == 0 ) {
if ( jj == 0 && tx < block_offset ) {
sx_jj[tx] = MAGMA_S_ZERO;
}
else {
sx_jj[tx] = x[jj*NB_X*incx];
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_S_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_S_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
// end ssymv_kernel_L_mgpu
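// A brief sketch (for reference only; the real setup is in magmablas_ssymv_mgpu
// below) of the launch geometry this kernel expects: one thread block of
// NB_X x NB_Y threads per 64-row block of the matrix. The stream variable is a
// placeholder for queues[dev]->cuda_stream().
#if 0
int blocks = magma_ceildiv( n + block_offset, NB_X );
dim3 grid( blocks, 1 );
dim3 threads( NB_X, NB_Y );   // 64 x 4
ssymv_kernel_L_mgpu<<< grid, threads, 0, stream >>>(
    n, dA_dev, ldda, dx_dev, 1, dwork_dev, new_gpu_id, ngpu, block_offset );
#endif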
/***************************************************************************//**
Lower case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ]
[ (A31*x1 + A32*x2 + A33*x3) ]
Note beta*y is not included here; see magmablas_ssymv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * x x x x ] blk=0 * data for non-transposed row w_blk = A_{blk,1:blk} * x_{1:blk}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ * x x ] blk=2 blanks are not set
[ * ] blk=3
[ * ] blk=4
[ ] blk=0 (blank)
work[gpu=1] = [ * x x x ] blk=1
[ * ] blk=2
[ * x ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries left of the diagonal blocks are not accessed.
Blank rows, where a GPU has no data to contribute, are explicitly set to zero in y.
[ * + x + x + x ]
y[gpu=0] = [ * ]
[ * + x ]
[ * ]
[ 0 ] (explicitly set to 0)
y[gpu=1] = [ * + x + x ]
[ * ]
[ * ]
*******************************************************************************/
__global__ void
ssymv_kernel_L_mgpu_sum(
int n,
float alpha,
int lda,
float * __restrict__ y, int incy,
float const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
float Ax = MAGMA_S_ZERO;
// GPUs are renumbered so that GPU 0 starts with block 0,
// GPU 1 starts with block 1, etc.,
// therefore only blk >= my_gpu_id have non-zero data.
if ( blk >= my_gpu_id ) {
work += ind;
// if this GPU owns block-column blk, all blocks j=[blk, ..., blocks) contain data;
// else only block j=blk contains data.
int last = blocks-1;
if ( blk % ngpu != my_gpu_id ) {
last = blk;
}
for (int j = blk; j <= last; ++j) {
Ax += work[j*lda];
}
}
y[ind * incy] = alpha*Ax; // see magmablas_ssymv_mgpu_sync for beta*y
}
}
// end ssymv_kernel_L_mgpu_sum
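// Worked example (illustrative values: ngpu=2 and 5 blocks, as in the diagram
// above) of which columns j of work each block row sums on the renumbered
// GPU 0 (my_gpu_id = 0):
//   blk=0: owner (0 % 2 == 0), sums j = 0..4
//   blk=1: not owner, sums j = 1 only
//   blk=2: owner, sums j = 2..4
//   blk=3: not owner, sums j = 3 only
//   blk=4: owner, sums j = 4 only
// On GPU 1 (my_gpu_id = 1), blk=0 contributes nothing, so y there is set to 0.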
/***************************************************************************//**
Purpose
-------
magmablas_ssymv_mgpu performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced. **Not currently supported.**
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha REAL.
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_lA Array of pointers, dimension (ngpu), to block-column distributed
matrix A, with block size nb.
d_lA[dev] is a REAL array on GPU dev, of
dimension (LDDA, nlocal), where
\n
{ floor(n/nb/ngpu)*nb + nb if dev < floor(n/nb) % ngpu,
nlocal = { floor(n/nb/ngpu)*nb + n%nb if dev == floor(n/nb) % ngpu,
{ floor(n/nb/ngpu)*nb otherwise.
\n
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
offset INTEGER.
Row & column offset to start of matrix A within the distributed d_lA
structure. Note that N is the size of this multiply, excluding the
offset, so the size of the original parent matrix is N+offset.
Also, x and y do not have an offset.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n + offset ).
It is recommended that ldda be a multiple of 16. Otherwise,
performance would be degraded because the memory accesses
would not be fully coalesced.
@param[in]
x REAL array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
y REAL array **on the CPU** (not the GPU), of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param
hwork (workspace) REAL array on the CPU, of dimension (lhwork).
@param[in]
lhwork INTEGER.
The dimension of the array hwork. lhwork >= ngpu*nb.
@param
dwork (workspaces) Array of pointers, dimension (ngpu), to workspace on each GPU.
dwork[dev] is a REAL array on GPU dev, of dimension (ldwork).
@param[in]
ldwork INTEGER.
The dimension of each array dwork[dev].
ldwork >= ldda*( ceil((n + offset % nb) / nb) + 1 ).
@param[in]
ngpu INTEGER.
The number of GPUs to use.
@param[in]
nb INTEGER.
The block size used for distributing d_lA. Must be 64.
@param[in]
queues magma_queue_t array of dimension (ngpu).
queues[dev] is an execution queue on GPU dev.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_ssymv_mgpu(
magma_uplo_t uplo,
magma_int_t n,
float alpha,
magmaFloat_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset,
float const *x, magma_int_t incx,
float beta, // unused, see magmablas_ssymv_mgpu_sync
float *y, magma_int_t incy, // unused
float *hwork, magma_int_t lhwork,
magmaFloat_ptr dwork[], magma_int_t ldwork,
magma_int_t ngpu,
magma_int_t nb,
magma_queue_t queues[] )
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// no CUDA ARCH 1.x version
fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ );
return MAGMA_ERR_NOT_SUPPORTED;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
bool upper = (uplo == MagmaUpper);
magma_int_t offset_block_id = offset / NB_X;
magma_int_t offset_gpu_id = offset_block_id % ngpu;
magma_int_t block_offset = offset % NB_X;
magma_int_t blocks = magma_ceildiv( n + block_offset, NB_X );
magma_int_t ldwmin = ldda*(blocks + 1);
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( (! upper) && (uplo != MagmaLower) ) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
} else if ( ldwork < ldwmin ) {
info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
magma_int_t dev;
for (dev=0; dev < ngpu; dev++) {
magma_setdevice( dev );
// blocks before the offset block
magma_int_t num_blocks_skipped = offset_block_id / ngpu;
if ( dev < offset_gpu_id ) {
num_blocks_skipped += 1;
}
// shift dA to first block >= offset block that is owned by this GPU
float const *dA_dev = d_lA[dev] + offset_block_id*NB_X + num_blocks_skipped*NB_X*ldda;
// first column of dwork is to broadcast x to all GPUs.
// remaining blocks number of columns is for partial sums from
// each block, as in single GPU version.
float *dx_dev = dwork[dev];
float *dwork_dev = dwork[dev] + ldda;
// renumber GPUs starting from the offset block
magma_int_t new_gpu_id = (dev + ngpu - offset_gpu_id) % ngpu;
dim3 grid( blocks, 1 );
// copy x to each GPU
magma_ssetvector_async( n, x, incx, dx_dev + block_offset, 1, queues[dev] );
// perform work = A*x, partial row sums
dim3 threads( NB_X, NB_Y );
// perform w = sum( work ), larger partial row sums
dim3 threads_sum( NB_X, 1 );
if ( upper ) {
ssymv_kernel_U_mgpu<<< grid, threads, 0, queues[dev]->cuda_stream() >>>(
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
ssymv_kernel_U_mgpu_sum<<< grid, threads_sum, 0, queues[dev]->cuda_stream() >>>(
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
else {
ssymv_kernel_L_mgpu<<< grid, threads, 0, queues[dev]->cuda_stream() >>>(
n, dA_dev, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
ssymv_kernel_L_mgpu_sum<<< grid, threads_sum, 0, queues[dev]->cuda_stream() >>>(
n, alpha, ldda, dx_dev, 1, dwork_dev,
new_gpu_id, ngpu, block_offset );
}
}
// 2nd loop in case hwork is not pinned, causing this to be sync instead of async.
for (dev=0; dev < ngpu; dev++) {
// copy w to CPU
magma_setdevice( dev );
float *dx_dev = dwork[dev];
magma_sgetvector_async( n, dx_dev + block_offset, 1, &hwork[dev*n], 1, queues[dev] );
}
// see magmablas_ssymv_mgpu_sync for final row sums
magma_setdevice( orig_dev );
return info;
}
/***************************************************************************//**
Synchronizes and accumulates the final ssymv result.
For convenience, the parameters are identical to magmablas_ssymv_mgpu
(though some are unused here).
@see magmablas_ssymv_mgpu
@ingroup magma_hemv
*******************************************************************************/
extern "C" magma_int_t
magmablas_ssymv_mgpu_sync(
magma_uplo_t uplo, // unused, see magmablas_ssymv_mgpu
magma_int_t n,
float alpha, // unused
magmaFloat_const_ptr const d_lA[], magma_int_t ldda,
magma_int_t offset, // unused
float const *x, magma_int_t incx, // unused
float beta,
float *y, magma_int_t incy, // unused
float *hwork, magma_int_t lhwork,
magmaFloat_ptr dwork[], magma_int_t ldwork, // unused
magma_int_t ngpu,
magma_int_t nb, // unused
magma_queue_t queues[] )
{
const float c_one = MAGMA_S_ONE;
const magma_int_t ione = 1;
magma_device_t dev;
magma_int_t lhwmin = n*ngpu;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
//if ( (! upper) && (uplo != MagmaLower) ) { // unused
// info = -1;
//} else
if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1,n+offset) ) {
info = -5;
} else if ( offset < 0 ) {
info = -6;
} else if ( incx == 0 ) {
info = -8;
} else if ( incy == 0 ) {
info = -11;
} else if ( lhwork < lhwmin ) {
info = -13;
//} else if ( ldwork < ldwmin ) { // unused
// info = -15;
} else if ( ngpu < 1 ) {
info = -16;
} else if ( nb != NB_X ) {
info = -17;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( n == 0 )
return info;
magma_device_t orig_dev;
magma_getdevice( &orig_dev );
// scale y = beta*y
blasf77_sscal( &n, &beta, y, &incy );
// sum reduce, y += sum( hwork )
for (dev=0; dev < ngpu; ++dev) {
magma_setdevice( dev );
magma_queue_sync( queues[dev] );
blasf77_saxpy( &n, &c_one, &hwork[dev*n], &ione, y, &ione );
}
magma_setdevice( orig_dev );
return info;
}
|
84d952df94862b2a08d46562912a682527b57aff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vector_gamma.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const REAL *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
const int offset_x = 1;
const int stride_x = 1;
REAL *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
const int offset_y = 1;
const int stride_y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
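// The two while loops above round iXSIZE/iYSIZE up to multiples of the block
// size; an equivalent (sketch) formulation using a ceiling division would be:
//   dim3 gridBlock( (XSIZE + BLOCKX - 1) / BLOCKX, (YSIZE + BLOCKY - 1) / BLOCKY );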
hipFree(0);
hipLaunchKernelGGL(vector_gamma, dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, offset_x, stride_x, y, offset_y, stride_y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(vector_gamma, dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, offset_x, stride_x, y, offset_y, stride_y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(vector_gamma, dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, offset_x, stride_x, y, offset_y, stride_y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
84d952df94862b2a08d46562912a682527b57aff.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vector_gamma.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const REAL *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
const int offset_x = 1;
const int stride_x = 1;
REAL *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
const int offset_y = 1;
const int stride_y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vector_gamma<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vector_gamma<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vector_gamma<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
76f0e9d0e74fae1bb41976163c6c6cb63a174466.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "cube.cuh"
#include "cuda_util.h"
__global__ void cube_core(int *dev_a, int *dev_b){
int tid=blockIdx.x;
int tmp=*(dev_a+tid);
*(dev_b+tid)=tmp*tmp*tmp;
}
void cube(int result[], int n){
int a[n];
for(int i=0;i<n;i++){
a[i]=i;
}
int *dev_a=NULL;
int *dev_b=NULL;
hipMalloc((void**)&dev_a,n*sizeof(int));
hipMemset(dev_a,0,n*sizeof(int));   // zero the device buffer itself, not the host pointer variable
hipMalloc((void**)&dev_b,n*sizeof(int));
hipMemset(dev_b,0,n*sizeof(int));
hipMemcpy(dev_a,a,n*sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cube_core), dim3(n),dim3(1), 0, 0, dev_a,dev_b);
hipMemcpy(result,dev_b,n*sizeof(int),hipMemcpyDeviceToHost);
hipFree(dev_a);                     // release device buffers
hipFree(dev_b);
}
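// Minimal usage sketch (an assumption for illustration; cube() is assumed to
// be declared in cube.cuh):
#if 0
int result[8];
cube(result, 8);          // afterwards result[i] == i*i*i
#endif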
|
76f0e9d0e74fae1bb41976163c6c6cb63a174466.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "cube.cuh"
#include "cuda_util.h"
__global__ void cube_core(int *dev_a, int *dev_b){
int tid=blockIdx.x;
int tmp=*(dev_a+tid);
*(dev_b+tid)=tmp*tmp*tmp;
}
void cube(int result[], int n){
int a[n];
for(int i=0;i<n;i++){
a[i]=i;
}
int *dev_a=NULL;
int *dev_b=NULL;
cudaMalloc((void**)&dev_a,n*sizeof(int));
cudaMemset(dev_a,0,n*sizeof(int));  // zero the device buffer itself, not the host pointer variable
cudaMalloc((void**)&dev_b,n*sizeof(int));
cudaMemset(dev_b,0,n*sizeof(int));
cudaMemcpy(dev_a,a,n*sizeof(int),cudaMemcpyHostToDevice);
cube_core<<<n,1>>>(dev_a,dev_b);
cudaMemcpy(result,dev_b,n*sizeof(int),cudaMemcpyDeviceToHost);
cudaFree(dev_a);                    // release device buffers
cudaFree(dev_b);
}
|
1272f95201acb420484bb5e16bde19a43019d960.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MeshData.h"
#include <fstream>
#include <thrust/extrema.h>
#include <device_launch_parameters.h>
#include <helper_cuda.h>
#include "KNearestPoint.h"
#include "Helpers/xUtils.h"
__global__ void SampleNodeKernel(float4* node, int nodeNum, float4* vertex, int vertexNum,
int step, int startIdx, int baseIdx)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < nodeNum)
{
int sampledIdx = int(startIdx + idx * step);
sampledIdx = sampledIdx - sampledIdx / vertexNum * vertexNum; // sampledIdx % vertexNum;
node[idx] = vertex[sampledIdx + baseIdx];
}
}
__global__ void SampleVertexIdxKernel(int* sampledVertexIdx, int sampledVertexNum, int vertexNum,
int step, int startIdx, int baseIdx)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < sampledVertexNum)
{
int sampledIdx = int(startIdx + idx * step);
sampledIdx = sampledIdx - sampledIdx / vertexNum * vertexNum; // sampledIdx % vertexNum;
sampledVertexIdx[idx] = sampledIdx + baseIdx;
}
}
__global__ void ComputeV2NDistKernel(float* weight, int* indices,
float4* srcPoints, int srcVertexNum,
float4* targetPoints, int K)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= srcVertexNum)
return;
float4 *srcPoint, *targetPoint;
float x, y, z;
srcPoint = srcPoints + idx;
for (int i = 0; i < K; ++i)
{
targetPoint = targetPoints + indices[idx * K + i];
x = (srcPoint->x - targetPoint->x);
y = (srcPoint->y - targetPoint->y);
z = (srcPoint->z - targetPoint->z);
*(weight + idx * K + i) = sqrt(x * x + y * y + z * z);
}
}
__global__ void AddRelaIdxBaseKernel(int* indices, int baseIdx, int num)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= num)
return;
indices[idx] = indices[idx] + baseIdx;
}
__global__ void DistToWeightV2NKernel(float* vertexToNodeDistDevice, float varianceInv, int vertexNum)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= vertexNum)
return;
float nearDist0, nearDist1, nearDist2, nearDist3;
float* distPtr;
distPtr = vertexToNodeDistDevice + 4 * idx;
nearDist0 = *distPtr;
nearDist1 = *(distPtr + 1);
nearDist2 = *(distPtr + 2);
nearDist3 = *(distPtr + 3);
nearDist0 = exp(-nearDist0 * nearDist0 * varianceInv) + MYEPS;
nearDist1 = exp(-nearDist1 * nearDist1 * varianceInv) + MYEPS;
nearDist2 = exp(-nearDist2 * nearDist2 * varianceInv) + MYEPS;
nearDist3 = exp(-nearDist3 * nearDist3 * varianceInv) + MYEPS;
float sum = nearDist0 + nearDist1 + nearDist2 + nearDist3;
assert(!isnan(nearDist0 / sum));
assert(!isnan(nearDist1 / sum));
assert(!isnan(nearDist2 / sum));
assert(!isnan(nearDist3 / sum));
*distPtr = nearDist0 / sum;
*(distPtr + 1) = nearDist1 / sum;
*(distPtr + 2) = nearDist2 / sum;
*(distPtr + 3) = nearDist3 / sum;
}
__global__ void DistToWeightN2NKernel(float* nodeToNodeDist, int nodeNum)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= nodeNum)
return;
float* distPtr;
distPtr = nodeToNodeDist + 8 * idx;
*distPtr = sqrt(1.0 / 8);
*(distPtr + 1) = sqrt(1.0 / 8);
*(distPtr + 2) = sqrt(1.0 / 8);
*(distPtr + 3) = sqrt(1.0 / 8);
*(distPtr + 4) = sqrt(1.0 / 8);
*(distPtr + 5) = sqrt(1.0 / 8);
*(distPtr + 6) = sqrt(1.0 / 8);
*(distPtr + 7) = sqrt(1.0 / 8);
}
MeshData::MeshData()
: m_vertexNum(0),
m_nodeNum(0),
m_maxVertexNum(MAX_VERTEX_NUM),
m_maxNodeNum(MAX_FRAG_NUM * NODE_NUM_EACH_FRAG)
{
// Reserve
m_dVertexVec.reserve(m_maxVertexNum);
m_dNormalVec.reserve(m_maxVertexNum);
// Resize
m_dVertexVec.resize(m_maxVertexNum);
m_dNormalVec.resize(m_maxVertexNum);
}
MeshData::~MeshData()
{
m_dVertexVec.clear();
m_dNormalVec.clear();
}
DeformableMeshData::DeformableMeshData()
: MeshData(),
m_sigma(0)
{
// Reserve
m_dVertexVec.reserve(m_maxVertexNum);
m_dNormalVec.reserve(m_maxVertexNum);
m_dNodeVec.reserve(m_maxNodeNum);
m_dVertexRelaIdxVec.reserve(m_maxVertexNum * MAX_NEAR_NODE_NUM_VERTEX);
m_dVertexRelaWeightVec.reserve(m_maxVertexNum * MAX_NEAR_NODE_NUM_VERTEX);
m_dNodeRelaIdxVec.reserve(m_maxNodeNum * MAX_NEAR_NODE_NUM_NODE);
m_dNodeRelaWeightVec.reserve(m_maxNodeNum * MAX_NEAR_NODE_NUM_NODE);
// Resize
m_dVertexVec.resize(m_maxVertexNum);
m_dNormalVec.resize(m_maxVertexNum);
m_dNodeVec.resize(m_maxNodeNum);
m_dVertexRelaIdxVec.resize(m_maxVertexNum * MAX_NEAR_NODE_NUM_VERTEX);
m_dVertexRelaWeightVec.resize(m_maxVertexNum * MAX_NEAR_NODE_NUM_VERTEX);
m_dNodeRelaIdxVec.resize(m_maxNodeNum * MAX_NEAR_NODE_NUM_NODE);
m_dNodeRelaWeightVec.resize(m_maxNodeNum * MAX_NEAR_NODE_NUM_NODE);
m_flannKnn = new NearestPoint;
}
DeformableMeshData::~DeformableMeshData()
{
m_dVertexVec.clear();
m_dNormalVec.clear();
m_dNodeVec.clear();
m_dVertexRelaIdxVec.clear();
m_dVertexRelaWeightVec.clear();
m_dNodeRelaIdxVec.clear();
m_dNodeRelaWeightVec.clear();
delete m_flannKnn;
}
FragDeformableMeshData::FragDeformableMeshData()
: DeformableMeshData(),
m_fragNum(0),
m_sampledVertexNum(0),
m_maxFragNum(MAX_FRAG_NUM),
m_maxSampledVertexNum(SAMPLED_VERTEX_NUM_EACH_FRAG * MAX_FRAG_NUM)
{
// Reserve
m_vertexStrideVec.reserve(m_maxFragNum);
m_dSampledVertexIdxVec.reserve(m_maxSampledVertexNum);
m_dMatchingPointsIdxVec.reserve(2 * MAX_CLOSURE_NUM_EACH_FRAG * SAMPLED_VERTEX_NUM_EACH_FRAG * MAX_FRAG_NUM);
m_dNewSampledVertexIdxVec.reserve(SAMPLED_VERTEX_NUM_EACH_FRAG);
m_dNewNodeVec.reserve(NODE_NUM_EACH_FRAG);
// Resize
m_vertexStrideVec.resize(m_maxFragNum);
m_dSampledVertexIdxVec.resize(m_maxSampledVertexNum);
m_dMatchingPointsIdxVec.resize(2 * MAX_CLOSURE_NUM_EACH_FRAG * SAMPLED_VERTEX_NUM_EACH_FRAG * MAX_FRAG_NUM);
m_dNewSampledVertexIdxVec.resize(SAMPLED_VERTEX_NUM_EACH_FRAG);
m_dNewNodeVec.resize(NODE_NUM_EACH_FRAG);
m_vertexStrideVec[0] = 0;
}
FragDeformableMeshData::~FragDeformableMeshData()
{
m_vertexStrideVec.clear();
m_dNewSampledVertexIdxVec.clear();
m_dNewNodeVec.clear();
}
void DeformableMeshData::addNewNode(float4* dNewNode, int newNodeNum)
{
if ((m_nodeNum + newNodeNum) > m_maxNodeNum)
{
std::cout << "error: node num exceed limit" << std::endl;
std::exit(0);
}
checkCudaErrors(hipMemcpy(RAW_PTR(m_dNodeVec) + m_nodeNum,
dNewNode, sizeof(float4)*newNodeNum, hipMemcpyDeviceToDevice));
m_nodeNum += newNodeNum;
}
void FragDeformableMeshData::addNewFrag(int vertexNum)
{
m_vertexNum = vertexNum;
++m_fragNum;
m_vertexStrideVec[m_fragNum] = vertexNum;
}
void FragDeformableMeshData::addNewVertexIdx(int* dNewVertexIdx, int newVertexNum)
{
if ((m_sampledVertexNum + newVertexNum) > m_maxSampledVertexNum)
{
std::cout << "error: sampled vertex num exceed limit" << std::endl;
std::exit(0);
}
checkCudaErrors(hipMemcpy(RAW_PTR(m_dSampledVertexIdxVec) + m_sampledVertexNum,
dNewVertexIdx, sizeof(int)*newVertexNum, hipMemcpyDeviceToDevice));
m_sampledVertexNum += newVertexNum;
}
void FragDeformableMeshData::sampleNewNodeAndVertexIdx()
{
const int currentFragIdx = m_fragNum - 1;
const int vertexBaseIdx = m_vertexStrideVec[currentFragIdx];
const int vertexNumThisFrag = m_vertexStrideVec[currentFragIdx + 1] - m_vertexStrideVec[currentFragIdx];
int startIdx = 9999 % vertexNumThisFrag;
int step = vertexNumThisFrag / (SAMPLED_VERTEX_NUM_EACH_FRAG + MYEPS);
int block = 256, grid = DivUp(SAMPLED_VERTEX_NUM_EACH_FRAG, block);
SampleVertexIdxKernel << <grid, block >> >(RAW_PTR(m_dNewSampledVertexIdxVec), SAMPLED_VERTEX_NUM_EACH_FRAG,
vertexNumThisFrag, step, startIdx, vertexBaseIdx);
addNewVertexIdx(RAW_PTR(m_dNewSampledVertexIdxVec), SAMPLED_VERTEX_NUM_EACH_FRAG);
startIdx = 19999 % vertexNumThisFrag;
step = vertexNumThisFrag / (NODE_NUM_EACH_FRAG + MYEPS);
block = 256, grid = DivUp(NODE_NUM_EACH_FRAG, block);
SampleNodeKernel << <grid, block >> >(RAW_PTR(m_dNewNodeVec), NODE_NUM_EACH_FRAG,
RAW_PTR(m_dVertexVec), vertexNumThisFrag, step, startIdx, vertexBaseIdx);
#if 0
thrust::host_vector<float4> newNodeVec = m_dNewNodeVec;
std::cout << "sampled new node: " << std::endl;
for (int i = 0; i < 16; ++i)
{
std::cout << newNodeVec[i].x << " : " << newNodeVec[i].y << " : " << newNodeVec[i].z << std::endl;
}
#endif
addNewNode(RAW_PTR(m_dNewNodeVec), NODE_NUM_EACH_FRAG);
#if 0
thrust::host_vector<float4> nodeVec = m_dNodeVec;
std::cout << "sampled node: " << std::endl;
for (int i = 0; i < 32; ++i)
{
std::cout << nodeVec[i].x << " : " << nodeVec[i].y << " : " << nodeVec[i].z << std::endl;
}
#endif
}
void FragDeformableMeshData::getVertexAndNodeRelation()
{
const int currentFragIdx = m_fragNum - 1;
const int vertexNumThisFrag = m_vertexStrideVec[currentFragIdx + 1] - m_vertexStrideVec[currentFragIdx];
const int vertexBaseIdx = m_vertexStrideVec[currentFragIdx];
const int nodeBaseIdx = currentFragIdx * NODE_NUM_EACH_FRAG;
try
{
m_flannKnn->clear();
m_flannKnn->InitKDTree(RAW_PTR(m_dNodeVec) + nodeBaseIdx, NODE_NUM_EACH_FRAG);
m_flannKnn->GetKnnResult(RAW_PTR(m_dVertexRelaIdxVec) + 4 * vertexBaseIdx,
RAW_PTR(m_dVertexRelaWeightVec) + 4 * vertexBaseIdx,
RAW_PTR(m_dVertexVec) + vertexBaseIdx,
vertexNumThisFrag, MAX_NEAR_NODE_NUM_VERTEX);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
m_flannKnn->GetKnnResult(RAW_PTR(m_dNodeRelaIdxVec) + 8 * nodeBaseIdx,
RAW_PTR(m_dNodeRelaWeightVec) + 8 * nodeBaseIdx,
RAW_PTR(m_dNodeVec) + nodeBaseIdx,
NODE_NUM_EACH_FRAG, MAX_NEAR_NODE_NUM_NODE);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
}
catch (std::bad_alloc& e)
{
std::cout << "Error in KD-tree: Run out of memory" << std::endl;
exit(0);
}
catch (thrust::system_error& e)
{
std::cout << "Error in KD-tree: " << e.what() << std::endl;
exit(0);
}
int block = 256, grid = DivUp(vertexNumThisFrag, block);
// The distances returned by the kd-tree may be inaccurate, so recompute them here
ComputeV2NDistKernel << <grid, block >> >(RAW_PTR(m_dVertexRelaWeightVec) + 4 * vertexBaseIdx,
RAW_PTR(m_dVertexRelaIdxVec) + 4 * vertexBaseIdx,
RAW_PTR(m_dVertexVec) + vertexBaseIdx, vertexNumThisFrag,
RAW_PTR(m_dNodeVec) + nodeBaseIdx, MAX_NEAR_NODE_NUM_VERTEX);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
#if 0
std::cout << "-------------" << std::endl;
std::cout << "vertexBaseIdx: " << vertexBaseIdx << std::endl;
std::cout << "vertexNumThisFrag: " << vertexNumThisFrag << std::endl;
std::cout << "-------------" << std::endl;
#endif
// calculate weight exp(||v-g||^2/(2*sigma^2)), then normalize
const float sum = thrust::reduce(m_dVertexRelaWeightVec.begin() + 4 * vertexBaseIdx,
m_dVertexRelaWeightVec.begin() + 4 * vertexBaseIdx + 4 * vertexNumThisFrag, (float)0, thrust::plus<float>());
assert(sum > MYEPS);
const float variance = 2 * pow(0.5 * sum / (vertexNumThisFrag * 4), 2); // variance of gaussian
block = 256, grid = DivUp(4 * vertexNumThisFrag, block);
AddRelaIdxBaseKernel << <grid, block >> >(RAW_PTR(m_dVertexRelaIdxVec) + 4 * vertexBaseIdx,
currentFragIdx * NODE_NUM_EACH_FRAG, 4 * vertexNumThisFrag);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
block = 256, grid = DivUp(8 * NODE_NUM_EACH_FRAG, block);
AddRelaIdxBaseKernel << <grid, block >> >(RAW_PTR(m_dNodeRelaIdxVec) + 8 * nodeBaseIdx,
currentFragIdx * NODE_NUM_EACH_FRAG, 8 * NODE_NUM_EACH_FRAG);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
block = 256, grid = DivUp(vertexNumThisFrag, block);
DistToWeightV2NKernel << <grid, block >> >(RAW_PTR(m_dVertexRelaWeightVec) + 4 * vertexBaseIdx, 1.0f / variance, vertexNumThisFrag);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
block = 256, grid = DivUp(NODE_NUM_EACH_FRAG, block);
DistToWeightN2NKernel << <grid, block >> >(RAW_PTR(m_dNodeRelaWeightVec) + 8 * nodeBaseIdx, NODE_NUM_EACH_FRAG);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
}
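// Illustrative call order for registering a new fragment (a sketch inferred
// from the member functions above; how the vertex buffer is filled and the
// totalVertexNum value are assumptions left to the caller):
#if 0
FragDeformableMeshData frag;
// ... upload the new fragment's vertices into frag's vertex buffer ...
frag.addNewFrag(totalVertexNum);        // totalVertexNum: cumulative vertex count so far
frag.sampleNewNodeAndVertexIdx();       // sample graph nodes and vertex indices
frag.getVertexAndNodeRelation();        // build vertex-node / node-node weights
#endif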
|
1272f95201acb420484bb5e16bde19a43019d960.cu
|
#include "MeshData.h"
#include <fstream>
#include <thrust/extrema.h>
#include <device_launch_parameters.h>
#include <helper_cuda.h>
#include "KNearestPoint.h"
#include "Helpers/xUtils.h"
__global__ void SampleNodeKernel(float4* node, int nodeNum, float4* vertex, int vertexNum,
int step, int startIdx, int baseIdx)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < nodeNum)
{
int sampledIdx = int(startIdx + idx * step);
sampledIdx = sampledIdx - sampledIdx / vertexNum * vertexNum; // sampledIdx % vertexNum;
node[idx] = vertex[sampledIdx + baseIdx];
}
}
__global__ void SampleVertexIdxKernel(int* sampledVertexIdx, int sampledVertexNum, int vertexNum,
int step, int startIdx, int baseIdx)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < sampledVertexNum)
{
int sampledIdx = int(startIdx + idx * step);
sampledIdx = sampledIdx - sampledIdx / vertexNum * vertexNum; // sampledIdx % vertexNum;
sampledVertexIdx[idx] = sampledIdx + baseIdx;
}
}
__global__ void ComputeV2NDistKernel(float* weight, int* indices,
float4* srcPoints, int srcVertexNum,
float4* targetPoints, int K)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= srcVertexNum)
return;
float4 *srcPoint, *targetPoint;
float x, y, z;
srcPoint = srcPoints + idx;
for (int i = 0; i < K; ++i)
{
targetPoint = targetPoints + indices[idx * K + i];
x = (srcPoint->x - targetPoint->x);
y = (srcPoint->y - targetPoint->y);
z = (srcPoint->z - targetPoint->z);
*(weight + idx * K + i) = sqrt(x * x + y * y + z * z);
}
}
__global__ void AddRelaIdxBaseKernel(int* indices, int baseIdx, int num)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= num)
return;
indices[idx] = indices[idx] + baseIdx;
}
__global__ void DistToWeightV2NKernel(float* vertexToNodeDistDevice, float varianceInv, int vertexNum)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= vertexNum)
return;
float nearDist0, nearDist1, nearDist2, nearDist3;
float* distPtr;
distPtr = vertexToNodeDistDevice + 4 * idx;
nearDist0 = *distPtr;
nearDist1 = *(distPtr + 1);
nearDist2 = *(distPtr + 2);
nearDist3 = *(distPtr + 3);
nearDist0 = exp(-nearDist0 * nearDist0 * varianceInv) + MYEPS;
nearDist1 = exp(-nearDist1 * nearDist1 * varianceInv) + MYEPS;
nearDist2 = exp(-nearDist2 * nearDist2 * varianceInv) + MYEPS;
nearDist3 = exp(-nearDist3 * nearDist3 * varianceInv) + MYEPS;
float sum = nearDist0 + nearDist1 + nearDist2 + nearDist3;
assert(!isnan(nearDist0 / sum));
assert(!isnan(nearDist1 / sum));
assert(!isnan(nearDist2 / sum));
assert(!isnan(nearDist3 / sum));
*distPtr = nearDist0 / sum;
*(distPtr + 1) = nearDist1 / sum;
*(distPtr + 2) = nearDist2 / sum;
*(distPtr + 3) = nearDist3 / sum;
}
__global__ void DistToWeightN2NKernel(float* nodeToNodeDist, int nodeNum)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= nodeNum)
return;
float* distPtr;
distPtr = nodeToNodeDist + 8 * idx;
*distPtr = sqrt(1.0 / 8);
*(distPtr + 1) = sqrt(1.0 / 8);
*(distPtr + 2) = sqrt(1.0 / 8);
*(distPtr + 3) = sqrt(1.0 / 8);
*(distPtr + 4) = sqrt(1.0 / 8);
*(distPtr + 5) = sqrt(1.0 / 8);
*(distPtr + 6) = sqrt(1.0 / 8);
*(distPtr + 7) = sqrt(1.0 / 8);
}
MeshData::MeshData()
: m_vertexNum(0),
m_nodeNum(0),
m_maxVertexNum(MAX_VERTEX_NUM),
m_maxNodeNum(MAX_FRAG_NUM * NODE_NUM_EACH_FRAG)
{
// Reserve
m_dVertexVec.reserve(m_maxVertexNum);
m_dNormalVec.reserve(m_maxVertexNum);
// Resize
m_dVertexVec.resize(m_maxVertexNum);
m_dNormalVec.resize(m_maxVertexNum);
}
MeshData::~MeshData()
{
m_dVertexVec.clear();
m_dNormalVec.clear();
}
DeformableMeshData::DeformableMeshData()
: MeshData(),
m_sigma(0)
{
// Reserve
m_dVertexVec.reserve(m_maxVertexNum);
m_dNormalVec.reserve(m_maxVertexNum);
m_dNodeVec.reserve(m_maxNodeNum);
m_dVertexRelaIdxVec.reserve(m_maxVertexNum * MAX_NEAR_NODE_NUM_VERTEX);
m_dVertexRelaWeightVec.reserve(m_maxVertexNum * MAX_NEAR_NODE_NUM_VERTEX);
m_dNodeRelaIdxVec.reserve(m_maxNodeNum * MAX_NEAR_NODE_NUM_NODE);
m_dNodeRelaWeightVec.reserve(m_maxNodeNum * MAX_NEAR_NODE_NUM_NODE);
// Resize
m_dVertexVec.resize(m_maxVertexNum);
m_dNormalVec.resize(m_maxVertexNum);
m_dNodeVec.resize(m_maxNodeNum);
m_dVertexRelaIdxVec.resize(m_maxVertexNum * MAX_NEAR_NODE_NUM_VERTEX);
m_dVertexRelaWeightVec.resize(m_maxVertexNum * MAX_NEAR_NODE_NUM_VERTEX);
m_dNodeRelaIdxVec.resize(m_maxNodeNum * MAX_NEAR_NODE_NUM_NODE);
m_dNodeRelaWeightVec.resize(m_maxNodeNum * MAX_NEAR_NODE_NUM_NODE);
m_flannKnn = new NearestPoint;
}
DeformableMeshData::~DeformableMeshData()
{
m_dVertexVec.clear();
m_dNormalVec.clear();
m_dNodeVec.clear();
m_dVertexRelaIdxVec.clear();
m_dVertexRelaWeightVec.clear();
m_dNodeRelaIdxVec.clear();
m_dNodeRelaWeightVec.clear();
delete m_flannKnn;
}
FragDeformableMeshData::FragDeformableMeshData()
: DeformableMeshData(),
m_fragNum(0),
m_sampledVertexNum(0),
m_maxFragNum(MAX_FRAG_NUM),
m_maxSampledVertexNum(SAMPLED_VERTEX_NUM_EACH_FRAG * MAX_FRAG_NUM)
{
// Reserve
m_vertexStrideVec.reserve(m_maxFragNum);
m_dSampledVertexIdxVec.reserve(m_maxSampledVertexNum);
m_dMatchingPointsIdxVec.reserve(2 * MAX_CLOSURE_NUM_EACH_FRAG * SAMPLED_VERTEX_NUM_EACH_FRAG * MAX_FRAG_NUM);
m_dNewSampledVertexIdxVec.reserve(SAMPLED_VERTEX_NUM_EACH_FRAG);
m_dNewNodeVec.reserve(NODE_NUM_EACH_FRAG);
// Resize
m_vertexStrideVec.resize(m_maxFragNum);
m_dSampledVertexIdxVec.resize(m_maxSampledVertexNum);
m_dMatchingPointsIdxVec.resize(2 * MAX_CLOSURE_NUM_EACH_FRAG * SAMPLED_VERTEX_NUM_EACH_FRAG * MAX_FRAG_NUM);
m_dNewSampledVertexIdxVec.resize(SAMPLED_VERTEX_NUM_EACH_FRAG);
m_dNewNodeVec.resize(NODE_NUM_EACH_FRAG);
m_vertexStrideVec[0] = 0;
}
FragDeformableMeshData::~FragDeformableMeshData()
{
m_vertexStrideVec.clear();
m_dNewSampledVertexIdxVec.clear();
m_dNewNodeVec.clear();
}
void DeformableMeshData::addNewNode(float4* dNewNode, int newNodeNum)
{
if ((m_nodeNum + newNodeNum) > m_maxNodeNum)
{
std::cout << "error: node num exceed limit" << std::endl;
std::exit(0);
}
checkCudaErrors(cudaMemcpy(RAW_PTR(m_dNodeVec) + m_nodeNum,
dNewNode, sizeof(float4)*newNodeNum, cudaMemcpyDeviceToDevice));
m_nodeNum += newNodeNum;
}
void FragDeformableMeshData::addNewFrag(int vertexNum)
{
m_vertexNum = vertexNum;
++m_fragNum;
m_vertexStrideVec[m_fragNum] = vertexNum;
}
void FragDeformableMeshData::addNewVertexIdx(int* dNewVertexIdx, int newVertexNum)
{
if ((m_sampledVertexNum + newVertexNum) > m_maxSampledVertexNum)
{
std::cout << "error: sampled vertex num exceed limit" << std::endl;
std::exit(0);
}
checkCudaErrors(cudaMemcpy(RAW_PTR(m_dSampledVertexIdxVec) + m_sampledVertexNum,
dNewVertexIdx, sizeof(int)*newVertexNum, cudaMemcpyDeviceToDevice));
m_sampledVertexNum += newVertexNum;
}
void FragDeformableMeshData::sampleNewNodeAndVertexIdx()
{
const int currentFragIdx = m_fragNum - 1;
const int vertexBaseIdx = m_vertexStrideVec[currentFragIdx];
const int vertexNumThisFrag = m_vertexStrideVec[currentFragIdx + 1] - m_vertexStrideVec[currentFragIdx];
int startIdx = 9999 % vertexNumThisFrag;
int step = vertexNumThisFrag / (SAMPLED_VERTEX_NUM_EACH_FRAG + MYEPS);
int block = 256, grid = DivUp(SAMPLED_VERTEX_NUM_EACH_FRAG, block);
SampleVertexIdxKernel << <grid, block >> >(RAW_PTR(m_dNewSampledVertexIdxVec), SAMPLED_VERTEX_NUM_EACH_FRAG,
vertexNumThisFrag, step, startIdx, vertexBaseIdx);
addNewVertexIdx(RAW_PTR(m_dNewSampledVertexIdxVec), SAMPLED_VERTEX_NUM_EACH_FRAG);
startIdx = 19999 % vertexNumThisFrag;
step = vertexNumThisFrag / (NODE_NUM_EACH_FRAG + MYEPS);
block = 256, grid = DivUp(NODE_NUM_EACH_FRAG, block);
SampleNodeKernel << <grid, block >> >(RAW_PTR(m_dNewNodeVec), NODE_NUM_EACH_FRAG,
RAW_PTR(m_dVertexVec), vertexNumThisFrag, step, startIdx, vertexBaseIdx);
#if 0
thrust::host_vector<float4> newNodeVec = m_dNewNodeVec;
std::cout << "sampled new node: " << std::endl;
for (int i = 0; i < 16; ++i)
{
std::cout << newNodeVec[i].x << " : " << newNodeVec[i].y << " : " << newNodeVec[i].z << std::endl;
}
#endif
addNewNode(RAW_PTR(m_dNewNodeVec), NODE_NUM_EACH_FRAG);
#if 0
thrust::host_vector<float4> nodeVec = m_dNodeVec;
std::cout << "sampled node: " << std::endl;
for (int i = 0; i < 32; ++i)
{
std::cout << nodeVec[i].x << " : " << nodeVec[i].y << " : " << nodeVec[i].z << std::endl;
}
#endif
}
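// Illustrative host-side sketch, not part of the original file: the uniform sampling performed by
// SampleVertexIdxKernel / SampleNodeKernel above. startIdx is a fixed offset (9999 or 19999 taken
// modulo the fragment size), step spreads the samples evenly across the fragment (MYEPS only nudges
// the integer division), and the subtraction trick inside the kernels is just a modulo that wraps
// the index back into the fragment before the fragment base index is added.
static int SampledVertexIdxHostRef(int startIdx, int step, int idx, int vertexNum, int baseIdx)
{
int sampledIdx = startIdx + idx * step;
sampledIdx = sampledIdx % vertexNum; // same as sampledIdx - sampledIdx / vertexNum * vertexNum
return sampledIdx + baseIdx;
}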
void FragDeformableMeshData::getVertexAndNodeRelation()
{
const int currentFragIdx = m_fragNum - 1;
const int vertexNumThisFrag = m_vertexStrideVec[currentFragIdx + 1] - m_vertexStrideVec[currentFragIdx];
const int vertexBaseIdx = m_vertexStrideVec[currentFragIdx];
const int nodeBaseIdx = currentFragIdx * NODE_NUM_EACH_FRAG;
try
{
m_flannKnn->clear();
m_flannKnn->InitKDTree(RAW_PTR(m_dNodeVec) + nodeBaseIdx, NODE_NUM_EACH_FRAG);
m_flannKnn->GetKnnResult(RAW_PTR(m_dVertexRelaIdxVec) + 4 * vertexBaseIdx,
RAW_PTR(m_dVertexRelaWeightVec) + 4 * vertexBaseIdx,
RAW_PTR(m_dVertexVec) + vertexBaseIdx,
vertexNumThisFrag, MAX_NEAR_NODE_NUM_VERTEX);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
m_flannKnn->GetKnnResult(RAW_PTR(m_dNodeRelaIdxVec) + 8 * nodeBaseIdx,
RAW_PTR(m_dNodeRelaWeightVec) + 8 * nodeBaseIdx,
RAW_PTR(m_dNodeVec) + nodeBaseIdx,
NODE_NUM_EACH_FRAG, MAX_NEAR_NODE_NUM_NODE);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
}
catch (std::bad_alloc& e)
{
std::cout << "Error in KD-tree: Run out of memory" << std::endl;
exit(0);
}
catch (thrust::system_error& e)
{
std::cout << "Error in KD-tree: " << e.what() << std::endl;
exit(0);
}
int block = 256, grid = DivUp(vertexNumThisFrag, block);
// Recompute the distances explicitly, since the distances reported by the kd-tree may be inaccurate
ComputeV2NDistKernel << <grid, block >> >(RAW_PTR(m_dVertexRelaWeightVec) + 4 * vertexBaseIdx,
RAW_PTR(m_dVertexRelaIdxVec) + 4 * vertexBaseIdx,
RAW_PTR(m_dVertexVec) + vertexBaseIdx, vertexNumThisFrag,
RAW_PTR(m_dNodeVec) + nodeBaseIdx, MAX_NEAR_NODE_NUM_VERTEX);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
#if 0
std::cout << "-------------" << std::endl;
std::cout << "vertexBaseIdx: " << vertexBaseIdx << std::endl;
std::cout << "vertexNumThisFrag: " << vertexNumThisFrag << std::endl;
std::cout << "-------------" << std::endl;
#endif
// calculate weight exp(-||v-g||^2 / (2*sigma^2)), then normalize
const float sum = thrust::reduce(m_dVertexRelaWeightVec.begin() + 4 * vertexBaseIdx,
m_dVertexRelaWeightVec.begin() + 4 * vertexBaseIdx + 4 * vertexNumThisFrag, (float)0, thrust::plus<float>());
assert(sum > MYEPS);
const float variance = 2 * pow(0.5 * sum / (vertexNumThisFrag * 4), 2); // variance of gaussian
block = 256, grid = DivUp(4 * vertexNumThisFrag, block);
AddRelaIdxBaseKernel << <grid, block >> >(RAW_PTR(m_dVertexRelaIdxVec) + 4 * vertexBaseIdx,
currentFragIdx * NODE_NUM_EACH_FRAG, 4 * vertexNumThisFrag);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
block = 256, grid = DivUp(8 * NODE_NUM_EACH_FRAG, block);
AddRelaIdxBaseKernel << <grid, block >> >(RAW_PTR(m_dNodeRelaIdxVec) + 8 * nodeBaseIdx,
currentFragIdx * NODE_NUM_EACH_FRAG, 8 * NODE_NUM_EACH_FRAG);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
block = 256, grid = DivUp(vertexNumThisFrag, block);
DistToWeightV2NKernel << <grid, block >> >(RAW_PTR(m_dVertexRelaWeightVec) + 4 * vertexBaseIdx, 1.0f / variance, vertexNumThisFrag);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
block = 256, grid = DivUp(NODE_NUM_EACH_FRAG, block);
DistToWeightN2NKernel << <grid, block >> >(RAW_PTR(m_dNodeRelaWeightVec) + 8 * nodeBaseIdx, NODE_NUM_EACH_FRAG);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
}
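// Illustrative host-side sketch, not part of the original file: how the Gaussian bandwidth passed to
// DistToWeightV2NKernel is derived in getVertexAndNodeRelation above. sigma is taken as half of the
// mean vertex-to-node distance of the current fragment, and the kernel receives 1 / (2 * sigma^2).
static float VertexWeightVarianceInvHostRef(float distSum, int vertexNumThisFrag)
{
const float meanDist = distSum / (vertexNumThisFrag * 4); // 4 neighbours per vertex, as hard-coded in DistToWeightV2NKernel
const float sigma = 0.5f * meanDist;
const float variance = 2.0f * sigma * sigma; // matches 2 * pow(0.5 * sum / (vertexNumThisFrag * 4), 2)
return 1.0f / variance;
}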
|
f10c8bd0bf834e600c656b0a49b16afbdbde1bc2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kExtractPatches.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// each buffer holds XSIZE*YSIZE floats, so the allocation size needs sizeof(float)
float *images = NULL;
hipMalloc(&images, XSIZE*YSIZE*sizeof(float));
float *patches = NULL;
hipMalloc(&patches, XSIZE*YSIZE*sizeof(float));
float *indices = NULL;
hipMalloc(&indices, XSIZE*YSIZE*sizeof(float));
float *width_offset = NULL;
hipMalloc(&width_offset, XSIZE*YSIZE*sizeof(float));
float *height_offset = NULL;
hipMalloc(&height_offset, XSIZE*YSIZE*sizeof(float));
int num_images = 1;
int img_width = XSIZE;
int img_height = YSIZE;
int patch_width = XSIZE;
int patch_height = YSIZE;
int num_colors = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kExtractPatches), dim3(gridBlock),dim3(threadBlock), 0, 0, images,patches,indices,width_offset,height_offset,num_images,img_width,img_height,patch_width,patch_height,num_colors);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kExtractPatches), dim3(gridBlock),dim3(threadBlock), 0, 0, images,patches,indices,width_offset,height_offset,num_images,img_width,img_height,patch_width,patch_height,num_colors);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kExtractPatches), dim3(gridBlock),dim3(threadBlock), 0, 0, images,patches,indices,width_offset,height_offset,num_images,img_width,img_height,patch_width,patch_height,num_colors);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
f10c8bd0bf834e600c656b0a49b16afbdbde1bc2.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kExtractPatches.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// each buffer holds XSIZE*YSIZE floats, so the allocation size needs sizeof(float)
float *images = NULL;
cudaMalloc(&images, XSIZE*YSIZE*sizeof(float));
float *patches = NULL;
cudaMalloc(&patches, XSIZE*YSIZE*sizeof(float));
float *indices = NULL;
cudaMalloc(&indices, XSIZE*YSIZE*sizeof(float));
float *width_offset = NULL;
cudaMalloc(&width_offset, XSIZE*YSIZE*sizeof(float));
float *height_offset = NULL;
cudaMalloc(&height_offset, XSIZE*YSIZE*sizeof(float));
int num_images = 1;
int img_width = XSIZE;
int img_height = YSIZE;
int patch_width = XSIZE;
int patch_height = YSIZE;
int num_colors = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kExtractPatches<<<gridBlock,threadBlock>>>(images,patches,indices,width_offset,height_offset,num_images,img_width,img_height,patch_width,patch_height,num_colors);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kExtractPatches<<<gridBlock,threadBlock>>>(images,patches,indices,width_offset,height_offset,num_images,img_width,img_height,patch_width,patch_height,num_colors);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kExtractPatches<<<gridBlock,threadBlock>>>(images,patches,indices,width_offset,height_offset,num_images,img_width,img_height,patch_width,patch_height,num_colors);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
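// Illustrative sketch, not part of the original file: the two while loops above simply round the
// problem size up to the next multiple of the block size before dividing, i.e. a ceiling division
// for the grid dimensions. A compact equivalent would be:
static inline int ceilDiv(int n, int block) { return (n + block - 1) / block; }
// e.g. dim3 gridBlock(ceilDiv(XSIZE, BLOCKX), ceilDiv(YSIZE, BLOCKY)); yields the same grid.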
|
1411783823a9089efbf5f4a9a38dfd242ed90fe4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=1024 --blockDim=512
#include "common.h"
__global__ void bitonicSortShared(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint dir
)
{
__requires(arrayLength == 64);
//Shared memory storage for one or more short vectors
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
//Offset to the beginning of subbatch and load data
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
for (uint size = 2; size < arrayLength; size <<= 1)
{
//Bitonic merge
uint ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
for (uint stride = size / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
}
//ddd == dir for the last bitonic merge step
{
for (uint stride = arrayLength / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
dir
);
}
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
|
1411783823a9089efbf5f4a9a38dfd242ed90fe4.cu
|
//pass
//--gridDim=1024 --blockDim=512
#include "common.h"
__global__ void bitonicSortShared(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint dir
)
{
__requires(arrayLength == 64);
//Shared memory storage for one or more short vectors
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
//Offset to the beginning of subbatch and load data
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
for (uint size = 2; size < arrayLength; size <<= 1)
{
//Bitonic merge
uint ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
for (uint stride = size / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
}
//ddd == dir for the last bitonic merge step
{
for (uint stride = arrayLength / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
dir
);
}
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
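// Illustrative note, not part of the original file. The //pass and --gridDim/--blockDim header marks
// this file as a GPUVerify-style test case. Each of the 512 threads per block loads and stores two
// key/value pairs, one at its own index and one SHARED_SIZE_LIMIT / 2 further on, so a block covers
// one contiguous subbatch of SHARED_SIZE_LIMIT elements starting at blockIdx.x * SHARED_SIZE_LIMIT;
// the --blockDim=512 annotation is consistent with SHARED_SIZE_LIMIT == 1024, the value used in the
// CUDA sortingNetworks sample this kernel is derived from (an assumption, since common.h is not
// shown here). The first loop sorts sub-sequences of length size = 2, 4, ..., arrayLength / 2 in
// alternating directions so that adjacent pairs form bitonic sequences, and the final loop merges
// each length-arrayLength bitonic sequence in the caller-supplied direction `dir` before the sorted
// keys and values are written back to global memory.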
|
95a0ed8943a6b805800ee20cd363cf00e94d8e33.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <cmeans.h>
#include <cmeanscu.h>
#include <float.h>
/*
* Raises a float to a integer power using a loop and multiplication
* Much faster than the generic pow(float,float) from math.h
*/
__device__ float ipow(float val, int power) {
float tmp = val;
for(int i=0; i < power-1; i++) {
tmp *= val;
}
return tmp;
}
__device__ float parallelSum(float* data, const unsigned int ndata) {
const unsigned int tid = threadIdx.x;
float t;
__syncthreads();
// Butterfly sum. ndata MUST be a power of 2.
for(unsigned int bit = ndata >> 1; bit > 0; bit >>= 1) {
t = data[tid] + data[tid^bit]; __syncthreads();
data[tid] = t; __syncthreads();
}
return data[tid];
}
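// Illustrative host-side sketch, not part of the original file: the XOR butterfly used by
// parallelSum above. In every step each "thread" adds the value held by its partner tid ^ bit;
// after log2(ndata) steps every slot contains the full sum (which is why ndata must be a power
// of 2). The scratch copy plays the role of the two __syncthreads() calls in the kernel.
static void parallelSumHostRef(float* data, const unsigned int ndata)
{
float* tmp = (float*) malloc(ndata * sizeof(float));
for (unsigned int bit = ndata >> 1; bit > 0; bit >>= 1) {
for (unsigned int tid = 0; tid < ndata; tid++)
tmp[tid] = data[tid] + data[tid ^ bit];
for (unsigned int tid = 0; tid < ndata; tid++)
data[tid] = tmp[tid];
}
free(tmp);
// every data[tid] now holds the sum of all ndata original entries
}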
/*
* Computes centers with a MxD grid
*/
__global__ void UpdateClusterCentersGPU(const float* oldClusters, const float* events, float* newClusters, float* memberships) {
float membershipValue;//, denominator;
int d = blockIdx.y;
int event_matrix_offset = NUM_EVENTS*d;
int membership_matrix_offset = NUM_EVENTS*blockIdx.x;
__shared__ float numerators[NUM_THREADS_UPDATE];
// Sum of the memberships computed by each thread
// The sum of all of these denominators together is effectively the size of the cluster
__shared__ float denominators[NUM_THREADS_UPDATE];
int tid = threadIdx.x;
// initialize numerators and denominators to 0
denominators[tid] = 0;
numerators[tid] = 0;
__syncthreads();
// Compute new membership value for each event
// Add its contribution to the numerator and denominator for that thread
for(int j = tid; j < NUM_EVENTS; j+=NUM_THREADS_UPDATE){
membershipValue = memberships[membership_matrix_offset + j];
numerators[tid] += events[event_matrix_offset + j]*membershipValue;
denominators[tid] += membershipValue;
}
__syncthreads();
if(tid == 0){
// Sum up the numerator/denominator, one for this block
for(int j = 1; j < NUM_THREADS_UPDATE; j++){
numerators[0] += numerators[j];
}
for(int j = 1; j < NUM_THREADS_UPDATE; j++){
denominators[0] += denominators[j];
}
// Set the new center for this block
newClusters[blockIdx.x*NUM_DIMENSIONS + d] = numerators[0]/denominators[0];
}
}
/*
* Computes numerators of the centers with a M/B x D grid, where B is the number of clusters per block
*
 * This should be more efficient because it only accesses event data M/B times, rather than M times
 * Shared memory limits B to 15, but 4 seems to be ideal for performance (still has good 50+% occupancy)
*/
__global__ void UpdateClusterCentersGPU2(const float* oldClusters, const float* events, float* newClusters, float* memberships) {
float membershipValue;
float eventValue;
// Compute cluster range for this block
int c_start = blockIdx.x*NUM_CLUSTERS_PER_BLOCK;
int num_c = NUM_CLUSTERS_PER_BLOCK;
// Handle boundary condition
if(blockIdx.x == gridDim.x-1 && NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK) {
num_c = NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK;
}
// Dimension index
int d = blockIdx.y;
int event_matrix_offset = NUM_EVENTS*d;
__shared__ float numerators[NUM_THREADS_UPDATE*NUM_CLUSTERS_PER_BLOCK];
int tid = threadIdx.x;
// initialize numerators and denominators to 0
for(int c = 0; c < num_c; c++) {
numerators[c*NUM_THREADS_UPDATE+tid] = 0;
}
// Compute new membership value for each event
// Add its contribution to the numerator and denominator for that thread
for(int j = tid; j < NUM_EVENTS; j+=NUM_THREADS_UPDATE){
eventValue = events[event_matrix_offset + j];
for(int c = 0; c < num_c; c++) {
membershipValue = memberships[(c+c_start)*NUM_EVENTS + j];
numerators[c*NUM_THREADS_UPDATE+tid] += eventValue*membershipValue;
}
}
__syncthreads();
for(int c = 0; c < num_c; c++) {
numerators[c*NUM_THREADS_UPDATE+tid] = parallelSum(&numerators[NUM_THREADS_UPDATE*c],NUM_THREADS_UPDATE);
}
__syncthreads();
if(tid == 0){
for(int c = 0; c < num_c; c++) {
// Set the new center for this block
newClusters[(c+c_start)*NUM_DIMENSIONS + d] = numerators[c*NUM_THREADS_UPDATE];
}
}
}
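// Illustrative note, not part of the original file: UpdateClusterCentersGPU2 above is presumably
// launched with a dim3(ceil(NUM_CLUSTERS / NUM_CLUSTERS_PER_BLOCK), NUM_DIMENSIONS) grid and
// NUM_THREADS_UPDATE threads per block, which is the M/B x D decomposition described in the comment.
// Each block folds its per-thread partial numerators with parallelSum; the division by the cluster
// sizes (the denominators) is not done here and is assumed to happen elsewhere, e.g. after
// ComputeClusterSizes.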
__global__ void ComputeDistanceMatrix(const float* clusters, const float* events, float* matrix) {
// copy the relevant center for this block into shared memory
__shared__ float center[NUM_DIMENSIONS];
for(int j = threadIdx.x; j < NUM_DIMENSIONS; j+=NUM_THREADS_DISTANCE){
center[j] = clusters[blockIdx.y*NUM_DIMENSIONS+j];
}
__syncthreads();
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < NUM_EVENTS) {
matrix[blockIdx.y*NUM_EVENTS+i] = CalculateDistanceGPU(center,events,blockIdx.y,i);
}
}
__global__ void ComputeDistanceMatrixNoShared(float* clusters, const float* events, float* matrix) {
float* center = &clusters[blockIdx.y*NUM_DIMENSIONS];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < NUM_EVENTS) {
matrix[blockIdx.y*NUM_EVENTS+i] = CalculateDistanceGPU(center,events,blockIdx.y,i);
}
}
__global__ void ComputeMembershipMatrix(float* distances, float* memberships) {
float membershipValue;
int i = blockIdx.x * blockDim.x + threadIdx.x;
// For each event
if(i < NUM_EVENTS) {
membershipValue = MembershipValueGPU(blockIdx.y, i, distances);
#if FUZZINESS_SQUARE
// This is much faster than the pow function
membershipValue = membershipValue*membershipValue;
#else
membershipValue = __powf(membershipValue,FUZZINESS)+1e-30;
#endif
memberships[blockIdx.y*NUM_EVENTS+i] = membershipValue;
}
}
__global__ void ComputeMembershipMatrixLinear(float* distances) {
float membershipValue;
float denom = 0.0f;
float dist;
int i = blockIdx.x * blockDim.x + threadIdx.x;
// For each event
if(i < NUM_EVENTS) {
for(int c=0; c < NUM_CLUSTERS; c++) {
dist = distances[c*NUM_EVENTS+i];
#if FUZZINESS_SQUARE
dist = dist*dist;
#else
dist = __powf(dist,2.0f/(FUZZINESS-1.0f))+1e-30;
#endif
denom += 1.0f / dist;
}
for(int c=0; c < NUM_CLUSTERS; c++) {
// not enough shared memory to store an array of distances
// for each thread, so just recompute them like above
dist = distances[c*NUM_EVENTS+i];
#if FUZZINESS_SQUARE
dist = dist*dist;
membershipValue = 1.0f/(dist*denom); // u
membershipValue *= membershipValue; // u^p, p=2
#else
dist = __powf(dist,2.0f/(FUZZINESS-1.0f))+1e-30;
membershipValue = 1.0f/(dist*denom); // u
membershipValue = __powf(membershipValue,FUZZINESS); // u^p
#endif
distances[c*NUM_EVENTS+i] = membershipValue;
}
}
}
__global__ void ComputeNormalizedMembershipMatrix(float* distances, float* memberships) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < NUM_EVENTS) {
memberships[blockIdx.y*NUM_EVENTS+i] = MembershipValueGPU(blockIdx.y, i, distances);
}
}
__global__ void ComputeNormalizedMembershipMatrixLinear(float* distances) {
float membershipValue;
float denom = 0.0f;
float dist;
int i = blockIdx.x * blockDim.x + threadIdx.x;
// For each event
if(i < NUM_EVENTS) {
for(int c=0; c < NUM_CLUSTERS; c++) {
dist = distances[c*NUM_EVENTS+i];
#if FUZZINESS_SQUARE
dist = dist*dist;
#else
dist = __powf(dist,2.0f/(FUZZINESS-1.0f))+1e-30;
#endif
denom += 1.0f / dist;
}
for(int c=0; c < NUM_CLUSTERS; c++) {
// not enough shared memory to store an array of distances
// for each thread, so just recompute them like above
dist = distances[c*NUM_EVENTS+i];
#if FUZZINESS_SQUARE
dist = dist*dist;
membershipValue = 1.0f/(dist*denom); // u
#else
dist = __powf(dist,2.0f/(FUZZINESS-1.0f))+1e-30;
membershipValue = 1.0f/(dist*denom); // u
#endif
distances[c*NUM_EVENTS+i] = membershipValue;
}
}
}
__device__ float MembershipValueGPU(int clusterIndex, int eventIndex, const float* distanceMatrix){
float myClustDist = 0.0f;
// Compute the distance from this event to the given cluster
myClustDist = distanceMatrix[clusterIndex*NUM_EVENTS+eventIndex];
float sum = 0.0f;
float otherClustDist;
for(int j = 0; j< NUM_CLUSTERS; j++){
otherClustDist = distanceMatrix[j*NUM_EVENTS+eventIndex];
#if FUZZINESS_SQUARE
sum += (myClustDist/otherClustDist)*(myClustDist/otherClustDist);
#else
sum += __powf((myClustDist/otherClustDist),(2.0f/(FUZZINESS-1.0f)));
#endif
//sum += ipow(myClustDist/otherClustDist,2/(FUZZINESS-1));
}
return 1.0f/sum;
}
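// Illustrative host-side sketch, not part of the original file: MembershipValueGPU above computes
// the standard fuzzy c-means membership
// u(c, e) = 1 / sum_j ( d(c, e) / d(j, e) )^( 2 / (m - 1) )
// where m is the fuzziness exponent (FUZZINESS) and d() are the cluster-to-event distances held in
// the distance matrix. A plain CPU reference with an explicit fuzziness parameter (it assumes powf
// from <math.h> is available through the headers included above):
static float MembershipValueHostRef(const float* dist, int numClusters, int c, float fuzziness)
{
float sum = 0.0f;
for (int j = 0; j < numClusters; j++) {
sum += powf(dist[c] / dist[j], 2.0f / (fuzziness - 1.0f)); // dist[j]: distance of this event to cluster j
}
return 1.0f / sum;
}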
__global__ void ComputeClusterSizes(float* memberships, float* sizes) {
__shared__ float partial_sums[512];
partial_sums[threadIdx.x] = 0.0f;
for(int i=threadIdx.x; i < NUM_EVENTS; i += 512) {
partial_sums[threadIdx.x] += memberships[blockIdx.x*NUM_EVENTS+i];
}
__syncthreads();
float sum = parallelSum(partial_sums,512);
__syncthreads();
if(threadIdx.x) {
sizes[blockIdx.x] = sum;
}
}
__device__ float MembershipValueDist(int clusterIndex, int eventIndex, float distance, float* distanceMatrix){
float sum =0.0f;
float otherClustDist;
for(int j = 0; j< NUM_CLUSTERS; j++){
otherClustDist = distanceMatrix[j*NUM_EVENTS+eventIndex];
#if FUZZINESS_SQUARE
sum += (distance/otherClustDist)*(distance/otherClustDist);
#else
sum += __powf((distance/otherClustDist),(2.0f/(FUZZINESS-1.0f)));
#endif
//sum += ipow((distance/otherClustDist),2/(FUZZINESS-1));
}
return 1.0f/sum;
}
__device__ float CalculateDistanceGPU(const float* center, const float* events, int clusterIndex, int eventIndex){
float sum = 0;
float tmp;
#if DISTANCE_MEASURE == 0 // Euclidean
#pragma unroll 1 // Prevent compiler from unrolling this loop, eats up too many registers
for(int i = 0; i < NUM_DIMENSIONS; i++){
tmp = events[i*NUM_EVENTS+eventIndex] - center[i];
//tmp = events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i];
sum += tmp*tmp;
}
//sum = sqrt(sum);
sum = sqrt(sum+1e-30);
#endif
#if DISTANCE_MEASURE == 1 // Absolute value
#pragma unroll 1 // Prevent compiler from unrolling this loop, eats up too many registers
for(int i = 0; i < NUM_DIMENSIONS; i++){
tmp = events[i*NUM_EVENTS+eventIndex] - center[i];
//tmp = events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i];
sum += abs(tmp)+1e-30;
}
#endif
#if DISTANCE_MEASURE == 2 // Maximum distance
#pragma unroll 1 // Prevent compiler from unrolling this loop, eats up too many registers
for(int i = 0; i < NUM_DIMENSIONS; i++){
tmp = abs(events[i*NUM_EVENTS + eventIndex] - center[i]);
//tmp = abs(events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i]);
if(tmp > sum)
sum = tmp+1e-30;
}
#endif
return sum;
}
__device__ float CalculateQII(const float* events, int cluster_index_I, float* EI, float* numMem, float* distanceMatrix){
EI[threadIdx.x] = 0;
numMem[threadIdx.x] = 0;
for(int i = threadIdx.x; i < NUM_EVENTS; i+=Q_THREADS){
float distance = distanceMatrix[cluster_index_I*NUM_EVENTS+i];
float memVal = MembershipValueDist(cluster_index_I, i, distance, distanceMatrix);
if(memVal > MEMBER_THRESH){
EI[threadIdx.x] += memVal*memVal * distance*distance;
numMem[threadIdx.x]++;
}
}
__syncthreads();
if(threadIdx.x == 0){
for(int i = 1; i < Q_THREADS; i++){
EI[0] += EI[i];
numMem[0] += numMem[i];
}
}
__syncthreads();
return ((((float)K1) * numMem[0]) - (((float)K2) * EI[0]) - (((float)K3) * NUM_DIMENSIONS));
}
__device__ float CalculateQIJ(const float* events, int cluster_index_I, int cluster_index_J, float * EI, float * EJ, float *numMem, float* distanceMatrix){
EI[threadIdx.x] = 0;
EJ[threadIdx.x] = 0;
numMem[threadIdx.x] = 0;
for(int i = threadIdx.x; i < NUM_EVENTS; i+=Q_THREADS){
float distance = distanceMatrix[cluster_index_I*NUM_EVENTS+i];
float memValI = MembershipValueDist(cluster_index_I, i, distance, distanceMatrix);
if(memValI > MEMBER_THRESH){
EI[threadIdx.x] += memValI*memValI * distance*distance;
}
distance = distanceMatrix[cluster_index_J*NUM_EVENTS+i];
float memValJ = MembershipValueDist(cluster_index_J, i, distance, distanceMatrix);
if(memValJ > MEMBER_THRESH){
EJ[threadIdx.x] += memValJ*memValJ * distance*distance;
}
if(memValI > MEMBER_THRESH && memValJ > MEMBER_THRESH){
numMem[threadIdx.x]++;
}
}
__syncthreads();
if(threadIdx.x == 0){
for(int i = 1; i < Q_THREADS; i++){
EI[0] += EI[i];
EJ[0] += EJ[i];
numMem[0] += numMem[i];
}
}
__syncthreads();
float EB = (EI[0] > EJ[0]) ? EI[0] : EJ[0];
return ((-1*((float)K1)*numMem[0]) + ((float)K2)*EB);
}
__global__ void CalculateQMatrixGPUUpgrade(const float* events, const float* clusters, float* matrix, float* distanceMatrix){
__shared__ float EI[Q_THREADS];
__shared__ float EJ[Q_THREADS];
__shared__ float numMem[Q_THREADS];
if(blockIdx.x == blockIdx.y){
matrix[blockIdx.x*NUM_CLUSTERS + blockIdx.y ] = CalculateQII(events, blockIdx.x, EI, numMem, distanceMatrix);
}
else{
matrix[blockIdx.x*NUM_CLUSTERS + blockIdx.y] = CalculateQIJ(events, blockIdx.x, blockIdx.y, EI, EJ, numMem, distanceMatrix);
}
}
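// Illustrative note, not part of the original file. CalculateQMatrixGPUUpgrade fills one entry of
// the NUM_CLUSTERS x NUM_CLUSTERS Q matrix per block (blockIdx.x = cluster i, blockIdx.y = cluster j)
// with Q_THREADS threads cooperating on the event loop, so the host presumably launches it roughly as
// dim3 grid(NUM_CLUSTERS, NUM_CLUSTERS);
// CalculateQMatrixGPUUpgrade<<<grid, Q_THREADS>>>(d_events, d_clusters, d_qMatrix, d_distanceMatrix);
// (the host code is not part of this file; the launch shape is inferred from the blockIdx/threadIdx
// usage above, and the d_* names are placeholders).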
|
95a0ed8943a6b805800ee20cd363cf00e94d8e33.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <cmeans.h>
#include <cmeanscu.h>
#include <float.h>
/*
* Raises a float to a integer power using a loop and multiplication
* Much faster than the generic pow(float,float) from math.h
*/
__device__ float ipow(float val, int power) {
float tmp = val;
for(int i=0; i < power-1; i++) {
tmp *= val;
}
return tmp;
}
__device__ float parallelSum(float* data, const unsigned int ndata) {
const unsigned int tid = threadIdx.x;
float t;
__syncthreads();
// Butterfly sum. ndata MUST be a power of 2.
for(unsigned int bit = ndata >> 1; bit > 0; bit >>= 1) {
t = data[tid] + data[tid^bit]; __syncthreads();
data[tid] = t; __syncthreads();
}
return data[tid];
}
/*
* Computes centers with a MxD grid
*/
__global__ void UpdateClusterCentersGPU(const float* oldClusters, const float* events, float* newClusters, float* memberships) {
float membershipValue;//, denominator;
int d = blockIdx.y;
int event_matrix_offset = NUM_EVENTS*d;
int membership_matrix_offset = NUM_EVENTS*blockIdx.x;
__shared__ float numerators[NUM_THREADS_UPDATE];
// Sum of the memberships computed by each thread
// The sum of all of these denominators together is effectively the size of the cluster
__shared__ float denominators[NUM_THREADS_UPDATE];
int tid = threadIdx.x;
// initialize numerators and denominators to 0
denominators[tid] = 0;
numerators[tid] = 0;
__syncthreads();
// Compute new membership value for each event
// Add its contribution to the numerator and denominator for that thread
for(int j = tid; j < NUM_EVENTS; j+=NUM_THREADS_UPDATE){
membershipValue = memberships[membership_matrix_offset + j];
numerators[tid] += events[event_matrix_offset + j]*membershipValue;
denominators[tid] += membershipValue;
}
__syncthreads();
if(tid == 0){
// Sum up the numerator/denominator, one for this block
for(int j = 1; j < NUM_THREADS_UPDATE; j++){
numerators[0] += numerators[j];
}
for(int j = 1; j < NUM_THREADS_UPDATE; j++){
denominators[0] += denominators[j];
}
// Set the new center for this block
newClusters[blockIdx.x*NUM_DIMENSIONS + d] = numerators[0]/denominators[0];
}
}
/*
* Computes numerators of the centers with a M/B x D grid, where B is the number of clusters per block
*
 * This should be more efficient because it only accesses event data M/B times, rather than M times
 * Shared memory limits B to 15, but 4 seems to be ideal for performance (still has good 50+% occupancy)
*/
__global__ void UpdateClusterCentersGPU2(const float* oldClusters, const float* events, float* newClusters, float* memberships) {
float membershipValue;
float eventValue;
// Compute cluster range for this block
int c_start = blockIdx.x*NUM_CLUSTERS_PER_BLOCK;
int num_c = NUM_CLUSTERS_PER_BLOCK;
// Handle boundary condition
if(blockIdx.x == gridDim.x-1 && NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK) {
num_c = NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK;
}
// Dimension index
int d = blockIdx.y;
int event_matrix_offset = NUM_EVENTS*d;
__shared__ float numerators[NUM_THREADS_UPDATE*NUM_CLUSTERS_PER_BLOCK];
int tid = threadIdx.x;
// initialize numerators and denominators to 0
for(int c = 0; c < num_c; c++) {
numerators[c*NUM_THREADS_UPDATE+tid] = 0;
}
// Compute new membership value for each event
// Add its contribution to the numerator and denominator for that thread
for(int j = tid; j < NUM_EVENTS; j+=NUM_THREADS_UPDATE){
eventValue = events[event_matrix_offset + j];
for(int c = 0; c < num_c; c++) {
membershipValue = memberships[(c+c_start)*NUM_EVENTS + j];
numerators[c*NUM_THREADS_UPDATE+tid] += eventValue*membershipValue;
}
}
__syncthreads();
for(int c = 0; c < num_c; c++) {
numerators[c*NUM_THREADS_UPDATE+tid] = parallelSum(&numerators[NUM_THREADS_UPDATE*c],NUM_THREADS_UPDATE);
}
__syncthreads();
if(tid == 0){
for(int c = 0; c < num_c; c++) {
// Set the new center for this block
newClusters[(c+c_start)*NUM_DIMENSIONS + d] = numerators[c*NUM_THREADS_UPDATE];
}
}
}
__global__ void ComputeDistanceMatrix(const float* clusters, const float* events, float* matrix) {
// copy the relevant center for this block into shared memory
__shared__ float center[NUM_DIMENSIONS];
for(int j = threadIdx.x; j < NUM_DIMENSIONS; j+=NUM_THREADS_DISTANCE){
center[j] = clusters[blockIdx.y*NUM_DIMENSIONS+j];
}
__syncthreads();
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < NUM_EVENTS) {
matrix[blockIdx.y*NUM_EVENTS+i] = CalculateDistanceGPU(center,events,blockIdx.y,i);
}
}
__global__ void ComputeDistanceMatrixNoShared(float* clusters, const float* events, float* matrix) {
float* center = &clusters[blockIdx.y*NUM_DIMENSIONS];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < NUM_EVENTS) {
matrix[blockIdx.y*NUM_EVENTS+i] = CalculateDistanceGPU(center,events,blockIdx.y,i);
}
}
__global__ void ComputeMembershipMatrix(float* distances, float* memberships) {
float membershipValue;
int i = blockIdx.x * blockDim.x + threadIdx.x;
// For each event
if(i < NUM_EVENTS) {
membershipValue = MembershipValueGPU(blockIdx.y, i, distances);
#if FUZZINESS_SQUARE
// This is much faster than the pow function
membershipValue = membershipValue*membershipValue;
#else
membershipValue = __powf(membershipValue,FUZZINESS)+1e-30;
#endif
memberships[blockIdx.y*NUM_EVENTS+i] = membershipValue;
}
}
__global__ void ComputeMembershipMatrixLinear(float* distances) {
float membershipValue;
float denom = 0.0f;
float dist;
int i = blockIdx.x * blockDim.x + threadIdx.x;
// For each event
if(i < NUM_EVENTS) {
for(int c=0; c < NUM_CLUSTERS; c++) {
dist = distances[c*NUM_EVENTS+i];
#if FUZZINESS_SQUARE
dist = dist*dist;
#else
dist = __powf(dist,2.0f/(FUZZINESS-1.0f))+1e-30;
#endif
denom += 1.0f / dist;
}
for(int c=0; c < NUM_CLUSTERS; c++) {
// not enough shared memory to store an array of distances
// for each thread, so just recompute them like above
dist = distances[c*NUM_EVENTS+i];
#if FUZZINESS_SQUARE
dist = dist*dist;
membershipValue = 1.0f/(dist*denom); // u
membershipValue *= membershipValue; // u^p, p=2
#else
dist = __powf(dist,2.0f/(FUZZINESS-1.0f))+1e-30;
membershipValue = 1.0f/(dist*denom); // u
membershipValue = __powf(membershipValue,FUZZINESS); // u^p
#endif
distances[c*NUM_EVENTS+i] = membershipValue;
}
}
}
__global__ void ComputeNormalizedMembershipMatrix(float* distances, float* memberships) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < NUM_EVENTS) {
memberships[blockIdx.y*NUM_EVENTS+i] = MembershipValueGPU(blockIdx.y, i, distances);
}
}
__global__ void ComputeNormalizedMembershipMatrixLinear(float* distances) {
float membershipValue;
float denom = 0.0f;
float dist;
int i = blockIdx.x * blockDim.x + threadIdx.x;
// For each event
if(i < NUM_EVENTS) {
for(int c=0; c < NUM_CLUSTERS; c++) {
dist = distances[c*NUM_EVENTS+i];
#if FUZZINESS_SQUARE
dist = dist*dist;
#else
dist = __powf(dist,2.0f/(FUZZINESS-1.0f))+1e-30;
#endif
denom += 1.0f / dist;
}
for(int c=0; c < NUM_CLUSTERS; c++) {
// not enough shared memory to store an array of distances
// for each thread, so just recompute them like above
dist = distances[c*NUM_EVENTS+i];
#if FUZZINESS_SQUARE
dist = dist*dist;
membershipValue = 1.0f/(dist*denom); // u
#else
dist = __powf(dist,2.0f/(FUZZINESS-1.0f))+1e-30;
membershipValue = 1.0f/(dist*denom); // u
#endif
distances[c*NUM_EVENTS+i] = membershipValue;
}
}
}
__device__ float MembershipValueGPU(int clusterIndex, int eventIndex, const float* distanceMatrix){
float myClustDist = 0.0f;
// Compute the distance from this event to the given cluster
myClustDist = distanceMatrix[clusterIndex*NUM_EVENTS+eventIndex];
float sum = 0.0f;
float otherClustDist;
for(int j = 0; j< NUM_CLUSTERS; j++){
otherClustDist = distanceMatrix[j*NUM_EVENTS+eventIndex];
#if FUZZINESS_SQUARE
sum += (myClustDist/otherClustDist)*(myClustDist/otherClustDist);
#else
sum += __powf((myClustDist/otherClustDist),(2.0f/(FUZZINESS-1.0f)));
#endif
//sum += ipow(myClustDist/otherClustDist,2/(FUZZINESS-1));
}
return 1.0f/sum;
}
__global__ void ComputeClusterSizes(float* memberships, float* sizes) {
__shared__ float partial_sums[512];
partial_sums[threadIdx.x] = 0.0f;
for(int i=threadIdx.x; i < NUM_EVENTS; i += 512) {
partial_sums[threadIdx.x] += memberships[blockIdx.x*NUM_EVENTS+i];
}
__syncthreads();
float sum = parallelSum(partial_sums,512);
__syncthreads();
if(threadIdx.x) {
sizes[blockIdx.x] = sum;
}
}
__device__ float MembershipValueDist(int clusterIndex, int eventIndex, float distance, float* distanceMatrix){
float sum =0.0f;
float otherClustDist;
for(int j = 0; j< NUM_CLUSTERS; j++){
otherClustDist = distanceMatrix[j*NUM_EVENTS+eventIndex];
#if FUZZINESS_SQUARE
sum += (distance/otherClustDist)*(distance/otherClustDist);
#else
sum += __powf((distance/otherClustDist),(2.0f/(FUZZINESS-1.0f)));
#endif
//sum += ipow((distance/otherClustDist),2/(FUZZINESS-1));
}
return 1.0f/sum;
}
__device__ float CalculateDistanceGPU(const float* center, const float* events, int clusterIndex, int eventIndex){
float sum = 0;
float tmp;
#if DISTANCE_MEASURE == 0 // Euclidean
#pragma unroll 1 // Prevent compiler from unrolling this loop, eats up too many registers
for(int i = 0; i < NUM_DIMENSIONS; i++){
tmp = events[i*NUM_EVENTS+eventIndex] - center[i];
//tmp = events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i];
sum += tmp*tmp;
}
//sum = sqrt(sum);
sum = sqrt(sum+1e-30);
#endif
#if DISTANCE_MEASURE == 1 // Absolute value
#pragma unroll 1 // Prevent compiler from unrolling this loop, eats up too many registers
for(int i = 0; i < NUM_DIMENSIONS; i++){
tmp = events[i*NUM_EVENTS+eventIndex] - center[i];
//tmp = events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i];
sum += abs(tmp)+1e-30;
}
#endif
#if DISTANCE_MEASURE == 2 // Maximum distance
#pragma unroll 1 // Prevent compiler from unrolling this loop, eats up too many registers
for(int i = 0; i < NUM_DIMENSIONS; i++){
tmp = abs(events[i*NUM_EVENTS + eventIndex] - center[i]);
//tmp = abs(events[eventIndex*NUM_DIMENSIONS + i] - clusters[clusterIndex*NUM_DIMENSIONS + i]);
if(tmp > sum)
sum = tmp+1e-30;
}
#endif
return sum;
}
__device__ float CalculateQII(const float* events, int cluster_index_I, float* EI, float* numMem, float* distanceMatrix){
EI[threadIdx.x] = 0;
numMem[threadIdx.x] = 0;
for(int i = threadIdx.x; i < NUM_EVENTS; i+=Q_THREADS){
float distance = distanceMatrix[cluster_index_I*NUM_EVENTS+i];
float memVal = MembershipValueDist(cluster_index_I, i, distance, distanceMatrix);
if(memVal > MEMBER_THRESH){
EI[threadIdx.x] += memVal*memVal * distance*distance;
numMem[threadIdx.x]++;
}
}
__syncthreads();
if(threadIdx.x == 0){
for(int i = 1; i < Q_THREADS; i++){
EI[0] += EI[i];
numMem[0] += numMem[i];
}
}
__syncthreads();
return ((((float)K1) * numMem[0]) - (((float)K2) * EI[0]) - (((float)K3) * NUM_DIMENSIONS));
}
__device__ float CalculateQIJ(const float* events, int cluster_index_I, int cluster_index_J, float * EI, float * EJ, float *numMem, float* distanceMatrix){
EI[threadIdx.x] = 0;
EJ[threadIdx.x] = 0;
numMem[threadIdx.x] = 0;
for(int i = threadIdx.x; i < NUM_EVENTS; i+=Q_THREADS){
float distance = distanceMatrix[cluster_index_I*NUM_EVENTS+i];
float memValI = MembershipValueDist(cluster_index_I, i, distance, distanceMatrix);
if(memValI > MEMBER_THRESH){
EI[threadIdx.x] += memValI*memValI * distance*distance;
}
distance = distanceMatrix[cluster_index_J*NUM_EVENTS+i];
float memValJ = MembershipValueDist(cluster_index_J, i, distance, distanceMatrix);
if(memValJ > MEMBER_THRESH){
EJ[threadIdx.x] += memValJ*memValJ * distance*distance;
}
if(memValI > MEMBER_THRESH && memValJ > MEMBER_THRESH){
numMem[threadIdx.x]++;
}
}
__syncthreads();
if(threadIdx.x == 0){
for(int i = 1; i < Q_THREADS; i++){
EI[0] += EI[i];
EJ[0] += EJ[i];
numMem[0] += numMem[i];
}
}
__syncthreads();
float EB = (EI[0] > EJ[0]) ? EI[0] : EJ[0];
return ((-1*((float)K1)*numMem[0]) + ((float)K2)*EB);
}
__global__ void CalculateQMatrixGPUUpgrade(const float* events, const float* clusters, float* matrix, float* distanceMatrix){
__shared__ float EI[Q_THREADS];
__shared__ float EJ[Q_THREADS];
__shared__ float numMem[Q_THREADS];
if(blockIdx.x == blockIdx.y){
matrix[blockIdx.x*NUM_CLUSTERS + blockIdx.y ] = CalculateQII(events, blockIdx.x, EI, numMem, distanceMatrix);
}
else{
matrix[blockIdx.x*NUM_CLUSTERS + blockIdx.y] = CalculateQIJ(events, blockIdx.x, blockIdx.y, EI, EJ, numMem, distanceMatrix);
}
}
|
c3f678faafe8a5b0c8d077c66c7fb772afc088c9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define PRECISION_z
//#define TEXTURE
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_1_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if (row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + idx + blocksize*k ];
int col =
dcolind[ offset + idx + blocksize*k ];
dot += val * dx[ col*num_vecs+idy ];
}
if (betazero) {
dy[ row+idy*num_rows ] = dot*alpha;
} else {
dy[ row+idy*num_rows ] = dot*alpha + beta*dy [ row+idy*num_rows ];
}
}
}
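// Illustrative note, not part of the original file: in the SELL-P layout used above, rows are
// grouped into slices of `blocksize` rows, each slice padded to a common length and stored
// column-major, so element k of local row idx in slice bdx sits at
// dval[ drowptr[bdx] + idx + blocksize * k ]
// with its column index at the same position in dcolind. This kernel assigns one thread per row
// (T = 1) and one threadIdx.y per right-hand-side vector, reading dx in interleaved order
// (dx[col * num_vecs + vec]) and writing dy one vector after another (dy[row + vec * num_rows]).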
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_4_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
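// Illustrative note, not part of the original file: in the T = 4 variant above each row's four
// partial sums live `blocksize` apart in shared memory (shared[ldz], shared[ldz + blocksize],
// shared[ldz + 2 * blocksize], shared[ldz + 3 * blocksize]). The tail is a two-step tree
// reduction: threads with idx < 2 fold in the partials 2 * blocksize away, and thread idx == 0
// then adds the remaining partial at blocksize before scaling by alpha (and adding beta * dy when
// beta is nonzero). The _8/_16/_32 variants below extend the same pattern with one extra folding
// step per additional factor of 2 in T.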
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_8_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
const magmaDoubleComplex * __restrict__ dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_16_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 8 ) {
shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if ( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_32_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 16 ) {
shared[ldz]+=shared[ldz+blocksize*16];
__syncthreads();
if ( idx < 8 ) shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if ( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
/************************* same but using texture mem *************************/
// SELLP SpMV kernel 2D grid - for large number of vectors
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_1_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if (row < num_rows ) {
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + idx + blocksize*k ];
int col =
num_vecs * dcolind[ offset + idx + blocksize*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idy );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
if (betazero) {
dy[row+num_rows*idy*2] =
dot1*alpha;
dy[row+num_rows*idy*2+num_rows] =
dot2*alpha;
} else {
dy[row+num_rows*idy*2] =
dot1*alpha
+ beta*dy [row*num_vecs+idy*2];
dy[row+num_rows*idy*2+num_rows] =
dot2*alpha
+ beta*dy [row*num_vecs+idy*2+1];
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_4_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_8_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
__syncthreads();
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_16_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 8 ) {
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
__syncthreads();
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_32_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 16 ) {
shared[ldz]+=shared[ldz+blocksize*16];
shared[ldz+sv]+=shared[ldz+sv+blocksize*16];
__syncthreads();
if ( idx < 8 ) {
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
}
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
/**
Purpose
-------
This routine computes Y = alpha * A^t * X + beta * Y on the GPU.
    Input format is SELLP. Note that the input format for X is row-major,
    while the output format for Y is column-major!
Arguments
---------
@param[in]
transA magma_trans_t
transpose A?
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs magma_int_t
number of columns in X and Y
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
                column indices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
                row pointer of SELLP
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zmgesellpmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
// using a 3D thread grid for small num_vecs, a 2D grid otherwise
int texture=0, kepler=0, precision=0;
magma_int_t arch = magma_getdevice_arch();
if ( arch > 300 )
kepler = 1;
#if defined(PRECISION_d)
precision = 1;
#endif
#if defined(TEXTURE)
texture = 1;
#endif
if ( (texture==1) && (precision==1) && (kepler==1) ) {
// Create channel.
hipChannelFormatDesc channel_desc;
channel_desc = hipCreateChannelDesc(32, 32, 32, 32,
hipChannelFormatKindSigned);
// Create resource descriptor.
struct hipResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = hipResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)dx;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m * num_vecs * sizeof(double);
// Specify texture object parameters.
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModePoint;
texDesc.readMode = hipReadModeElementType;
// Create texture object.
hipTextureObject_t texdx = 0;
hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed
printf("error: number of vectors has to be multiple of 2.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
if ( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = (num_vecs/2) * blocksize*alignment;
// every thread handles two vectors
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
dim3 block( blocksize, alignment, num_vecs/2 );
int dimgrid1 = int( sqrt( double( slices )));
int dimgrid2 = magma_ceildiv( slices, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_vecs * blocksize*alignment * sizeof( magmaDoubleComplex );
if ( alignment == 1) {
dim3 block( blocksize, num_vecs/2, 1 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D_tex<true>), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D_tex<false>), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 4) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 8) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 16) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 32) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
} else {
if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed
printf("error: number of vectors has to be multiple of 2.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
if ( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = num_vecs * blocksize*alignment;
// every thread handles two vectors
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int dimgrid1 = int( sqrt( double( slices )));
int dimgrid2 = magma_ceildiv( slices, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_threads * sizeof( magmaDoubleComplex );
if ( alignment == 1) {
dim3 block( blocksize, num_vecs/2, 1 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D<true>), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D<false>), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 4) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 8) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 16) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 32) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
}
return MAGMA_SUCCESS;
}
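// ---------------------------------------------------------------------------
// Usage sketch (illustrative only): one possible way to call magma_zmgesellpmv
// for a block of right-hand sides. It assumes the SELLP arrays (dval, dcolind,
// drowptr) were built with the given blocksize/alignment, that dx and dy
// already reside on the device, and that num_vecs is even, as required above.
// The helper name below is a placeholder and not part of the MAGMA API.
// ---------------------------------------------------------------------------
static magma_int_t
example_sellp_spmv_call(
    magma_int_t m, magma_int_t n, magma_int_t num_vecs,
    magma_int_t blocksize, magma_int_t slices, magma_int_t alignment,
    magmaDoubleComplex_ptr dval,
    magmaIndex_ptr dcolind,
    magmaIndex_ptr drowptr,
    magmaDoubleComplex_ptr dx,
    magmaDoubleComplex_ptr dy,
    magma_queue_t queue )
{
    // alpha = 1, beta = 0  ==>  Y = A^t * X, per the routine's definition above
    magmaDoubleComplex alpha = MAGMA_Z_MAKE( 1.0, 0.0 );
    magmaDoubleComplex beta  = MAGMA_Z_MAKE( 0.0, 0.0 );
    return magma_zmgesellpmv( MagmaNoTrans, m, n, num_vecs, blocksize, slices,
                              alignment, alpha, dval, dcolind, drowptr,
                              dx, beta, dy, queue );
}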
|
c3f678faafe8a5b0c8d077c66c7fb772afc088c9.cu
|
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define PRECISION_z
//#define TEXTURE
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_1_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if (row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + idx + blocksize*k ];
int col =
dcolind[ offset + idx + blocksize*k ];
dot += val * dx[ col*num_vecs+idy ];
}
if (betazero) {
dy[ row+idy*num_rows ] = dot*alpha;
} else {
dy[ row+idy*num_rows ] = dot*alpha + beta*dy [ row+idy*num_rows ];
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_4_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_8_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
const magmaDoubleComplex * __restrict__ dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_16_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 8 ) {
shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if ( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_32_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 16 ) {
shared[ldz]+=shared[ldz+blocksize*16];
__syncthreads();
if ( idx < 8 ) shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if ( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
/************************* same but using texture mem *************************/
// SELLP SpMV kernel 2D grid - for large number of vectors
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_1_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if (row < num_rows ) {
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + idx + blocksize*k ];
int col =
num_vecs * dcolind[ offset + idx + blocksize*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idy );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
if (betazero) {
dy[row+num_rows*idy*2] =
dot1*alpha;
dy[row+num_rows*idy*2+num_rows] =
dot2*alpha;
} else {
dy[row+num_rows*idy*2] =
dot1*alpha
+ beta*dy [row*num_vecs+idy*2];
dy[row+num_rows*idy*2+num_rows] =
dot2*alpha
+ beta*dy [row*num_vecs+idy*2+1];
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_4_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_8_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
__syncthreads();
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_16_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 8 ) {
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
__syncthreads();
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_32_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ magmaDoubleComplex shared[];
if (row < num_rows ) {
magmaDoubleComplex dot1 = MAGMA_Z_MAKE(0.0, 0.0);
magmaDoubleComplex dot2 = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2double(v.y, v.x);
dot2 += val * __hiloint2double(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 16 ) {
shared[ldz]+=shared[ldz+blocksize*16];
shared[ldz+sv]+=shared[ldz+sv+blocksize*16];
__syncthreads();
if ( idx < 8 ) {
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
}
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
/**
Purpose
-------
This routine computes Y = alpha * A^t * X + beta * Y on the GPU.
    Input format is SELLP. Note that the input format for X is row-major,
    while the output format for Y is column-major!
Arguments
---------
@param[in]
transA magma_trans_t
transpose A?
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs magma_int_t
number of columns in X and Y
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
                column indices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
                row pointer of SELLP
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zmgesellpmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
// using a 3D thread grid for small num_vecs, a 2D grid otherwise
int texture=0, kepler=0, precision=0;
magma_int_t arch = magma_getdevice_arch();
if ( arch > 300 )
kepler = 1;
#if defined(PRECISION_d)
precision = 1;
#endif
#if defined(TEXTURE)
texture = 1;
#endif
if ( (texture==1) && (precision==1) && (kepler==1) ) {
// Create channel.
cudaChannelFormatDesc channel_desc;
channel_desc = cudaCreateChannelDesc(32, 32, 32, 32,
cudaChannelFormatKindSigned);
// Create resource descriptor.
struct cudaResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = cudaResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)dx;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m * num_vecs * sizeof(double);
// Specify texture object parameters.
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
// Create texture object.
cudaTextureObject_t texdx = 0;
cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed
printf("error: number of vectors has to be multiple of 2.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
if ( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = (num_vecs/2) * blocksize*alignment;
// every thread handles two vectors
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
dim3 block( blocksize, alignment, num_vecs/2 );
int dimgrid1 = int( sqrt( double( slices )));
int dimgrid2 = magma_ceildiv( slices, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_vecs * blocksize*alignment * sizeof( magmaDoubleComplex );
if ( alignment == 1) {
dim3 block( blocksize, num_vecs/2, 1 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_1_3D_tex<true><<< grid, block, 0, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_1_3D_tex<false><<< grid, block, 0, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 4) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_4_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_4_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 8) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_8_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_8_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 16) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_16_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_16_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 32) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_32_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_32_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
} else {
if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed
printf("error: number of vectors has to be multiple of 2.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
if ( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = num_vecs * blocksize*alignment;
// every thread handles two vectors
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int dimgrid1 = int( sqrt( double( slices )));
int dimgrid2 = magma_ceildiv( slices, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_threads * sizeof( magmaDoubleComplex );
if ( alignment == 1) {
dim3 block( blocksize, num_vecs/2, 1 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_1_3D<true><<< grid, block, 0, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_1_3D<false><<< grid, block, 0, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 4) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_4_3D<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_4_3D<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 8) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_8_3D<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_8_3D<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 16) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_16_3D<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_16_3D<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 32) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_Z_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_32_3D<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_32_3D<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
}
return MAGMA_SUCCESS;
}
|
eedd94bb7a95793ea2a7e7bff08c5faba251bc25.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void addVectorsMask(float *A, float *B, float *C, int size)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= size)   // skip out-of-range threads in the last (partial) block
return;
C[i] = A[i] + B[i];
}
void addVectors(float *A, float *B, float *C, int size)
{
float *devPtrA = 0,*devPtrB = 0,*devPtrC = 0;
hipMalloc(&devPtrA,sizeof(float)* size);
hipMalloc(&devPtrB,sizeof(float)* size);
hipMalloc(&devPtrC,sizeof(float)* size);
hipMemcpy(devPtrA,A, sizeof(float)* size, hipMemcpyHostToDevice);
hipMemcpy(devPtrB,B, sizeof(float)* size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( addVectorsMask), dim3((size+1023)/1024),dim3(1024), 0, 0, devPtrA,devPtrB, devPtrC, size); // round the grid up so every element gets a thread
hipMemcpy(C,devPtrC, sizeof(float)* size, hipMemcpyDeviceToHost);
hipFree(devPtrA);
hipFree(devPtrB);
hipFree(devPtrC);
}
|
eedd94bb7a95793ea2a7e7bff08c5faba251bc25.cu
|
__global__ void addVectorsMask(float *A, float *B, float *C, int size)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= size)   // skip out-of-range threads in the last (partial) block
return;
C[i] = A[i] + B[i];
}
void addVectors(float *A, float *B, float *C, int size)
{
float *devPtrA = 0,*devPtrB = 0,*devPtrC = 0;
cudaMalloc(&devPtrA,sizeof(float)* size);
cudaMalloc(&devPtrB,sizeof(float)* size);
cudaMalloc(&devPtrC,sizeof(float)* size);
cudaMemcpy(devPtrA,A, sizeof(float)* size, cudaMemcpyHostToDevice);
cudaMemcpy(devPtrB,B, sizeof(float)* size, cudaMemcpyHostToDevice);
addVectorsMask<<<(size+1023)/1024,1024>>>(devPtrA,devPtrB, devPtrC, size); // round the grid up so every element gets a thread
cudaMemcpy(C,devPtrC, sizeof(float)* size, cudaMemcpyDeviceToHost);
cudaFree(devPtrA);
cudaFree(devPtrB);
cudaFree(devPtrC);
}
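// ---------------------------------------------------------------------------
// Usage sketch (illustrative only): a minimal host driver for addVectors.
// The array size and fill values are arbitrary, and CUDA error checking is
// omitted for brevity.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>
int main()
{
    const int size = 1 << 20;                        // 1M elements
    std::vector<float> A(size, 1.0f), B(size, 2.0f), C(size, 0.0f);
    addVectors(A.data(), B.data(), C.data(), size);  // C[i] = A[i] + B[i] on the GPU
    std::printf("C[0] = %f, C[size-1] = %f (expected 3.0)\n", C[0], C[size - 1]);
    return 0;
}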
|
ef3294f992e9a3ad24e91b2ce714c9959711d8a1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// gsl-lite is based on GSL: Guidelines Support Library.
// For more information see https://github.com/martinmoene/gsl-lite
//
// Copyright (c) 2015 Martin Moene
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "../gsl-lite.t.hpp"
#include <memory>
__global__ void preconditionAssertionKernel( int i, int j )
{
gsl_Expects( i >= 0 );
gsl_ExpectsAudit( i < j );
}
CASE( "CUDA: Precondition and postcondition assertions can be used in kernel code" )
{
hipLaunchKernelGGL(( preconditionAssertionKernel), dim3(1), dim3(1), 0, 0, 0, 1 );
// TODO: check for failure
}
__global__ void spanKernel( gsl::span< int > span )
{
int* data = span.data();
gsl_CONFIG_SPAN_INDEX_TYPE size = span.size();
if (size > 0)
{
span[ 0 ] = 42;
at( span, 0 ) = 42;
}
// TODO: add more tests
}
CASE( "CUDA: span<> can be used in kernel code" )
{
hipLaunchKernelGGL(( spanKernel), dim3(1), dim3(1), 0, 0, gsl::span< int >( ) );
// TODO: check for failure
// TODO: add more tests
}
__global__ void notNullKernel( gsl::not_null< int* > ptr )
{
// TODO: add more tests
}
CASE( "CUDA: not_null<> can be used in kernel code" )
{
// TODO: run kernel
// TODO: check for failure
// TODO: add more tests
}
|
ef3294f992e9a3ad24e91b2ce714c9959711d8a1.cu
|
//
// gsl-lite is based on GSL: Guidelines Support Library.
// For more information see https://github.com/martinmoene/gsl-lite
//
// Copyright (c) 2015 Martin Moene
// Copyright (c) 2015 Microsoft Corporation. All rights reserved.
//
// This code is licensed under the MIT License (MIT).
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "../gsl-lite.t.hpp"
#include <memory>
__global__ void preconditionAssertionKernel( int i, int j )
{
gsl_Expects( i >= 0 );
gsl_ExpectsAudit( i < j );
}
CASE( "CUDA: Precondition and postcondition assertions can be used in kernel code" )
{
preconditionAssertionKernel<<<1, 1>>>( 0, 1 );
// TODO: check for failure
}
__global__ void spanKernel( gsl::span< int > span )
{
int* data = span.data();
gsl_CONFIG_SPAN_INDEX_TYPE size = span.size();
if (size > 0)
{
span[ 0 ] = 42;
at( span, 0 ) = 42;
}
// TODO: add more tests
}
CASE( "CUDA: span<> can be used in kernel code" )
{
spanKernel<<<1, 1>>>( gsl::span< int >( ) );
// TODO: check for failure
// TODO: add more tests
}
__global__ void notNullKernel( gsl::not_null< int* > ptr )
{
// TODO: add more tests
}
CASE( "CUDA: not_null<> can be used in kernel code" )
{
// TODO: run kernel
// TODO: check for failure
// TODO: add more tests
}
|
6902eb2da266b8bd1b40df1300ced5ef913a7b5b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* pathtracer.cu - Copyright 2019 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file implements the shading stage of the wavefront algorithm.
It takes a buffer of hit results and populates a new buffer with
extension rays. Shadow rays are added with 'potential contributions'
as fire-and-forget rays, to be traced later. Streams are compacted
using simple atomics. The kernel is a 'persistent kernel': a fixed
number of threads fights for food by atomically decreasing a counter.
The implemented path tracer is deliberately simple.
This file is as similar as possible to the one in OptixPrime_B.
*/
#include "noerrors.h"
// path state flags
#define S_SPECULAR 1 // previous path vertex was specular
#define S_BOUNCED 2 // path encountered a diffuse vertex
#define S_VIASPECULAR 4 // path has seen at least one specular vertex
// readability defines; data layout is optimized for 128-bit accesses
#define INSTANCEIDX (prim >> 20)
#define HIT_U hitData.x
#define HIT_V hitData.y
#define HIT_T hitData.w
#define RAY_O make_float3( O4 )
#define FLAGS data
#define PATHIDX (data >> 8)
// +-----------------------------------------------------------------------------+
// | shadeKernel |
// | Implements the shade phase of the wavefront path tracer. LH2'19|
// +-----------------------------------------------------------------------------+
__global__ __launch_bounds__( 128 /* max block size */, 4 /* min blocks per sm */ )
void shadeKernel( float4* accumulator, const uint stride,
float4* pathStates, const float4* hits, float4* connections,
const uint R0, const uint* blueNoise, const int pass,
const int probePixelIdx, const int pathLength, const int w, const int h, const float spreadAngle,
const float3 p1, const float3 p2, const float3 p3, const float3 pos, const int pathCount )
{
// respect boundaries
int jobIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (jobIndex >= pathCount) return;
// gather data by reading sets of four floats for optimal throughput
const float4 O4 = pathStates[jobIndex]; // ray origin xyz, w can be ignored
const float4 D4 = pathStates[jobIndex + stride]; // ray direction xyz
	float4 T4 = pathLength == 1 ? make_float4( 1 ) /* faster */ : pathStates[jobIndex + stride * 2]; // path throughput rgb
const float4 hitData = hits[jobIndex];
const float bsdfPdf = T4.w;
// derived data
uint data = __float_as_uint( O4.w );
const float3 D = make_float3( D4 );
const int prim = __float_as_int( hitData.z );
const int primIdx = prim == -1 ? prim : (prim & 0xfffff);
float3 throughput = make_float3( T4 );
const CoreTri4* instanceTriangles = (const CoreTri4*)instanceDescriptors[INSTANCEIDX].triangles;
const uint pathIdx = PATHIDX;
const uint pixelIdx = pathIdx % (w * h);
const uint sampleIdx = pathIdx / (w * h) + pass;
// initialize depth in accumulator for DOF shader
if (pathLength == 1) accumulator[pixelIdx].w += primIdx == NOHIT ? 10000 : HIT_T;
// use skydome if we didn't hit any geometry
if (primIdx == NOHIT)
{
float3 contribution = throughput * make_float3( SampleSkydome( D, pathLength ) ) * (1.0f / bsdfPdf);
		CLAMPINTENSITY; // limit magnitude of throughput vector to combat fireflies
FIXNAN_FLOAT3( contribution );
accumulator[pixelIdx] += make_float4( contribution, 0 );
return;
}
// object picking
if (pixelIdx == probePixelIdx && pathLength == 1 && sampleIdx == 0)
		counters->probedInstid = INSTANCEIDX, // record instance id at the selected pixel
counters->probedTriid = primIdx, // record primitive id at the selected pixel
counters->probedDist = HIT_T; // record primary ray hit distance
// get shadingData and normals
ShadingData shadingData;
float3 N, iN, fN, T;
const float3 I = RAY_O + HIT_T * D;
const float coneWidth = spreadAngle * HIT_T;
GetShadingData( D, HIT_U, HIT_V, coneWidth, instanceTriangles[primIdx], INSTANCEIDX, shadingData, N, iN, fN, T );
// we need to detect alpha in the shading code.
if (shadingData.flags & 1)
{
if (pathLength < MAXPATHLENGTH)
{
const uint extensionRayIdx = atomicAdd( &counters->extensionRays, 1 );
pathStates[extensionRayIdx] = make_float4( I + D * geometryEpsilon, O4.w );
pathStates[extensionRayIdx + stride] = D4;
if (!(isfinite( T4.x + T4.y + T4.z ))) T4 = make_float4( 0, 0, 0, T4.w );
pathStates[extensionRayIdx + stride * 2] = T4;
}
return;
}
// path regularization
// if (FLAGS & S_BOUNCED) shadingData.roughness2 = max( 0.7f, shadingData.roughness2 );
// stop on light
if (shadingData.IsEmissive() /* r, g or b exceeds 1 */)
{
const float DdotNL = -dot( D, N );
float3 contribution = make_float3( 0 ); // initialization required.
if (DdotNL > 0 /* lights are not double sided */)
{
if (pathLength == 1 || (FLAGS & S_SPECULAR) > 0)
{
// only camera rays will be treated special
contribution = shadingData.color;
}
else
{
// last vertex was not specular: apply MIS
const float3 lastN = UnpackNormal( __float_as_uint( D4.w ) );
const CoreTri& tri = (const CoreTri&)instanceTriangles[primIdx];
const float lightPdf = CalculateLightPDF( D, HIT_T, tri.area, N );
const float pickProb = LightPickProb( tri.ltriIdx, RAY_O, lastN, I /* the N at the previous vertex */ );
				if ((bsdfPdf + lightPdf * pickProb) > 0) contribution = throughput * shadingData.color * (1.0f / (bsdfPdf + lightPdf * pickProb));
}
CLAMPINTENSITY;
FIXNAN_FLOAT3( contribution );
accumulator[pixelIdx] += make_float4( contribution, 0 );
}
return;
}
// detect specular surfaces
if (ROUGHNESS < 0.01f) FLAGS |= S_SPECULAR; else FLAGS &= ~S_SPECULAR;
// initialize seed based on pixel index
uint seed = WangHash( pathIdx + R0 /* well-seeded xor32 is all you need */ );
// normal alignment for backfacing polygons
const float flip = (dot( D, N ) > 0) ? -1 : 1;
N *= flip; // fix geometric normal
iN *= flip; // fix interpolated normal (consistent normal interpolation)
fN *= flip; // fix final normal (includes normal map)
// apply postponed bsdf pdf
throughput *= 1.0f / bsdfPdf;
// next event estimation: connect eye path to light
if (!(FLAGS & S_SPECULAR)) // skip for specular vertices
{
float3 lightColor;
float r0, r1, pickProb, lightPdf = 0;
if (sampleIdx < 256)
{
const uint x = (pixelIdx % w) & 127, y = (pixelIdx / w) & 127;
r0 = blueNoiseSampler( blueNoise, x, y, sampleIdx, 4 );
r1 = blueNoiseSampler( blueNoise, x, y, sampleIdx, 5 );
}
else
{
r0 = RandomFloat( seed );
r1 = RandomFloat( seed );
}
float3 L = RandomPointOnLight( r0, r1, I, fN, pickProb, lightPdf, lightColor ) - I;
const float dist = length( L );
L *= 1.0f / dist;
const float NdotL = dot( L, fN );
if (NdotL > 0 && dot( fN, L ) > 0 && lightPdf > 0)
{
float bsdfPdf;
const float3 sampledBSDF = EvaluateBSDF( shadingData, fN, T, D * -1.0f, L, bsdfPdf );
if (bsdfPdf > 0)
{
// calculate potential contribution
float3 contribution = throughput * sampledBSDF * lightColor * (NdotL / (pickProb * lightPdf + bsdfPdf));
FIXNAN_FLOAT3( contribution );
CLAMPINTENSITY;
// add fire-and-forget shadow ray to the connections buffer
const uint shadowRayIdx = atomicAdd( &counters->shadowRays, 1 ); // compaction
connections[shadowRayIdx] = make_float4( SafeOrigin( I, L, N, geometryEpsilon ), 0 ); // O4
connections[shadowRayIdx + stride * MAXPATHLENGTH] = make_float4( L, dist - 2 * geometryEpsilon ); // D4
connections[shadowRayIdx + stride * 2 * MAXPATHLENGTH] = make_float4( contribution, __int_as_float( pixelIdx ) ); // E4
}
}
}
// cap at one diffuse bounce (because of this we also don't need Russian roulette)
if (FLAGS & S_BOUNCED) return;
// depth cap
if (pathLength == MAXPATHLENGTH /* don't fill arrays with rays we won't trace */) return;
// evaluate bsdf to obtain direction for next path segment
float3 R;
float newBsdfPdf, r3, r4;
if (sampleIdx < 256)
{
const uint x = (pixelIdx % w) & 127, y = (pixelIdx / w) & 127;
r3 = blueNoiseSampler( blueNoise, x, y, sampleIdx, 4 );
r4 = blueNoiseSampler( blueNoise, x, y, sampleIdx, 5 );
}
else
{
r3 = RandomFloat( seed );
r4 = RandomFloat( seed );
}
const float3 bsdf = SampleBSDF( shadingData, fN, N, T, D * -1.0f, r3, r4, R, newBsdfPdf );
if (newBsdfPdf < EPSILON || isnan( newBsdfPdf )) return;
// write extension ray
const uint extensionRayIdx = atomicAdd( &counters->extensionRays, 1 ); // compact
const uint packedNormal = PackNormal( fN );
if (!(FLAGS & S_SPECULAR)) FLAGS |= S_BOUNCED; else FLAGS |= S_VIASPECULAR;
((float4*)pathStates)[extensionRayIdx] = make_float4( SafeOrigin( I, R, N, geometryEpsilon ), __uint_as_float( FLAGS ) );
((float4*)pathStates)[extensionRayIdx + stride] = make_float4( R, __uint_as_float( packedNormal ) );
FIXNAN_FLOAT3( throughput );
((float4*)pathStates)[extensionRayIdx + stride * 2] = make_float4( throughput * bsdf * abs( dot( fN, R ) ), newBsdfPdf );
}
// +-----------------------------------------------------------------------------+
// | shadeKernel |
// | Host-side access point for the shadeKernel code. LH2'19|
// +-----------------------------------------------------------------------------+
__host__ void shade( const int pathCount, float4* accumulator, const uint stride,
float4* pathStates, const float4* hits, float4* connections,
const uint R0, const uint* blueNoise, const int pass,
const int probePixelIdx, const int pathLength, const int scrwidth, const int scrheight, const float spreadAngle,
const float3 p1, const float3 p2, const float3 p3, const float3 pos )
{
const dim3 gridDim( NEXTMULTIPLEOF( pathCount, 128 ) / 128, 1 ), blockDim( 128, 1 );
hipLaunchKernelGGL(( shadeKernel), dim3(gridDim.x), dim3(128), 0, 0, accumulator, stride, pathStates, hits, connections, R0, blueNoise,
pass, probePixelIdx, pathLength, scrwidth, scrheight, spreadAngle, p1, p2, p3, pos, pathCount );
}
// EOF
|
6902eb2da266b8bd1b40df1300ced5ef913a7b5b.cu
|
/* pathtracer.cu - Copyright 2019 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file implements the shading stage of the wavefront algorithm.
It takes a buffer of hit results and populates a new buffer with
extension rays. Shadow rays are added with 'potential contributions'
as fire-and-forget rays, to be traced later. Streams are compacted
using simple atomics. The kernel is a 'persistent kernel': a fixed
number of threads fights for food by atomically decreasing a counter.
The implemented path tracer is deliberately simple.
This file is as similar as possible to the one in OptixPrime_B.
*/
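// A hedged illustration of the compaction scheme described above (comments only,
// not part of the original kernel): each surviving path reserves a dense output
// slot with a single atomic, e.g.
//
//     const uint slot = atomicAdd( &counters->extensionRays, 1 ); // claim the next free slot
//     pathStates[slot] = ...;                                     // write the survivor there
//
// so the next wave only traces slots [0, counters->extensionRays).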
#include "noerrors.h"
// path state flags
#define S_SPECULAR 1 // previous path vertex was specular
#define S_BOUNCED 2 // path encountered a diffuse vertex
#define S_VIASPECULAR 4 // path has seen at least one specular vertex
// readability defines; data layout is optimized for 128-bit accesses
#define INSTANCEIDX (prim >> 20)
#define HIT_U hitData.x
#define HIT_V hitData.y
#define HIT_T hitData.w
#define RAY_O make_float3( O4 )
#define FLAGS data
#define PATHIDX (data >> 8)
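// A minimal sketch (not in the original source) of the packing that the FLAGS and
// PATHIDX macros above decode: the path index lives in the upper 24 bits of the
// state word stored in O4.w, the flags in the lower 8 bits.
static __device__ __forceinline__ uint PackPathState( const uint pathIdx, const uint flags )
{
	return (pathIdx << 8) + (flags & 255u); // inverse of PATHIDX (data >> 8) and the flag tests on FLAGS
}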
// +-----------------------------------------------------------------------------+
// | shadeKernel |
// | Implements the shade phase of the wavefront path tracer. LH2'19|
// +-----------------------------------------------------------------------------+
__global__ __launch_bounds__( 128 /* max block size */, 4 /* min blocks per sm */ )
void shadeKernel( float4* accumulator, const uint stride,
float4* pathStates, const float4* hits, float4* connections,
const uint R0, const uint* blueNoise, const int pass,
const int probePixelIdx, const int pathLength, const int w, const int h, const float spreadAngle,
const float3 p1, const float3 p2, const float3 p3, const float3 pos, const int pathCount )
{
// respect boundaries
int jobIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (jobIndex >= pathCount) return;
// gather data by reading sets of four floats for optimal throughput
const float4 O4 = pathStates[jobIndex]; // ray origin xyz, w can be ignored
const float4 D4 = pathStates[jobIndex + stride]; // ray direction xyz
	float4 T4 = pathLength == 1 ? make_float4( 1 ) /* faster */ : pathStates[jobIndex + stride * 2]; // path throughput rgb
const float4 hitData = hits[jobIndex];
const float bsdfPdf = T4.w;
// derived data
uint data = __float_as_uint( O4.w );
const float3 D = make_float3( D4 );
const int prim = __float_as_int( hitData.z );
const int primIdx = prim == -1 ? prim : (prim & 0xfffff);
float3 throughput = make_float3( T4 );
const CoreTri4* instanceTriangles = (const CoreTri4*)instanceDescriptors[INSTANCEIDX].triangles;
const uint pathIdx = PATHIDX;
const uint pixelIdx = pathIdx % (w * h);
const uint sampleIdx = pathIdx / (w * h) + pass;
// initialize depth in accumulator for DOF shader
if (pathLength == 1) accumulator[pixelIdx].w += primIdx == NOHIT ? 10000 : HIT_T;
// use skydome if we didn't hit any geometry
if (primIdx == NOHIT)
{
float3 contribution = throughput * make_float3( SampleSkydome( D, pathLength ) ) * (1.0f / bsdfPdf);
		CLAMPINTENSITY; // limit magnitude of throughput vector to combat fireflies
FIXNAN_FLOAT3( contribution );
accumulator[pixelIdx] += make_float4( contribution, 0 );
return;
}
// object picking
if (pixelIdx == probePixelIdx && pathLength == 1 && sampleIdx == 0)
		counters->probedInstid = INSTANCEIDX, // record instance id at the selected pixel
counters->probedTriid = primIdx, // record primitive id at the selected pixel
counters->probedDist = HIT_T; // record primary ray hit distance
// get shadingData and normals
ShadingData shadingData;
float3 N, iN, fN, T;
const float3 I = RAY_O + HIT_T * D;
const float coneWidth = spreadAngle * HIT_T;
GetShadingData( D, HIT_U, HIT_V, coneWidth, instanceTriangles[primIdx], INSTANCEIDX, shadingData, N, iN, fN, T );
// we need to detect alpha in the shading code.
if (shadingData.flags & 1)
{
if (pathLength < MAXPATHLENGTH)
{
const uint extensionRayIdx = atomicAdd( &counters->extensionRays, 1 );
pathStates[extensionRayIdx] = make_float4( I + D * geometryEpsilon, O4.w );
pathStates[extensionRayIdx + stride] = D4;
if (!(isfinite( T4.x + T4.y + T4.z ))) T4 = make_float4( 0, 0, 0, T4.w );
pathStates[extensionRayIdx + stride * 2] = T4;
}
return;
}
// path regularization
// if (FLAGS & S_BOUNCED) shadingData.roughness2 = max( 0.7f, shadingData.roughness2 );
// stop on light
if (shadingData.IsEmissive() /* r, g or b exceeds 1 */)
{
const float DdotNL = -dot( D, N );
float3 contribution = make_float3( 0 ); // initialization required.
if (DdotNL > 0 /* lights are not double sided */)
{
if (pathLength == 1 || (FLAGS & S_SPECULAR) > 0)
{
					// only camera rays will be treated specially
contribution = shadingData.color;
}
else
{
// last vertex was not specular: apply MIS
const float3 lastN = UnpackNormal( __float_as_uint( D4.w ) );
const CoreTri& tri = (const CoreTri&)instanceTriangles[primIdx];
const float lightPdf = CalculateLightPDF( D, HIT_T, tri.area, N );
const float pickProb = LightPickProb( tri.ltriIdx, RAY_O, lastN, I /* the N at the previous vertex */ );
					if ((bsdfPdf + lightPdf * pickProb) > 0) contribution = throughput * shadingData.color * (1.0f / (bsdfPdf + lightPdf * pickProb));
}
CLAMPINTENSITY;
FIXNAN_FLOAT3( contribution );
accumulator[pixelIdx] += make_float4( contribution, 0 );
}
return;
}
// detect specular surfaces
if (ROUGHNESS < 0.01f) FLAGS |= S_SPECULAR; else FLAGS &= ~S_SPECULAR;
// initialize seed based on pixel index
uint seed = WangHash( pathIdx + R0 /* well-seeded xor32 is all you need */ );
// normal alignment for backfacing polygons
const float flip = (dot( D, N ) > 0) ? -1 : 1;
N *= flip; // fix geometric normal
iN *= flip; // fix interpolated normal (consistent normal interpolation)
fN *= flip; // fix final normal (includes normal map)
// apply postponed bsdf pdf
throughput *= 1.0f / bsdfPdf;
// next event estimation: connect eye path to light
if (!(FLAGS & S_SPECULAR)) // skip for specular vertices
{
float3 lightColor;
float r0, r1, pickProb, lightPdf = 0;
if (sampleIdx < 256)
{
const uint x = (pixelIdx % w) & 127, y = (pixelIdx / w) & 127;
r0 = blueNoiseSampler( blueNoise, x, y, sampleIdx, 4 );
r1 = blueNoiseSampler( blueNoise, x, y, sampleIdx, 5 );
}
else
{
r0 = RandomFloat( seed );
r1 = RandomFloat( seed );
}
float3 L = RandomPointOnLight( r0, r1, I, fN, pickProb, lightPdf, lightColor ) - I;
const float dist = length( L );
L *= 1.0f / dist;
const float NdotL = dot( L, fN );
		if (NdotL > 0 && lightPdf > 0) // NdotL is dot( L, fN ), so no separate facing check is needed
{
float bsdfPdf;
const float3 sampledBSDF = EvaluateBSDF( shadingData, fN, T, D * -1.0f, L, bsdfPdf );
if (bsdfPdf > 0)
{
// calculate potential contribution
float3 contribution = throughput * sampledBSDF * lightColor * (NdotL / (pickProb * lightPdf + bsdfPdf));
FIXNAN_FLOAT3( contribution );
CLAMPINTENSITY;
// add fire-and-forget shadow ray to the connections buffer
const uint shadowRayIdx = atomicAdd( &counters->shadowRays, 1 ); // compaction
connections[shadowRayIdx] = make_float4( SafeOrigin( I, L, N, geometryEpsilon ), 0 ); // O4
connections[shadowRayIdx + stride * MAXPATHLENGTH] = make_float4( L, dist - 2 * geometryEpsilon ); // D4
connections[shadowRayIdx + stride * 2 * MAXPATHLENGTH] = make_float4( contribution, __int_as_float( pixelIdx ) ); // E4
}
}
}
// cap at one diffuse bounce (because of this we also don't need Russian roulette)
if (FLAGS & S_BOUNCED) return;
// depth cap
if (pathLength == MAXPATHLENGTH /* don't fill arrays with rays we won't trace */) return;
// evaluate bsdf to obtain direction for next path segment
float3 R;
float newBsdfPdf, r3, r4;
if (sampleIdx < 256)
{
const uint x = (pixelIdx % w) & 127, y = (pixelIdx / w) & 127;
r3 = blueNoiseSampler( blueNoise, x, y, sampleIdx, 4 );
r4 = blueNoiseSampler( blueNoise, x, y, sampleIdx, 5 );
}
else
{
r3 = RandomFloat( seed );
r4 = RandomFloat( seed );
}
const float3 bsdf = SampleBSDF( shadingData, fN, N, T, D * -1.0f, r3, r4, R, newBsdfPdf );
if (newBsdfPdf < EPSILON || isnan( newBsdfPdf )) return;
// write extension ray
const uint extensionRayIdx = atomicAdd( &counters->extensionRays, 1 ); // compact
const uint packedNormal = PackNormal( fN );
if (!(FLAGS & S_SPECULAR)) FLAGS |= S_BOUNCED; else FLAGS |= S_VIASPECULAR;
((float4*)pathStates)[extensionRayIdx] = make_float4( SafeOrigin( I, R, N, geometryEpsilon ), __uint_as_float( FLAGS ) );
((float4*)pathStates)[extensionRayIdx + stride] = make_float4( R, __uint_as_float( packedNormal ) );
FIXNAN_FLOAT3( throughput );
((float4*)pathStates)[extensionRayIdx + stride * 2] = make_float4( throughput * bsdf * abs( dot( fN, R ) ), newBsdfPdf );
}
// +-----------------------------------------------------------------------------+
// | shadeKernel |
// | Host-side access point for the shadeKernel code. LH2'19|
// +-----------------------------------------------------------------------------+
__host__ void shade( const int pathCount, float4* accumulator, const uint stride,
float4* pathStates, const float4* hits, float4* connections,
const uint R0, const uint* blueNoise, const int pass,
const int probePixelIdx, const int pathLength, const int scrwidth, const int scrheight, const float spreadAngle,
const float3 p1, const float3 p2, const float3 p3, const float3 pos )
{
const dim3 gridDim( NEXTMULTIPLEOF( pathCount, 128 ) / 128, 1 ), blockDim( 128, 1 );
shadeKernel<<<gridDim.x, 128>>>( accumulator, stride, pathStates, hits, connections, R0, blueNoise,
pass, probePixelIdx, pathLength, scrwidth, scrheight, spreadAngle, p1, p2, p3, pos, pathCount );
}
// EOF
|
0799d11f35636945a2636b3b4d789be27a99a4d9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernelSumaMatrices(float *a, float *b,int m, int n) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
while(i<m){
j = threadIdx.y + blockIdx.y*blockDim.y;
while(j<n){
a[i*n+j]+=b[i*n+j];
j+= blockDim.y*gridDim.y;
}
i+=blockDim.x*gridDim.x;
}
}
|
0799d11f35636945a2636b3b4d789be27a99a4d9.cu
|
#include "includes.h"
__global__ void kernelSumaMatrices(float *a, float *b,int m, int n) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
while(i<m){
j = threadIdx.y + blockIdx.y*blockDim.y;
while(j<n){
a[i*n+j]+=b[i*n+j];
j+= blockDim.y*gridDim.y;
}
i+=blockDim.x*gridDim.x;
}
}
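// A hedged usage sketch (not part of the original pair): launching the grid-stride
// kernel above for an m x n row-major matrix; the 16x16 block shape is an
// arbitrary choice for this example.
void launchSumaMatricesExample(float* d_a, float* d_b, int m, int n) {
    dim3 block(16, 16);
    dim3 grid((m + block.x - 1) / block.x, (n + block.y - 1) / block.y);
    kernelSumaMatrices<<<grid, block>>>(d_a, d_b, m, n); // computes a += b element-wise
    cudaDeviceSynchronize();                             // wait so d_a holds the sum
}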
|
57b47e8d8acd242caad4600b34f867e4e1dbf9ed.hip
|
// !!! This is a file automatically generated by hipify!!!
//Darrien Park
#include "hip/hip_runtime.h"
#include <string.h>
#include <stdio.h>
//no field in cudaDeviceProperties for number of cores. Therefore need to determine based on compute capability
int getCores(hipDeviceProp_t dev_prop)
{
int cores = 0;
int sm = dev_prop.multiProcessorCount;
//start switch case based on major compute capability
switch (dev_prop.major){
//Fermi
case 2:
if (dev_prop.minor == 1)
cores = sm * 48;
else cores = sm * 32;
break;
//Kepler
case 3:
cores = sm * 192;
break;
//Maxwell
case 5:
cores = sm * 128;
break;
//Pascal
case 6:
if (dev_prop.minor == 1)
cores = sm * 128;
else if (dev_prop.minor == 0)
cores = sm * 64;
else printf("Unknown device type \n");
break;
//Volta
case 7:
if (dev_prop.minor == 0)
cores = sm * 64;
else printf("Unknown device type \n");
break;
//base case: can't be detected
default:
printf("Unknown device type \n");
break;
}
return cores;
}
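//note (assumed from public NVIDIA specs, not in the original): Turing (7.5) uses 64 cores/SM,
//Ampere uses 64 (8.0) or 128 (8.6/8.7), Ada (8.9) and Hopper (9.0) use 128; such devices
//currently fall through to the default branch above.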
int main(int argc, char * argv[])
{
int dev_count;
hipGetDeviceCount(& dev_count);
printf("Number of CUDA devices is [%d]\n\n",dev_count);
for(int i = 0; i < dev_count; i++){
int k = i+1;
printf("Device [%d]\n", k);
hipDeviceProp_t dev_props;
    hipGetDeviceProperties(&dev_props, i); //hipGetDeviceProperties(hipDeviceProp_t* prop, int device#)
printf(" Device Name: %s\n",dev_props.name);
printf(" Memory Clock Rate (KHz): %d\n",dev_props.memoryClockRate);
printf(" Number of Streaming Multiprocessors: %d\n",dev_props.multiProcessorCount);
printf(" Number of cores: %d\n",getCores(dev_props));
printf(" Warp Size: %d\n",dev_props.warpSize);
printf(" Total Global Memory: %d\n",dev_props.totalGlobalMem);
printf(" Total Constant Memory: %d\n",dev_props.totalConstMem);
printf(" Shared Memory/Block: %d\n",dev_props.sharedMemPerBlock);
printf(" Number of Registers/Block: %d\n",dev_props.regsPerBlock);
printf(" Number of Threads/Block: %d\n",dev_props.maxThreadsPerBlock);
printf(" Max Block Dimension: %d\n",dev_props.maxThreadsDim);
printf(" Max Grid Dimension: %d\n",dev_props.maxGridSize);
}
return 0;
}
|
57b47e8d8acd242caad4600b34f867e4e1dbf9ed.cu
|
//Darrien Park
#include "cuda_runtime.h"
#include <string.h>
#include <stdio.h>
//no field in cudaDeviceProperties for number of cores. Therefore need to determine based on compute capability
int getCores(cudaDeviceProp dev_prop)
{
int cores = 0;
int sm = dev_prop.multiProcessorCount;
//start switch case based on major compute capability
switch (dev_prop.major){
//Fermi
case 2:
if (dev_prop.minor == 1)
cores = sm * 48;
else cores = sm * 32;
break;
//Kepler
case 3:
cores = sm * 192;
break;
//Maxwell
case 5:
cores = sm * 128;
break;
//Pascal
case 6:
if (dev_prop.minor == 1)
cores = sm * 128;
else if (dev_prop.minor == 0)
cores = sm * 64;
else printf("Unknown device type \n");
break;
//Volta
case 7:
if (dev_prop.minor == 0)
cores = sm * 64;
else printf("Unknown device type \n");
break;
//base case: can't be detected
default:
printf("Unknown device type \n");
break;
}
return cores;
}
int main(int argc, char * argv[])
{
int dev_count;
cudaGetDeviceCount(& dev_count);
printf("Number of CUDA devices is [%d]\n\n",dev_count);
for(int i = 0; i < dev_count; i++){
int k = i+1;
printf("Device [%d]\n", k);
cudaDeviceProp dev_props;
    cudaGetDeviceProperties(&dev_props, i); //cudaGetDeviceProperties(cudaDeviceProp* prop, int device#)
printf(" Device Name: %s\n",dev_props.name);
printf(" Memory Clock Rate (KHz): %d\n",dev_props.memoryClockRate);
printf(" Number of Streaming Multiprocessors: %d\n",dev_props.multiProcessorCount);
printf(" Number of cores: %d\n",getCores(dev_props));
printf(" Warp Size: %d\n",dev_props.warpSize);
printf(" Total Global Memory: %d\n",dev_props.totalGlobalMem);
printf(" Total Constant Memory: %d\n",dev_props.totalConstMem);
printf(" Shared Memory/Block: %d\n",dev_props.sharedMemPerBlock);
printf(" Number of Registers/Block: %d\n",dev_props.regsPerBlock);
printf(" Number of Threads/Block: %d\n",dev_props.maxThreadsPerBlock);
printf(" Max Block Dimension: %d\n",dev_props.maxThreadsDim);
printf(" Max Grid Dimension: %d\n",dev_props.maxGridSize);
}
return 0;
}
|
53f1106d8416e2677114e5e65988cf48a8d8007d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/interpolate_kernel.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/fast_divmod.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/interpolate_function.h"
namespace phi {
using paddle::platform::FastDivMod;
template <typename T>
__forceinline__ __device__ void PreCalculatorForLinearInterpInputIndex(
int* in_img_idx,
int* x_id,
T* lambda1,
T* lambda2,
T src_x,
const int in_img_x) {
src_x = (src_x > 0) ? src_x : 0.f;
*in_img_idx = static_cast<int>(src_x);
*x_id = (*in_img_idx < in_img_x - 1) ? 1 : 0;
*lambda1 = src_x - *in_img_idx;
*lambda2 = 1.f - *lambda1;
}
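// The helper above clamps src_x to be non-negative and returns in_img_idx = floor(src_x),
// lambda1 = src_x - in_img_idx, lambda2 = 1 - lambda1, with x_id = 0 when in_img_idx is the
// last input element; callers then blend as lambda2 * in[idx] + lambda1 * in[idx + x_id * stride].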
template <typename T>
__global__ void KeLinearInterpFw(const T* in,
const size_t in_img_w,
const size_t input_w,
T* out,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_w,
const bool align_corners,
const int align_mode,
const DataLayout data_layout) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
bool align_flag = (align_mode == 0 && !align_corners);
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id, out_img_idy, out_img_idx;
if (data_layout == DataLayout::kNCHW) {
channel_id = out_id_w / out_img_size;
out_img_idx = tid % out_img_w;
} else {
out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
channel_id = tid % num_channels;
}
int in_img_idx = align_flag
? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
: static_cast<int>(ratio_w * out_img_idx);
in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; // w
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; // w_id
T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
src_w = (src_w > 0) ? src_w : 0;
T w1lambda =
align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
T w2lambda = 1.f - w1lambda;
if (data_layout == DataLayout::kNCHW) {
const T* in_pos =
&in[out_id_h * out_id_w + channel_id * in_img_size + in_img_idx];
// linear interpolation
out[out_id_h * output_w + out_id_w] =
w2lambda * in_pos[0] + w1lambda * in_pos[w_id];
} else {
const T* in_pos =
&in[out_id_h * input_w + in_img_idx * num_channels + channel_id];
// linear interpolation
out[out_id_h * output_w + out_id_w] =
w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels];
}
}
}
template <typename T>
__global__ void KeNearestNeighborInterpNCHWFw(const T* in,
const size_t in_img_h,
const size_t in_img_w,
T* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t nc,
const float ratio_h,
const float ratio_w,
const bool align_corners) {
int out_img_idx = threadIdx.x + blockIdx.x * blockDim.x;
int out_img_idy = threadIdx.y + blockIdx.y * blockDim.y;
int nc_id = threadIdx.z + blockIdx.z * blockDim.z;
int nc_stride = blockDim.z * gridDim.z;
// nearest_sampling by multiple read in_addr and write to out_addr
int in_img_idx = (align_corners)
? static_cast<int>(ratio_w * out_img_idx + 0.5)
: static_cast<int>(ratio_w * out_img_idx);
int in_img_idy = (align_corners)
? static_cast<int>(ratio_h * out_img_idy + 0.5)
: static_cast<int>(ratio_h * out_img_idy);
int in_index = (nc_id * in_img_h + in_img_idy) * in_img_w + in_img_idx;
int in_index_stride = nc_stride * in_img_h * in_img_w;
int out_index = (nc_id * out_img_h + out_img_idy) * out_img_w + out_img_idx;
int out_index_stride = nc_stride * out_img_h * out_img_w;
// prevent from multiple threads writing
if (out_img_idx < out_img_w && out_img_idy < out_img_h) {
while (nc_id < nc) {
out[out_index] = in[in_index];
in_index += in_index_stride;
out_index += out_index_stride;
nc_id += nc_stride;
}
}
}
template <typename T>
__global__ void KeNearestNeighborInterpFw(
const T* in,
const size_t in_img_h,
const size_t in_img_w,
const size_t input_h,
const size_t input_w,
T* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_h,
const float ratio_w,
const bool align_corners,
funcs::FastDivModForInterpolate divmods) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int in_img_size = in_img_h * in_img_w;
int out_img_size = out_img_h * out_img_w;
for (; tid < nthreads; tid += stride) {
auto out_id_divmod = divmods.output_w_div.Divmod(tid);
int out_id_h = out_id_divmod.val[0];
int out_id_w = out_id_divmod.val[1];
int channel_id = divmods.channels_div.Divmod(tid).val[1];
auto outimg_id_divmod = divmods.output_wc_div.Divmod(out_id_w);
int out_img_idy = outimg_id_divmod.val[0];
int out_img_idx =
divmods.channels_div.Divmod(outimg_id_divmod.val[1]).val[0];
int in_img_idy = (align_corners)
? static_cast<int>(ratio_h * out_img_idy + 0.5)
: static_cast<int>(ratio_h * out_img_idy);
int in_img_idx = (align_corners)
? static_cast<int>(ratio_w * out_img_idx + 0.5)
: static_cast<int>(ratio_w * out_img_idx);
out[tid] = in[out_id_h * input_w + in_img_idy * in_img_w * num_channels +
in_img_idx * num_channels + channel_id];
}
}
template <typename T>
__global__ void KeBilinearInterpFw(const T* in,
const size_t in_img_h,
const size_t in_img_w,
const size_t input_h,
const size_t input_w,
T* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_h,
const float ratio_w,
const T align_type_value,
funcs::FastDivModForInterpolate divmods) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
auto out_id_divmod = divmods.output_w_div.Divmod(tid);
int out_id_h = out_id_divmod.val[0];
int out_id_w = out_id_divmod.val[1];
int channel_id = divmods.channels_div.Divmod(tid).val[1];
auto outimg_id_divmod = divmods.output_wc_div.Divmod(out_id_w);
int out_img_idy = outimg_id_divmod.val[0];
int out_img_idx =
divmods.channels_div.Divmod(outimg_id_divmod.val[1]).val[0];
int in_img_idx, in_img_idy, h_id, w_id;
T h1lambda, w1lambda, h2lambda, w2lambda;
T src_w = ratio_w * (out_img_idx + align_type_value) - align_type_value;
T src_h = ratio_h * (out_img_idy + align_type_value) - align_type_value;
PreCalculatorForLinearInterpInputIndex(
&in_img_idx, &w_id, &w1lambda, &w2lambda, src_w, in_img_w);
PreCalculatorForLinearInterpInputIndex(
&in_img_idy, &h_id, &h1lambda, &h2lambda, src_h, in_img_h);
// bilinear interpolation
const T* in_pos =
&in[out_id_h * input_w + in_img_idy * in_img_w * num_channels +
in_img_idx * num_channels + channel_id];
out[tid] =
h2lambda *
(w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]) +
h1lambda *
(w2lambda * in_pos[h_id * in_img_w * num_channels] +
w1lambda *
in_pos[h_id * in_img_w * num_channels + w_id * num_channels]);
}
}
template <typename T>
__global__ void KeBilinearInterpNCHWFw(const T* in,
const size_t in_img_h,
const size_t in_img_w,
T* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t nc,
const float ratio_h,
const float ratio_w,
const T align_type_value) {
int out_img_idx = threadIdx.x + blockIdx.x * blockDim.x;
int out_img_idy = threadIdx.y + blockIdx.y * blockDim.y;
int nc_id = threadIdx.z + blockIdx.z * blockDim.z;
int nc_stride = blockDim.z * gridDim.z;
int in_img_idx, in_img_idy, h_id, w_id;
T h1lambda, w1lambda, h2lambda, w2lambda;
T src_w = ratio_w * (out_img_idx + align_type_value) - align_type_value;
T src_h = ratio_h * (out_img_idy + align_type_value) - align_type_value;
PreCalculatorForLinearInterpInputIndex(
&in_img_idx, &w_id, &w1lambda, &w2lambda, src_w, in_img_w);
PreCalculatorForLinearInterpInputIndex(
&in_img_idy, &h_id, &h1lambda, &h2lambda, src_h, in_img_h);
int in_index = (nc_id * in_img_h + in_img_idy) * in_img_w + in_img_idx;
int in_index_stride = nc_stride * in_img_h * in_img_w;
int out_index = (nc_id * out_img_h + out_img_idy) * out_img_w + out_img_idx;
int out_index_stride = nc_stride * out_img_h * out_img_w;
// prevent from multiple threads writing
if (out_img_idx < out_img_w && out_img_idy < out_img_h) {
while (nc_id < nc) {
const T* in_pos = &in[in_index];
out[out_index] =
h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) +
h1lambda * (w2lambda * in_pos[h_id * in_img_w] +
w1lambda * in_pos[h_id * in_img_w + w_id]);
in_index += in_index_stride;
out_index += out_index_stride;
nc_id += nc_stride;
}
}
}
template <typename T>
__device__ __forceinline__ static T Kecubic_interp(
const T x0, const T x1, const T x2, const T x3, T t) {
T coeffs[4];
T a = -0.75;
T x_1 = t;
T x_2 = 1.0 - t;
coeffs[0] = funcs::CubicConvolution2<T>(x_1 + 1.0, a);
coeffs[1] = funcs::CubicConvolution1<T>(x_1, a);
coeffs[2] = funcs::CubicConvolution1<T>(x_2, a);
coeffs[3] = funcs::CubicConvolution2<T>(x_2 + 1.0, a);
return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3];
}
template <typename T>
__global__ void KeBicubicInterpFw(const T* in,
const size_t in_img_h,
const size_t in_img_w,
const size_t input_h,
const size_t input_w,
T* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_h,
const float ratio_w,
const bool align_corners,
const DataLayout data_layout) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id, out_img_idy, out_img_idx;
if (data_layout == DataLayout::kNCHW) {
channel_id = out_id_w / out_img_size;
out_img_idy = (out_id_w % out_img_size) / out_img_w;
out_img_idx = tid % out_img_w;
} else {
out_img_idy = out_id_w / (out_img_w * num_channels);
out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
channel_id = tid % num_channels;
}
T in_img_idy = align_corners
? static_cast<T>(ratio_h * out_img_idy)
: static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5);
int input_y = floorf(in_img_idy);
const T y_t = in_img_idy - input_y;
T in_img_idx = align_corners
? static_cast<T>(ratio_w * out_img_idx)
: static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5);
int input_x = floorf(in_img_idx);
const T x_t = in_img_idx - input_x;
T coefficients[4];
const T* in_pos_0;
const T* in_pos_1;
const T* in_pos_2;
const T* in_pos_3;
int access_x_0;
if (data_layout == DataLayout::kNCHW) {
for (int k = 0; k < 4; k++) {
int access_y =
max(min(input_y - 1 + k, static_cast<int>(in_img_h - 1)), 0);
access_x_0 = max(min(input_x - 1, static_cast<int>(in_img_w - 1)), 0);
int access_x_1 =
max(min(input_x + 0, static_cast<int>(in_img_w - 1)), 0);
int access_x_2 =
max(min(input_x + 1, static_cast<int>(in_img_w - 1)), 0);
int access_x_3 =
max(min(input_x + 2, static_cast<int>(in_img_w - 1)), 0);
in_pos_0 = &in[out_id_h * input_w + channel_id * in_img_size +
access_y * in_img_w + access_x_0];
in_pos_1 = &in[out_id_h * input_w + channel_id * in_img_size +
access_y * in_img_w + access_x_1];
in_pos_2 = &in[out_id_h * input_w + channel_id * in_img_size +
access_y * in_img_w + access_x_2];
in_pos_3 = &in[out_id_h * input_w + channel_id * in_img_size +
access_y * in_img_w + access_x_3];
coefficients[k] = Kecubic_interp<T>(
in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t);
}
out[out_id_h * output_w + out_id_w] = Kecubic_interp<T>(coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
y_t);
} else {
for (int k = 0; k < 4; k++) {
int access_y =
max(min(input_y - 1 + k, static_cast<int>((in_img_h - 1))), 0);
int access_x_0 =
max(min(input_x - 1, static_cast<int>((in_img_w - 1))), 0);
int access_x_1 =
max(min(input_x + 0, static_cast<int>((in_img_w - 1))), 0);
int access_x_2 =
max(min(input_x + 1, static_cast<int>((in_img_w - 1))), 0);
int access_x_3 =
max(min(input_x + 2, static_cast<int>((in_img_w - 1))), 0);
const T* in_pos_0 =
&in[out_id_h * input_w + access_y * in_img_w * num_channels +
access_x_0 * num_channels + channel_id];
const T* in_pos_1 =
&in[out_id_h * input_w + access_y * in_img_w * num_channels +
access_x_1 * num_channels + channel_id];
const T* in_pos_2 =
&in[out_id_h * input_w + access_y * in_img_w * num_channels +
access_x_2 * num_channels + channel_id];
const T* in_pos_3 =
&in[out_id_h * input_w + access_y * in_img_w * num_channels +
access_x_3 * num_channels + channel_id];
coefficients[k] = Kecubic_interp(
in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t);
}
out[out_id_h * output_w + out_id_w] =
static_cast<T>(Kecubic_interp(coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
y_t));
}
}
}
template <typename T>
__global__ void KeTrilinearInterpFw(const T* in,
const size_t in_img_d,
const size_t in_img_h,
const size_t in_img_w,
const size_t input_h,
const size_t input_w,
T* out,
const size_t out_img_d,
const size_t out_img_h,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_d,
const float ratio_h,
const float ratio_w,
const bool align_corners,
const int align_mode,
const DataLayout data_layout) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
bool align_flag = (align_mode == 0 && !align_corners);
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id, out_img_idt, out_img_idy, out_img_idx;
if (data_layout == DataLayout::kNCHW) {
channel_id = out_id_w / out_img_size;
out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w;
out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h;
out_img_idx = tid % out_img_w;
} else {
out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels);
out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) /
(out_img_w * num_channels);
out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
channel_id = tid % num_channels;
}
int in_img_idt = align_flag
? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5)
: static_cast<int>(ratio_d * out_img_idt);
in_img_idt = (in_img_idt > 0) ? in_img_idt : 0;
int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0;
T src_d = ratio_d * (out_img_idt + 0.5) - 0.5;
src_d = (src_d > 0) ? src_d : 0;
T d1lambda =
align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt;
T d2lambda = 1.f - d1lambda;
int in_img_idy = align_flag
? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5)
: static_cast<int>(ratio_h * out_img_idy);
in_img_idy = (in_img_idy > 0) ? in_img_idy : 0;
int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
T src_h = ratio_h * (out_img_idy + 0.5) - 0.5;
src_h = (src_h > 0) ? src_h : 0;
T h1lambda =
align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy;
T h2lambda = 1.f - h1lambda;
int in_img_idx = align_flag
? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
: static_cast<int>(ratio_w * out_img_idx);
in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
src_w = (src_w > 0) ? src_w : 0;
T w1lambda =
align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
T w2lambda = 1.f - w1lambda;
if (data_layout == DataLayout::kNCHW) {
int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size +
(in_img_idt * in_img_h + in_img_idy) * in_img_w +
in_img_idx;
const T* in_pos1 = &in[in_pos1_idx];
int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w;
const T* in_pos2 = &in[in_pos2_idx];
// trilinear interpolation
out[out_id_h * output_w + out_id_w] =
d2lambda *
(h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id]) +
h1lambda * (w2lambda * in_pos1[h_id * in_img_w] +
w1lambda * in_pos1[h_id * in_img_w + w_id])) +
d1lambda *
(h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id]) +
h1lambda * (w2lambda * in_pos2[h_id * in_img_w] +
w1lambda * in_pos2[h_id * in_img_w + w_id]));
} else {
int in_pos1_idx = out_id_h * input_w +
in_img_idt * in_img_h * in_img_w * num_channels +
in_img_idy * in_img_w * num_channels +
in_img_idx * num_channels + channel_id;
const T* in_pos1 = &in[in_pos1_idx];
int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels;
const T* in_pos2 = &in[in_pos2_idx];
// trilinear interpolation
out[out_id_h * output_w + out_id_w] =
d2lambda *
(h2lambda * (w2lambda * in_pos1[0] +
w1lambda * in_pos1[w_id * num_channels]) +
h1lambda * (w2lambda * in_pos1[h_id * in_img_w * num_channels] +
w1lambda * in_pos1[h_id * in_img_w * num_channels +
w_id * num_channels])) +
d1lambda *
(h2lambda * (w2lambda * in_pos2[0] +
w1lambda * in_pos2[w_id * num_channels]) +
h1lambda * (w2lambda * in_pos2[h_id * in_img_w * num_channels] +
w1lambda * in_pos2[h_id * in_img_w * num_channels +
w_id * num_channels]));
}
}
}
template <typename T>
__global__ void KeNearestNeighbor3DInterpFw(const T* in,
const size_t in_img_d,
const size_t in_img_h,
const size_t in_img_w,
const size_t input_h,
const size_t input_w,
T* out,
const size_t out_img_d,
const size_t out_img_h,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_d,
const float ratio_h,
const float ratio_w,
const bool align_corners,
const DataLayout data_layout) {
int nthreads = output_h * output_w; // ncdhw
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id, out_img_idt, out_img_idy, out_img_idx;
if (data_layout == DataLayout::kNCHW) {
channel_id = out_id_w / out_img_size;
out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w;
out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h;
out_img_idx = tid % out_img_w;
} else {
out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels);
out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) /
(out_img_w * num_channels);
out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
channel_id = tid % num_channels;
}
int in_img_idt = (align_corners)
? static_cast<int>(ratio_d * out_img_idt + 0.5)
: static_cast<int>(ratio_d * out_img_idt);
int in_img_idy = (align_corners)
? static_cast<int>(ratio_h * out_img_idy + 0.5)
: static_cast<int>(ratio_h * out_img_idy);
int in_img_idx = (align_corners)
? static_cast<int>(ratio_w * out_img_idx + 0.5)
: static_cast<int>(ratio_w * out_img_idx);
if (data_layout == DataLayout::kNCHW) {
out[tid] = in[out_id_h * input_w + channel_id * in_img_size +
in_img_idt * in_img_h * in_img_w + in_img_idy * in_img_w +
in_img_idx];
} else {
out[tid] = in[out_id_h * input_w +
in_img_idt * in_img_h * in_img_w * num_channels +
in_img_idy * in_img_w * num_channels +
in_img_idx * num_channels + channel_id];
}
}
}
template <typename T, typename Context>
static void Interpolate1DCUDAFwd(
const Context& dev_ctx,
const DenseTensor& input,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout_str,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto* input_data = input.data<T>();
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
float scale_w = -1;
if (size_tensor && size_tensor->size() > 0) {
// have size tensor
auto new_size = funcs::get_new_shape(size_tensor.get());
out_w = new_size[0];
} else {
if (scale_tensor) {
auto scale_data =
funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
scale_w = scale_data[0];
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
} else {
if (scale.size() > 0) {
scale_w = scale[0];
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
}
}
if (scale_w > 0.) {
out_w = static_cast<int>(in_w * scale_w);
}
if (out_size) {
DenseTensor sizes;
paddle::framework::TensorCopySync(
*out_size, paddle::platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_w = size_data[0];
}
}
PADDLE_ENFORCE_GT(
out_w,
0,
errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
phi::DDim dim_out;
if (data_layout == DataLayout::kNCHW) {
dim_out = {n, c, out_w};
} else {
dim_out = {n, out_w, c};
}
output->Resize(dim_out);
auto output_data = dev_ctx.template Alloc<T>(output);
if (in_w == out_w) {
paddle::framework::TensorCopy(input, dev_ctx.GetPlace(), output);
return;
}
float ratio_w = 0.f;
if (out_w > 1) {
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1.0) / (out_w - 1.0)
: static_cast<float>(new_scale_w);
}
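  // ratio_w maps an output column back to input coordinates: (in_w - 1) / (out_w - 1)
  // when align_corners is set, otherwise the reciprocal of the effective scale.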
int64_t in_cw = c * in_w;
int64_t out_cw = c * out_w;
auto pixelNum = n * out_cw;
backends::gpu::GpuLaunchConfig config =
backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
if ("linear" == interp_method) {
hipLaunchKernelGGL(( KeLinearInterpFw<T>), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(), input_data,
in_w,
in_cw,
output_data,
out_w,
n,
out_cw,
c,
ratio_w,
align_corners,
align_mode,
data_layout);
}
}
template <typename T, typename Context>
static void Interpolate2DCUDAFwd(
const Context& dev_ctx,
const DenseTensor& input,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout_str,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto* input_data = input.data<T>();
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
float scale_w = -1;
float scale_h = -1;
if (size_tensor && size_tensor->size() > 0) {
// have size tensor
auto new_size = funcs::get_new_shape(size_tensor.get());
out_h = new_size[0];
out_w = new_size[1];
} else {
if (scale_tensor) {
auto scale_data =
funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
if (scale_data.size() > 1) {
scale_h = scale_data[0];
scale_w = scale_data[1];
} else {
scale_h = scale_data[0];
scale_w = scale_data[0];
}
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
} else {
if (scale.size() > 1) {
scale_w = scale[1];
scale_h = scale[0];
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
}
}
if (scale_w > 0. && scale_h > 0.) {
out_h = static_cast<int>(in_h * scale_h);
out_w = static_cast<int>(in_w * scale_w);
}
if (out_size) {
DenseTensor sizes;
paddle::framework::TensorCopySync(
*out_size, paddle::platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_h = size_data[0];
out_w = size_data[1];
}
}
PADDLE_ENFORCE_GT(
out_h,
0,
errors::InvalidArgument("out_h in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(
out_w,
0,
errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
phi::DDim dim_out;
if (data_layout == DataLayout::kNCHW) {
dim_out = {n, c, out_h, out_w};
} else {
dim_out = {n, out_h, out_w, c};
}
output->Resize(dim_out);
auto output_data = dev_ctx.template Alloc<T>(output);
if (in_h == out_h && in_w == out_w) {
paddle::framework::TensorCopy(input, dev_ctx.GetPlace(), output);
return;
}
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
int64_t in_hw = in_h * in_w;
int64_t out_hw = out_h * out_w;
int64_t in_chw = c * in_hw;
int64_t out_chw = c * out_hw;
auto pixelNum = n * out_chw;
backends::gpu::GpuLaunchConfig config =
backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
if ("nearest" == interp_method) {
if (data_layout == DataLayout::kNCHW) {
// get launch 3D config
int nc = n * c;
backends::gpu::GpuLaunchConfig config_3d =
backends::gpu::GetGpuLaunchConfig3D(dev_ctx, nc, out_h, out_w);
hipLaunchKernelGGL(( KeNearestNeighborInterpNCHWFw<T>), dim3(config_3d.block_per_grid),
dim3(config_3d.thread_per_block),
0,
dev_ctx.stream(), input_data,
in_h,
in_w,
output_data,
out_h,
out_w,
nc,
ratio_h,
ratio_w,
align_corners);
} else {
int64_t cw = c * out_w;
auto interp_divmods = funcs::FastDivModForInterpolate(c, out_chw, cw);
hipLaunchKernelGGL(( KeNearestNeighborInterpFw<T>), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(), input_data,
in_h,
in_w,
n,
in_chw,
output_data,
out_h,
out_w,
n,
out_chw,
c,
ratio_h,
ratio_w,
align_corners,
interp_divmods);
}
} else if ("bilinear" == interp_method) {
dim3 thread_num = config.thread_per_block;
#ifdef WITH_NV_JETSON
if (config.compute_capability == 53 || config.compute_capability == 62) {
thread_num = 512;
}
#endif
const T align_type_value = (align_mode == 0 && !align_corners) ? 0.5f : 0;
if (data_layout == DataLayout::kNCHW) {
// get launch 3D config
int nc = n * c;
backends::gpu::GpuLaunchConfig config_3d =
backends::gpu::GetGpuLaunchConfig3D(dev_ctx, nc, out_h, out_w);
hipLaunchKernelGGL(( KeBilinearInterpNCHWFw<T>), dim3(config_3d.block_per_grid),
dim3(config_3d.thread_per_block),
0,
dev_ctx.stream(), input_data,
in_h,
in_w,
output_data,
out_h,
out_w,
nc,
ratio_h,
ratio_w,
align_type_value);
} else {
int64_t cw = c * out_w;
auto interp_divmods = funcs::FastDivModForInterpolate(c, out_chw, cw);
hipLaunchKernelGGL(( KeBilinearInterpFw<T>)
, dim3(config.block_per_grid), dim3(thread_num), 0, dev_ctx.stream(),
input_data,
in_h,
in_w,
n,
in_chw,
output_data,
out_h,
out_w,
n,
out_chw,
c,
ratio_h,
ratio_w,
align_type_value,
interp_divmods);
}
} else if ("bicubic" == interp_method) {
#ifdef __HIPCC__
constexpr int thread_per_block = 256;
#else
constexpr int thread_per_block = 512;
#endif
hipLaunchKernelGGL(( KeBicubicInterpFw<T>)
, dim3(config.block_per_grid), dim3(thread_per_block), 0, dev_ctx.stream(),
input_data,
in_h,
in_w,
n,
in_chw,
output_data,
out_h,
out_w,
n,
out_chw,
c,
ratio_h,
ratio_w,
align_corners,
data_layout);
}
}
template <typename T, typename Context>
static void Interpolate3DCUDAFwd(
const Context& dev_ctx,
const DenseTensor& input,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout_str,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto* input_data = input.data<T>();
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
float scale_w = -1;
float scale_d = -1;
float scale_h = -1;
if (size_tensor && size_tensor->size() > 0) {
// have size tensor
auto new_size = funcs::get_new_shape(size_tensor.get());
out_d = new_size[0];
out_h = new_size[1];
out_w = new_size[2];
} else {
if (scale_tensor) {
auto scale_data =
funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
if (scale_data.size() > 1) {
scale_d = scale_data[0];
scale_h = scale_data[1];
scale_w = scale_data[2];
} else {
scale_d = scale_data[0];
scale_h = scale_data[0];
scale_w = scale_data[0];
}
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
PADDLE_ENFORCE_EQ(
scale_d > 0,
true,
errors::InvalidArgument(
"The scale_d in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_d));
} else {
if (scale.size() > 1) {
scale_d = scale[0];
scale_h = scale[1];
scale_w = scale[2];
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
PADDLE_ENFORCE_EQ(
scale_d > 0,
true,
errors::InvalidArgument(
"The scale_d in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_d));
}
}
if (scale_d > 0. && scale_h > 0. && scale_w > 0.) {
out_d = static_cast<int>(in_d * scale_d);
out_h = static_cast<int>(in_h * scale_h);
out_w = static_cast<int>(in_w * scale_w);
}
if (out_size) {
DenseTensor sizes;
paddle::framework::TensorCopySync(
*out_size, paddle::platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_d = size_data[0];
out_h = size_data[1];
out_w = size_data[2];
}
}
PADDLE_ENFORCE_GT(
out_d,
0,
errors::InvalidArgument("out_d in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(
out_h,
0,
errors::InvalidArgument("out_h in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(
out_w,
0,
errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
phi::DDim dim_out;
if (data_layout == DataLayout::kNCHW) {
dim_out = {n, c, out_d, out_h, out_w};
} else {
dim_out = {n, out_d, out_h, out_w, c};
}
output->Resize(dim_out);
auto output_data = dev_ctx.template Alloc<T>(output);
if (in_d == out_d && in_h == out_h && in_w == out_w) {
paddle::framework::TensorCopy(input, dev_ctx.GetPlace(), output);
return;
}
float ratio_d = 0.f;
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_d > 1) {
float new_scale_d = 0.f;
new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
: static_cast<float>(in_d) / out_d;
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
: static_cast<float>(new_scale_d);
}
if (out_h > 1) {
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
int64_t in_dhw = in_d * in_h * in_w;
int64_t out_dhw = out_d * out_h * out_w;
int64_t in_cdhw = c * in_dhw;
int64_t out_cdhw = c * out_dhw;
auto pixelNum = n * out_cdhw;
backends::gpu::GpuLaunchConfig config =
backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
if ("trilinear" == interp_method) {
hipLaunchKernelGGL(( KeTrilinearInterpFw<T>), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(), input_data,
in_d,
in_h,
in_w,
n,
in_cdhw,
output_data,
out_d,
out_h,
out_w,
n,
out_cdhw,
c,
ratio_d,
ratio_h,
ratio_w,
align_corners,
align_mode,
data_layout);
} else if ("nearest" == interp_method) {
hipLaunchKernelGGL(( KeNearestNeighbor3DInterpFw<T>), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(), input_data,
in_d,
in_h,
in_w,
n,
in_cdhw,
output_data,
out_d,
out_h,
out_w,
n,
out_cdhw,
c,
ratio_d,
ratio_h,
ratio_w,
align_corners,
data_layout);
}
}
template <typename T, typename Context>
void InterpolateKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto input_dims = x.dims();
if (input_dims.size() == 3) { // 1D interpolation
Interpolate1DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
} else if (input_dims.size() == 4) { // 2D interpolation
Interpolate2DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
} else if (input_dims.size() == 5) { // 3D interpolation
Interpolate3DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
}
template <typename T, typename Context>
void BilinearInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void NearestInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void TrilinearInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void LinearInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void BicubicInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
} // namespace phi
PD_REGISTER_KERNEL(bilinear_interp_v2,
GPU,
ALL_LAYOUT,
phi::BilinearInterpKernel,
float,
double,
int) {}
PD_REGISTER_KERNEL(nearest_interp_v2,
GPU,
ALL_LAYOUT,
phi::NearestInterpKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_KERNEL(trilinear_interp_v2,
GPU,
ALL_LAYOUT,
phi::TrilinearInterpKernel,
float,
double,
int) {}
PD_REGISTER_KERNEL(linear_interp_v2,
GPU,
ALL_LAYOUT,
phi::LinearInterpKernel,
float,
double,
int) {}
PD_REGISTER_KERNEL(bicubic_interp_v2,
GPU,
ALL_LAYOUT,
phi::BicubicInterpKernel,
float,
double,
int) {}
|
53f1106d8416e2677114e5e65988cf48a8d8007d.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/interpolate_kernel.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/fast_divmod.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/interpolate_function.h"
namespace phi {
using paddle::platform::FastDivMod;
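// Helper used by the linear/bilinear kernels below: it clamps the source
// coordinate to be non-negative, splits it into an integer input index plus
// fractional weights (lambda1 = fractional part, lambda2 = 1 - lambda1), and
// sets x_id to 1 when a right-hand neighbour exists (0 at the border).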
template <typename T>
__forceinline__ __device__ void PreCalculatorForLinearInterpInputIndex(
int* in_img_idx,
int* x_id,
T* lambda1,
T* lambda2,
T src_x,
const int in_img_x) {
src_x = (src_x > 0) ? src_x : 0.f;
*in_img_idx = static_cast<int>(src_x);
*x_id = (*in_img_idx < in_img_x - 1) ? 1 : 0;
*lambda1 = src_x - *in_img_idx;
*lambda2 = 1.f - *lambda1;
}
template <typename T>
__global__ void KeLinearInterpFw(const T* in,
const size_t in_img_w,
const size_t input_w,
T* out,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_w,
const bool align_corners,
const int align_mode,
const DataLayout data_layout) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
bool align_flag = (align_mode == 0 && !align_corners);
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id, out_img_idy, out_img_idx;
if (data_layout == DataLayout::kNCHW) {
channel_id = out_id_w / out_img_size;
out_img_idx = tid % out_img_w;
} else {
out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
channel_id = tid % num_channels;
}
int in_img_idx = align_flag
? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
: static_cast<int>(ratio_w * out_img_idx);
in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; // w
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; // w_id
T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
src_w = (src_w > 0) ? src_w : 0;
T w1lambda =
align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
T w2lambda = 1.f - w1lambda;
if (data_layout == DataLayout::kNCHW) {
      // batch offset is out_id_h * input_w (input_w == C * in_img_w for NCHW)
      const T* in_pos =
          &in[out_id_h * input_w + channel_id * in_img_size + in_img_idx];
// linear interpolation
out[out_id_h * output_w + out_id_w] =
w2lambda * in_pos[0] + w1lambda * in_pos[w_id];
} else {
const T* in_pos =
&in[out_id_h * input_w + in_img_idx * num_channels + channel_id];
// linear interpolation
out[out_id_h * output_w + out_id_w] =
w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels];
}
}
}
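// NCHW nearest-neighbour forward kernel. It is launched with a 3-D grid
// (x -> output width, y -> output height, z -> N*C); each thread computes one
// output pixel by reading the corresponding input pixel (rounded when
// align_corners is set, floored otherwise), and the while-loop strides over
// the remaining N*C slices when the grid does not cover them all.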
template <typename T>
__global__ void KeNearestNeighborInterpNCHWFw(const T* in,
const size_t in_img_h,
const size_t in_img_w,
T* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t nc,
const float ratio_h,
const float ratio_w,
const bool align_corners) {
int out_img_idx = threadIdx.x + blockIdx.x * blockDim.x;
int out_img_idy = threadIdx.y + blockIdx.y * blockDim.y;
int nc_id = threadIdx.z + blockIdx.z * blockDim.z;
int nc_stride = blockDim.z * gridDim.z;
// nearest_sampling by multiple read in_addr and write to out_addr
int in_img_idx = (align_corners)
? static_cast<int>(ratio_w * out_img_idx + 0.5)
: static_cast<int>(ratio_w * out_img_idx);
int in_img_idy = (align_corners)
? static_cast<int>(ratio_h * out_img_idy + 0.5)
: static_cast<int>(ratio_h * out_img_idy);
int in_index = (nc_id * in_img_h + in_img_idy) * in_img_w + in_img_idx;
int in_index_stride = nc_stride * in_img_h * in_img_w;
int out_index = (nc_id * out_img_h + out_img_idy) * out_img_w + out_img_idx;
int out_index_stride = nc_stride * out_img_h * out_img_w;
// prevent from multiple threads writing
if (out_img_idx < out_img_w && out_img_idy < out_img_h) {
while (nc_id < nc) {
out[out_index] = in[in_index];
in_index += in_index_stride;
out_index += out_index_stride;
nc_id += nc_stride;
}
}
}
template <typename T>
__global__ void KeNearestNeighborInterpFw(
const T* in,
const size_t in_img_h,
const size_t in_img_w,
const size_t input_h,
const size_t input_w,
T* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_h,
const float ratio_w,
const bool align_corners,
funcs::FastDivModForInterpolate divmods) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int in_img_size = in_img_h * in_img_w;
int out_img_size = out_img_h * out_img_w;
for (; tid < nthreads; tid += stride) {
auto out_id_divmod = divmods.output_w_div.Divmod(tid);
int out_id_h = out_id_divmod.val[0];
int out_id_w = out_id_divmod.val[1];
int channel_id = divmods.channels_div.Divmod(tid).val[1];
auto outimg_id_divmod = divmods.output_wc_div.Divmod(out_id_w);
int out_img_idy = outimg_id_divmod.val[0];
int out_img_idx =
divmods.channels_div.Divmod(outimg_id_divmod.val[1]).val[0];
int in_img_idy = (align_corners)
? static_cast<int>(ratio_h * out_img_idy + 0.5)
: static_cast<int>(ratio_h * out_img_idy);
int in_img_idx = (align_corners)
? static_cast<int>(ratio_w * out_img_idx + 0.5)
: static_cast<int>(ratio_w * out_img_idx);
out[tid] = in[out_id_h * input_w + in_img_idy * in_img_w * num_channels +
in_img_idx * num_channels + channel_id];
}
}
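// Bilinear forward kernel for non-NCHW (channel-last) data. align_type_value
// is 0.5 for the half-pixel convention (align_mode == 0 && !align_corners)
// and 0 otherwise, so src = ratio * (dst + align_type_value) -
// align_type_value covers both the half-pixel and the asymmetric /
// corner-aligned coordinate mappings with a single expression.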
template <typename T>
__global__ void KeBilinearInterpFw(const T* in,
const size_t in_img_h,
const size_t in_img_w,
const size_t input_h,
const size_t input_w,
T* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_h,
const float ratio_w,
const T align_type_value,
funcs::FastDivModForInterpolate divmods) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
auto out_id_divmod = divmods.output_w_div.Divmod(tid);
int out_id_h = out_id_divmod.val[0];
int out_id_w = out_id_divmod.val[1];
int channel_id = divmods.channels_div.Divmod(tid).val[1];
auto outimg_id_divmod = divmods.output_wc_div.Divmod(out_id_w);
int out_img_idy = outimg_id_divmod.val[0];
int out_img_idx =
divmods.channels_div.Divmod(outimg_id_divmod.val[1]).val[0];
int in_img_idx, in_img_idy, h_id, w_id;
T h1lambda, w1lambda, h2lambda, w2lambda;
T src_w = ratio_w * (out_img_idx + align_type_value) - align_type_value;
T src_h = ratio_h * (out_img_idy + align_type_value) - align_type_value;
PreCalculatorForLinearInterpInputIndex(
&in_img_idx, &w_id, &w1lambda, &w2lambda, src_w, in_img_w);
PreCalculatorForLinearInterpInputIndex(
&in_img_idy, &h_id, &h1lambda, &h2lambda, src_h, in_img_h);
// bilinear interpolation
const T* in_pos =
&in[out_id_h * input_w + in_img_idy * in_img_w * num_channels +
in_img_idx * num_channels + channel_id];
out[tid] =
h2lambda *
(w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]) +
h1lambda *
(w2lambda * in_pos[h_id * in_img_w * num_channels] +
w1lambda *
in_pos[h_id * in_img_w * num_channels + w_id * num_channels]);
}
}
template <typename T>
__global__ void KeBilinearInterpNCHWFw(const T* in,
const size_t in_img_h,
const size_t in_img_w,
T* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t nc,
const float ratio_h,
const float ratio_w,
const T align_type_value) {
int out_img_idx = threadIdx.x + blockIdx.x * blockDim.x;
int out_img_idy = threadIdx.y + blockIdx.y * blockDim.y;
int nc_id = threadIdx.z + blockIdx.z * blockDim.z;
int nc_stride = blockDim.z * gridDim.z;
int in_img_idx, in_img_idy, h_id, w_id;
T h1lambda, w1lambda, h2lambda, w2lambda;
T src_w = ratio_w * (out_img_idx + align_type_value) - align_type_value;
T src_h = ratio_h * (out_img_idy + align_type_value) - align_type_value;
PreCalculatorForLinearInterpInputIndex(
&in_img_idx, &w_id, &w1lambda, &w2lambda, src_w, in_img_w);
PreCalculatorForLinearInterpInputIndex(
&in_img_idy, &h_id, &h1lambda, &h2lambda, src_h, in_img_h);
int in_index = (nc_id * in_img_h + in_img_idy) * in_img_w + in_img_idx;
int in_index_stride = nc_stride * in_img_h * in_img_w;
int out_index = (nc_id * out_img_h + out_img_idy) * out_img_w + out_img_idx;
int out_index_stride = nc_stride * out_img_h * out_img_w;
// prevent from multiple threads writing
if (out_img_idx < out_img_w && out_img_idy < out_img_h) {
while (nc_id < nc) {
const T* in_pos = &in[in_index];
out[out_index] =
h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) +
h1lambda * (w2lambda * in_pos[h_id * in_img_w] +
w1lambda * in_pos[h_id * in_img_w + w_id]);
in_index += in_index_stride;
out_index += out_index_stride;
nc_id += nc_stride;
}
}
}
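// Cubic interpolation of four neighbouring samples using Keys' convolution
// weights with a = -0.75 (the coefficients produced by
// funcs::CubicConvolution1/2); t is the fractional distance from x1.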
template <typename T>
__device__ __forceinline__ static T Kecubic_interp(
const T x0, const T x1, const T x2, const T x3, T t) {
T coeffs[4];
T a = -0.75;
T x_1 = t;
T x_2 = 1.0 - t;
coeffs[0] = funcs::CubicConvolution2<T>(x_1 + 1.0, a);
coeffs[1] = funcs::CubicConvolution1<T>(x_1, a);
coeffs[2] = funcs::CubicConvolution1<T>(x_2, a);
coeffs[3] = funcs::CubicConvolution2<T>(x_2 + 1.0, a);
return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3];
}
template <typename T>
__global__ void KeBicubicInterpFw(const T* in,
const size_t in_img_h,
const size_t in_img_w,
const size_t input_h,
const size_t input_w,
T* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_h,
const float ratio_w,
const bool align_corners,
const DataLayout data_layout) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id, out_img_idy, out_img_idx;
if (data_layout == DataLayout::kNCHW) {
channel_id = out_id_w / out_img_size;
out_img_idy = (out_id_w % out_img_size) / out_img_w;
out_img_idx = tid % out_img_w;
} else {
out_img_idy = out_id_w / (out_img_w * num_channels);
out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
channel_id = tid % num_channels;
}
T in_img_idy = align_corners
? static_cast<T>(ratio_h * out_img_idy)
: static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5);
int input_y = floorf(in_img_idy);
const T y_t = in_img_idy - input_y;
T in_img_idx = align_corners
? static_cast<T>(ratio_w * out_img_idx)
: static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5);
int input_x = floorf(in_img_idx);
const T x_t = in_img_idx - input_x;
T coefficients[4];
const T* in_pos_0;
const T* in_pos_1;
const T* in_pos_2;
const T* in_pos_3;
int access_x_0;
if (data_layout == DataLayout::kNCHW) {
for (int k = 0; k < 4; k++) {
int access_y =
max(min(input_y - 1 + k, static_cast<int>(in_img_h - 1)), 0);
access_x_0 = max(min(input_x - 1, static_cast<int>(in_img_w - 1)), 0);
int access_x_1 =
max(min(input_x + 0, static_cast<int>(in_img_w - 1)), 0);
int access_x_2 =
max(min(input_x + 1, static_cast<int>(in_img_w - 1)), 0);
int access_x_3 =
max(min(input_x + 2, static_cast<int>(in_img_w - 1)), 0);
in_pos_0 = &in[out_id_h * input_w + channel_id * in_img_size +
access_y * in_img_w + access_x_0];
in_pos_1 = &in[out_id_h * input_w + channel_id * in_img_size +
access_y * in_img_w + access_x_1];
in_pos_2 = &in[out_id_h * input_w + channel_id * in_img_size +
access_y * in_img_w + access_x_2];
in_pos_3 = &in[out_id_h * input_w + channel_id * in_img_size +
access_y * in_img_w + access_x_3];
coefficients[k] = Kecubic_interp<T>(
in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t);
}
out[out_id_h * output_w + out_id_w] = Kecubic_interp<T>(coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
y_t);
} else {
for (int k = 0; k < 4; k++) {
int access_y =
max(min(input_y - 1 + k, static_cast<int>((in_img_h - 1))), 0);
int access_x_0 =
max(min(input_x - 1, static_cast<int>((in_img_w - 1))), 0);
int access_x_1 =
max(min(input_x + 0, static_cast<int>((in_img_w - 1))), 0);
int access_x_2 =
max(min(input_x + 1, static_cast<int>((in_img_w - 1))), 0);
int access_x_3 =
max(min(input_x + 2, static_cast<int>((in_img_w - 1))), 0);
const T* in_pos_0 =
&in[out_id_h * input_w + access_y * in_img_w * num_channels +
access_x_0 * num_channels + channel_id];
const T* in_pos_1 =
&in[out_id_h * input_w + access_y * in_img_w * num_channels +
access_x_1 * num_channels + channel_id];
const T* in_pos_2 =
&in[out_id_h * input_w + access_y * in_img_w * num_channels +
access_x_2 * num_channels + channel_id];
const T* in_pos_3 =
&in[out_id_h * input_w + access_y * in_img_w * num_channels +
access_x_3 * num_channels + channel_id];
coefficients[k] = Kecubic_interp(
in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t);
}
out[out_id_h * output_w + out_id_w] =
static_cast<T>(Kecubic_interp(coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
y_t));
}
}
}
template <typename T>
__global__ void KeTrilinearInterpFw(const T* in,
const size_t in_img_d,
const size_t in_img_h,
const size_t in_img_w,
const size_t input_h,
const size_t input_w,
T* out,
const size_t out_img_d,
const size_t out_img_h,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_d,
const float ratio_h,
const float ratio_w,
const bool align_corners,
const int align_mode,
const DataLayout data_layout) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
bool align_flag = (align_mode == 0 && !align_corners);
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id, out_img_idt, out_img_idy, out_img_idx;
if (data_layout == DataLayout::kNCHW) {
channel_id = out_id_w / out_img_size;
out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w;
out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h;
out_img_idx = tid % out_img_w;
} else {
out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels);
out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) /
(out_img_w * num_channels);
out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
channel_id = tid % num_channels;
}
int in_img_idt = align_flag
? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5)
: static_cast<int>(ratio_d * out_img_idt);
in_img_idt = (in_img_idt > 0) ? in_img_idt : 0;
int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0;
T src_d = ratio_d * (out_img_idt + 0.5) - 0.5;
src_d = (src_d > 0) ? src_d : 0;
T d1lambda =
align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt;
T d2lambda = 1.f - d1lambda;
int in_img_idy = align_flag
? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5)
: static_cast<int>(ratio_h * out_img_idy);
in_img_idy = (in_img_idy > 0) ? in_img_idy : 0;
int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
T src_h = ratio_h * (out_img_idy + 0.5) - 0.5;
src_h = (src_h > 0) ? src_h : 0;
T h1lambda =
align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy;
T h2lambda = 1.f - h1lambda;
int in_img_idx = align_flag
? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
: static_cast<int>(ratio_w * out_img_idx);
in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
src_w = (src_w > 0) ? src_w : 0;
T w1lambda =
align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
T w2lambda = 1.f - w1lambda;
if (data_layout == DataLayout::kNCHW) {
int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size +
(in_img_idt * in_img_h + in_img_idy) * in_img_w +
in_img_idx;
const T* in_pos1 = &in[in_pos1_idx];
int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w;
const T* in_pos2 = &in[in_pos2_idx];
// trilinear interpolation
out[out_id_h * output_w + out_id_w] =
d2lambda *
(h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id]) +
h1lambda * (w2lambda * in_pos1[h_id * in_img_w] +
w1lambda * in_pos1[h_id * in_img_w + w_id])) +
d1lambda *
(h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id]) +
h1lambda * (w2lambda * in_pos2[h_id * in_img_w] +
w1lambda * in_pos2[h_id * in_img_w + w_id]));
} else {
int in_pos1_idx = out_id_h * input_w +
in_img_idt * in_img_h * in_img_w * num_channels +
in_img_idy * in_img_w * num_channels +
in_img_idx * num_channels + channel_id;
const T* in_pos1 = &in[in_pos1_idx];
int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels;
const T* in_pos2 = &in[in_pos2_idx];
// trilinear interpolation
out[out_id_h * output_w + out_id_w] =
d2lambda *
(h2lambda * (w2lambda * in_pos1[0] +
w1lambda * in_pos1[w_id * num_channels]) +
h1lambda * (w2lambda * in_pos1[h_id * in_img_w * num_channels] +
w1lambda * in_pos1[h_id * in_img_w * num_channels +
w_id * num_channels])) +
d1lambda *
(h2lambda * (w2lambda * in_pos2[0] +
w1lambda * in_pos2[w_id * num_channels]) +
h1lambda * (w2lambda * in_pos2[h_id * in_img_w * num_channels] +
w1lambda * in_pos2[h_id * in_img_w * num_channels +
w_id * num_channels]));
}
}
}
template <typename T>
__global__ void KeNearestNeighbor3DInterpFw(const T* in,
const size_t in_img_d,
const size_t in_img_h,
const size_t in_img_w,
const size_t input_h,
const size_t input_w,
T* out,
const size_t out_img_d,
const size_t out_img_h,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_d,
const float ratio_h,
const float ratio_w,
const bool align_corners,
const DataLayout data_layout) {
int nthreads = output_h * output_w; // ncdhw
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id, out_img_idt, out_img_idy, out_img_idx;
if (data_layout == DataLayout::kNCHW) {
channel_id = out_id_w / out_img_size;
out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w;
out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h;
out_img_idx = tid % out_img_w;
} else {
out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels);
out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) /
(out_img_w * num_channels);
out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
channel_id = tid % num_channels;
}
int in_img_idt = (align_corners)
? static_cast<int>(ratio_d * out_img_idt + 0.5)
: static_cast<int>(ratio_d * out_img_idt);
int in_img_idy = (align_corners)
? static_cast<int>(ratio_h * out_img_idy + 0.5)
: static_cast<int>(ratio_h * out_img_idy);
int in_img_idx = (align_corners)
? static_cast<int>(ratio_w * out_img_idx + 0.5)
: static_cast<int>(ratio_w * out_img_idx);
if (data_layout == DataLayout::kNCHW) {
out[tid] = in[out_id_h * input_w + channel_id * in_img_size +
in_img_idt * in_img_h * in_img_w + in_img_idy * in_img_w +
in_img_idx];
} else {
out[tid] = in[out_id_h * input_w +
in_img_idt * in_img_h * in_img_w * num_channels +
in_img_idy * in_img_w * num_channels +
in_img_idx * num_channels + channel_id];
}
}
}
template <typename T, typename Context>
static void Interpolate1DCUDAFwd(
const Context& dev_ctx,
const DenseTensor& input,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout_str,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto* input_data = input.data<T>();
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
float scale_w = -1;
if (size_tensor && size_tensor->size() > 0) {
// have size tensor
auto new_size = funcs::get_new_shape(size_tensor.get());
out_w = new_size[0];
} else {
if (scale_tensor) {
auto scale_data =
funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
scale_w = scale_data[0];
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
} else {
if (scale.size() > 0) {
scale_w = scale[0];
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
}
}
if (scale_w > 0.) {
out_w = static_cast<int>(in_w * scale_w);
}
if (out_size) {
DenseTensor sizes;
paddle::framework::TensorCopySync(
*out_size, paddle::platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_w = size_data[0];
}
}
PADDLE_ENFORCE_GT(
out_w,
0,
errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
phi::DDim dim_out;
if (data_layout == DataLayout::kNCHW) {
dim_out = {n, c, out_w};
} else {
dim_out = {n, out_w, c};
}
output->Resize(dim_out);
auto output_data = dev_ctx.template Alloc<T>(output);
if (in_w == out_w) {
paddle::framework::TensorCopy(input, dev_ctx.GetPlace(), output);
return;
}
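  // The ratio maps an output coordinate back to the input grid. With
  // align_corners it is (in_w - 1) / (out_w - 1) so the end points coincide
  // (e.g. in_w = 4, out_w = 8 gives 3 / 7 ~= 0.4286); otherwise it is the
  // inverse of the user-supplied scale, falling back to in_w / out_w.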
float ratio_w = 0.f;
if (out_w > 1) {
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1.0) / (out_w - 1.0)
: static_cast<float>(new_scale_w);
}
int64_t in_cw = c * in_w;
int64_t out_cw = c * out_w;
auto pixelNum = n * out_cw;
backends::gpu::GpuLaunchConfig config =
backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
if ("linear" == interp_method) {
KeLinearInterpFw<T><<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(input_data,
in_w,
in_cw,
output_data,
out_w,
n,
out_cw,
c,
ratio_w,
align_corners,
align_mode,
data_layout);
}
}
template <typename T, typename Context>
static void Interpolate2DCUDAFwd(
const Context& dev_ctx,
const DenseTensor& input,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout_str,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto* input_data = input.data<T>();
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
float scale_w = -1;
float scale_h = -1;
if (size_tensor && size_tensor->size() > 0) {
// have size tensor
auto new_size = funcs::get_new_shape(size_tensor.get());
out_h = new_size[0];
out_w = new_size[1];
} else {
if (scale_tensor) {
auto scale_data =
funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
if (scale_data.size() > 1) {
scale_h = scale_data[0];
scale_w = scale_data[1];
} else {
scale_h = scale_data[0];
scale_w = scale_data[0];
}
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
} else {
if (scale.size() > 1) {
scale_w = scale[1];
scale_h = scale[0];
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
}
}
if (scale_w > 0. && scale_h > 0.) {
out_h = static_cast<int>(in_h * scale_h);
out_w = static_cast<int>(in_w * scale_w);
}
if (out_size) {
DenseTensor sizes;
paddle::framework::TensorCopySync(
*out_size, paddle::platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_h = size_data[0];
out_w = size_data[1];
}
}
PADDLE_ENFORCE_GT(
out_h,
0,
errors::InvalidArgument("out_h in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(
out_w,
0,
errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
phi::DDim dim_out;
if (data_layout == DataLayout::kNCHW) {
dim_out = {n, c, out_h, out_w};
} else {
dim_out = {n, out_h, out_w, c};
}
output->Resize(dim_out);
auto output_data = dev_ctx.template Alloc<T>(output);
if (in_h == out_h && in_w == out_w) {
paddle::framework::TensorCopy(input, dev_ctx.GetPlace(), output);
return;
}
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
int64_t in_hw = in_h * in_w;
int64_t out_hw = out_h * out_w;
int64_t in_chw = c * in_hw;
int64_t out_chw = c * out_hw;
auto pixelNum = n * out_chw;
backends::gpu::GpuLaunchConfig config =
backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
if ("nearest" == interp_method) {
if (data_layout == DataLayout::kNCHW) {
// get launch 3D config
int nc = n * c;
backends::gpu::GpuLaunchConfig config_3d =
backends::gpu::GetGpuLaunchConfig3D(dev_ctx, nc, out_h, out_w);
KeNearestNeighborInterpNCHWFw<T><<<config_3d.block_per_grid,
config_3d.thread_per_block,
0,
dev_ctx.stream()>>>(input_data,
in_h,
in_w,
output_data,
out_h,
out_w,
nc,
ratio_h,
ratio_w,
align_corners);
} else {
int64_t cw = c * out_w;
auto interp_divmods = funcs::FastDivModForInterpolate(c, out_chw, cw);
KeNearestNeighborInterpFw<T><<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(input_data,
in_h,
in_w,
n,
in_chw,
output_data,
out_h,
out_w,
n,
out_chw,
c,
ratio_h,
ratio_w,
align_corners,
interp_divmods);
}
} else if ("bilinear" == interp_method) {
dim3 thread_num = config.thread_per_block;
#ifdef WITH_NV_JETSON
if (config.compute_capability == 53 || config.compute_capability == 62) {
thread_num = 512;
}
#endif
const T align_type_value = (align_mode == 0 && !align_corners) ? 0.5f : 0;
if (data_layout == DataLayout::kNCHW) {
// get launch 3D config
int nc = n * c;
backends::gpu::GpuLaunchConfig config_3d =
backends::gpu::GetGpuLaunchConfig3D(dev_ctx, nc, out_h, out_w);
KeBilinearInterpNCHWFw<T><<<config_3d.block_per_grid,
config_3d.thread_per_block,
0,
dev_ctx.stream()>>>(input_data,
in_h,
in_w,
output_data,
out_h,
out_w,
nc,
ratio_h,
ratio_w,
align_type_value);
} else {
int64_t cw = c * out_w;
auto interp_divmods = funcs::FastDivModForInterpolate(c, out_chw, cw);
KeBilinearInterpFw<T>
<<<config.block_per_grid, thread_num, 0, dev_ctx.stream()>>>(
input_data,
in_h,
in_w,
n,
in_chw,
output_data,
out_h,
out_w,
n,
out_chw,
c,
ratio_h,
ratio_w,
align_type_value,
interp_divmods);
}
} else if ("bicubic" == interp_method) {
#ifdef __HIPCC__
constexpr int thread_per_block = 256;
#else
constexpr int thread_per_block = 512;
#endif
KeBicubicInterpFw<T>
<<<config.block_per_grid, thread_per_block, 0, dev_ctx.stream()>>>(
input_data,
in_h,
in_w,
n,
in_chw,
output_data,
out_h,
out_w,
n,
out_chw,
c,
ratio_h,
ratio_w,
align_corners,
data_layout);
}
}
template <typename T, typename Context>
static void Interpolate3DCUDAFwd(
const Context& dev_ctx,
const DenseTensor& input,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout_str,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto* input_data = input.data<T>();
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
float scale_w = -1;
float scale_d = -1;
float scale_h = -1;
if (size_tensor && size_tensor->size() > 0) {
// have size tensor
auto new_size = funcs::get_new_shape(size_tensor.get());
out_d = new_size[0];
out_h = new_size[1];
out_w = new_size[2];
} else {
if (scale_tensor) {
auto scale_data =
funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
if (scale_data.size() > 1) {
scale_d = scale_data[0];
scale_h = scale_data[1];
scale_w = scale_data[2];
} else {
scale_d = scale_data[0];
scale_h = scale_data[0];
scale_w = scale_data[0];
}
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
PADDLE_ENFORCE_EQ(
scale_d > 0,
true,
errors::InvalidArgument(
"The scale_d in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_d));
} else {
if (scale.size() > 1) {
scale_d = scale[0];
scale_h = scale[1];
scale_w = scale[2];
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
PADDLE_ENFORCE_EQ(
scale_d > 0,
true,
errors::InvalidArgument(
"The scale_d in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_d));
}
}
if (scale_d > 0. && scale_h > 0. && scale_w > 0.) {
out_d = static_cast<int>(in_d * scale_d);
out_h = static_cast<int>(in_h * scale_h);
out_w = static_cast<int>(in_w * scale_w);
}
if (out_size) {
DenseTensor sizes;
paddle::framework::TensorCopySync(
*out_size, paddle::platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_d = size_data[0];
out_h = size_data[1];
out_w = size_data[2];
}
}
PADDLE_ENFORCE_GT(
out_d,
0,
errors::InvalidArgument("out_d in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(
out_h,
0,
errors::InvalidArgument("out_h in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(
out_w,
0,
errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
phi::DDim dim_out;
if (data_layout == DataLayout::kNCHW) {
dim_out = {n, c, out_d, out_h, out_w};
} else {
dim_out = {n, out_d, out_h, out_w, c};
}
output->Resize(dim_out);
auto output_data = dev_ctx.template Alloc<T>(output);
if (in_d == out_d && in_h == out_h && in_w == out_w) {
paddle::framework::TensorCopy(input, dev_ctx.GetPlace(), output);
return;
}
float ratio_d = 0.f;
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_d > 1) {
float new_scale_d = 0.f;
new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
: static_cast<float>(in_d) / out_d;
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
: static_cast<float>(new_scale_d);
}
if (out_h > 1) {
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
int64_t in_dhw = in_d * in_h * in_w;
int64_t out_dhw = out_d * out_h * out_w;
int64_t in_cdhw = c * in_dhw;
int64_t out_cdhw = c * out_dhw;
auto pixelNum = n * out_cdhw;
backends::gpu::GpuLaunchConfig config =
backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
if ("trilinear" == interp_method) {
KeTrilinearInterpFw<T><<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(input_data,
in_d,
in_h,
in_w,
n,
in_cdhw,
output_data,
out_d,
out_h,
out_w,
n,
out_cdhw,
c,
ratio_d,
ratio_h,
ratio_w,
align_corners,
align_mode,
data_layout);
} else if ("nearest" == interp_method) {
KeNearestNeighbor3DInterpFw<T><<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(input_data,
in_d,
in_h,
in_w,
n,
in_cdhw,
output_data,
out_d,
out_h,
out_w,
n,
out_cdhw,
c,
ratio_d,
ratio_h,
ratio_w,
align_corners,
data_layout);
}
}
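// Dispatches on the input rank: a 3-D tensor (NCW/NWC) is handled as 1-D
// interpolation, a 4-D tensor as 2-D, and a 5-D tensor as 3-D.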
template <typename T, typename Context>
void InterpolateKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto input_dims = x.dims();
if (input_dims.size() == 3) { // 1D interpolation
Interpolate1DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
} else if (input_dims.size() == 4) { // 2D interpolation
Interpolate2DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
} else if (input_dims.size() == 5) { // 3D interpolation
Interpolate3DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
}
template <typename T, typename Context>
void BilinearInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void NearestInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void TrilinearInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void LinearInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void BicubicInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
} // namespace phi
PD_REGISTER_KERNEL(bilinear_interp_v2,
GPU,
ALL_LAYOUT,
phi::BilinearInterpKernel,
float,
double,
int) {}
PD_REGISTER_KERNEL(nearest_interp_v2,
GPU,
ALL_LAYOUT,
phi::NearestInterpKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_KERNEL(trilinear_interp_v2,
GPU,
ALL_LAYOUT,
phi::TrilinearInterpKernel,
float,
double,
int) {}
PD_REGISTER_KERNEL(linear_interp_v2,
GPU,
ALL_LAYOUT,
phi::LinearInterpKernel,
float,
double,
int) {}
PD_REGISTER_KERNEL(bicubic_interp_v2,
GPU,
ALL_LAYOUT,
phi::BicubicInterpKernel,
float,
double,
int) {}
|
4131cc4d47b458b7fbb305edd3872006f586c430.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "CUAPI.h"
#ifdef GPU
extern real *d_dt_Array_T;
extern real (*d_Flu_Array_T)[NCOMP_FLUID][ CUBE(PS1) ];
extern double (*d_Corner_Array_T)[3];
#ifdef GRAVITY
extern real (*d_Pot_Array_T)[ CUBE(GRA_NXT) ];
#endif
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_MemFree_dt
// Description : Free the GPU and CPU memory previously allocated by CUAPI_MemAllocate_dt()
//
// Parameter : None
//-------------------------------------------------------------------------------------------------------
void CUAPI_MemFree_dt()
{
// free the device memory
if ( d_dt_Array_T != NULL ) CUDA_CHECK_ERROR( hipFree( d_dt_Array_T ) );
if ( d_Flu_Array_T != NULL ) CUDA_CHECK_ERROR( hipFree( d_Flu_Array_T ) );
if ( d_Corner_Array_T != NULL ) CUDA_CHECK_ERROR( hipFree( d_Corner_Array_T ) );
# ifdef GRAVITY
if ( d_Pot_Array_T != NULL ) CUDA_CHECK_ERROR( hipFree( d_Pot_Array_T ) );
# endif
d_dt_Array_T = NULL;
d_Flu_Array_T = NULL;
d_Corner_Array_T = NULL;
# ifdef GRAVITY
d_Pot_Array_T = NULL;
# endif
// free the host memory allocated by CUDA
for (int t=0; t<2; t++)
{
if ( h_dt_Array_T [t] != NULL ) CUDA_CHECK_ERROR( hipHostFree( h_dt_Array_T [t] ) );
if ( h_Flu_Array_T [t] != NULL ) CUDA_CHECK_ERROR( hipHostFree( h_Flu_Array_T [t] ) );
if ( h_Corner_Array_T[t] != NULL ) CUDA_CHECK_ERROR( hipHostFree( h_Corner_Array_T[t] ) );
# ifdef GRAVITY
if ( h_Pot_Array_T [t] != NULL ) CUDA_CHECK_ERROR( hipHostFree( h_Pot_Array_T [t] ) );
# endif
h_dt_Array_T [t] = NULL;
h_Flu_Array_T [t] = NULL;
h_Corner_Array_T[t] = NULL;
# ifdef GRAVITY
h_Pot_Array_T [t] = NULL;
# endif
} // for (int t=0; t<2; t++)
} // FUNCTION : CUAPI_MemFree_dt
#endif // #ifdef GPU
|
4131cc4d47b458b7fbb305edd3872006f586c430.cu
|
#include "CUAPI.h"
#ifdef GPU
extern real *d_dt_Array_T;
extern real (*d_Flu_Array_T)[NCOMP_FLUID][ CUBE(PS1) ];
extern double (*d_Corner_Array_T)[3];
#ifdef GRAVITY
extern real (*d_Pot_Array_T)[ CUBE(GRA_NXT) ];
#endif
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_MemFree_dt
// Description : Free the GPU and CPU memory previously allocated by CUAPI_MemAllocate_dt()
//
// Parameter : None
//-------------------------------------------------------------------------------------------------------
void CUAPI_MemFree_dt()
{
// free the device memory
if ( d_dt_Array_T != NULL ) CUDA_CHECK_ERROR( cudaFree( d_dt_Array_T ) );
if ( d_Flu_Array_T != NULL ) CUDA_CHECK_ERROR( cudaFree( d_Flu_Array_T ) );
if ( d_Corner_Array_T != NULL ) CUDA_CHECK_ERROR( cudaFree( d_Corner_Array_T ) );
# ifdef GRAVITY
if ( d_Pot_Array_T != NULL ) CUDA_CHECK_ERROR( cudaFree( d_Pot_Array_T ) );
# endif
d_dt_Array_T = NULL;
d_Flu_Array_T = NULL;
d_Corner_Array_T = NULL;
# ifdef GRAVITY
d_Pot_Array_T = NULL;
# endif
// free the host memory allocated by CUDA
for (int t=0; t<2; t++)
{
if ( h_dt_Array_T [t] != NULL ) CUDA_CHECK_ERROR( cudaFreeHost( h_dt_Array_T [t] ) );
if ( h_Flu_Array_T [t] != NULL ) CUDA_CHECK_ERROR( cudaFreeHost( h_Flu_Array_T [t] ) );
if ( h_Corner_Array_T[t] != NULL ) CUDA_CHECK_ERROR( cudaFreeHost( h_Corner_Array_T[t] ) );
# ifdef GRAVITY
if ( h_Pot_Array_T [t] != NULL ) CUDA_CHECK_ERROR( cudaFreeHost( h_Pot_Array_T [t] ) );
# endif
h_dt_Array_T [t] = NULL;
h_Flu_Array_T [t] = NULL;
h_Corner_Array_T[t] = NULL;
# ifdef GRAVITY
h_Pot_Array_T [t] = NULL;
# endif
} // for (int t=0; t<2; t++)
} // FUNCTION : CUAPI_MemFree_dt
#endif // #ifdef GPU
|
058ba64f6ec8b0403bec0126672b066df4f59098.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/linspace/linspace.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/cuda/linspace/linspace.cuh"
#include "megdnn/dtype.h"
#include "src/cuda/utils.cuh"
namespace {
template <typename T>
__global__ void kernel(T *dst, double start, double step, uint32_t n)
{
uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n) {
dst[i] = T(start + step*i);
}
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace linspace {
template <typename T>
void exec_internal(T *dst, double start, double step, size_t n,
hipStream_t stream)
{
uint32_t threads = NR_THREADS;
uint32_t blocks = DIVUP(n, threads);
hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), 0, stream, dst, start, step, n);
after_kernel_launch();
}
#define INST(T) template void exec_internal<T>(T *dst, \
double start, double step, size_t n, hipStream_t stream);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
} // namespace linspace
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
|
058ba64f6ec8b0403bec0126672b066df4f59098.cu
|
/**
* \file dnn/src/cuda/linspace/linspace.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/cuda/linspace/linspace.cuh"
#include "megdnn/dtype.h"
#include "src/cuda/utils.cuh"
namespace {
template <typename T>
__global__ void kernel(T *dst, double start, double step, uint32_t n)
{
uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n) {
dst[i] = T(start + step*i);
}
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace linspace {
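// Fills dst with n evenly spaced values, dst[i] = start + step * i, using one
// thread per element: DIVUP(n, NR_THREADS) blocks of NR_THREADS threads.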
template <typename T>
void exec_internal(T *dst, double start, double step, size_t n,
cudaStream_t stream)
{
uint32_t threads = NR_THREADS;
uint32_t blocks = DIVUP(n, threads);
kernel<<<blocks, threads, 0, stream>>>(dst, start, step, n);
after_kernel_launch();
}
#define INST(T) template void exec_internal<T>(T *dst, \
double start, double step, size_t n, cudaStream_t stream);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
} // namespace linspace
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
|
9c90f1e2f037b76d1c4867dc73862ee219e74786.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bilateral_3d.hpp"
#include "helper_math.h"
void bilateral_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, float imageDensity, float colorDensity, int radius, int halo, hipStream_t stream);
__global__
void __bilateral_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, float imageDensity, float colorDensity, int radius, int halo);
void bilateral_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, float imageDensity, float colorDensity, int radius, int halo, hipStream_t stream)
{
dim3 blockDim(8, 8, 8);
dim3 gridDim(
(dimx/blockDim.x + ((dimx%blockDim.x)?1:0)),
(dimy/blockDim.y + ((dimy%blockDim.y)?1:0)),
(dimz/blockDim.z + ((dimz%blockDim.z)?1:0)) );
size_t sharedMemSize = (blockDim.x+2*halo)*(blockDim.y+2*halo)*(blockDim.z+2*halo)*sizeof(float);
hipLaunchKernelGGL(( __bilateral_3d), dim3(gridDim), dim3(blockDim), sharedMemSize, stream,
deviceSrc, deviceDst, dimx, dimy, dimz, imageDensity, colorDensity, radius, halo);
}
inline __device__ __host__ int clamp_mirror(int f, int a, int b)
{
if(f<a) return (a+(a-f));
if(f>b) return (b-(f-b));
return f;
}
#define at(x, y, z, dimx, dimy, dimz) ( clamp_mirror((int)z, 0, dimz-1)*dimy*dimx + \
clamp_mirror((int)y, 0, dimy-1)*dimx + \
clamp_mirror((int)x, 0, dimx-1) )
__global__
void __bilateral_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, float imageDensity, float colorDensity, int radius, int halo)
{
extern __shared__ float sharedMemSrc[];
int shared_index_1d, global_index_1d, index_1d;
int3 shared_index_3d, global_index_3d, index_3d;
// Multi batch reading here
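    // The shared tile holds (blockDim + 2*halo)^3 elements, more than there
    // are threads in the block, so each thread cooperatively loads several
    // elements over numBatches passes, mirroring out-of-range coordinates at
    // the volume borders via clamp_mirror.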
int3 sharedMemDim = make_int3(blockDim.x+2*halo,
blockDim.y+2*halo,
blockDim.z+2*halo);
int sharedMemSize = sharedMemDim.x*sharedMemDim.y*sharedMemDim.z;
int3 blockSizeDim = make_int3(blockDim.x+0*halo,
blockDim.y+0*halo,
blockDim.z+0*halo);
int blockSize = blockSizeDim.x*blockSizeDim.y*blockSizeDim.z;
int numBatches = sharedMemSize/blockSize + ((sharedMemSize%blockSize)?1:0);
for(int batch=0; batch<numBatches; batch++)
{
shared_index_1d = threadIdx.z * blockDim.y * blockDim.x +
threadIdx.y * blockDim.x +
threadIdx.x +
                          blockSize*batch; // Magic is here
shared_index_3d = make_int3((shared_index_1d % ((blockDim.y+2*halo)*(blockDim.x+2*halo))) % (blockDim.x+2*halo),
(shared_index_1d % ((blockDim.y+2*halo)*(blockDim.x+2*halo))) / (blockDim.x+2*halo),
(shared_index_1d / ((blockDim.y+2*halo)*(blockDim.x+2*halo))) );
global_index_3d = make_int3(clamp_mirror(blockIdx.x * blockDim.x + shared_index_3d.x - halo, 0, dimx-1),
clamp_mirror(blockIdx.y * blockDim.y + shared_index_3d.y - halo, 0, dimy-1),
clamp_mirror(blockIdx.z * blockDim.z + shared_index_3d.z - halo, 0, dimz-1) );
global_index_1d = global_index_3d.z * dimy * dimx +
global_index_3d.y * dimx +
global_index_3d.x;
if (shared_index_3d.z < (blockDim.z + 2*halo))
{
if(global_index_3d.z >= 0 && global_index_3d.z < dimz &&
global_index_3d.y >= 0 && global_index_3d.y < dimy &&
global_index_3d.x >= 0 && global_index_3d.x < dimx)
{
sharedMemSrc[at(shared_index_3d.x, shared_index_3d.y, shared_index_3d.z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] = deviceSrc[global_index_1d];
}
else
{
sharedMemSrc[at(shared_index_3d.x, shared_index_3d.y, shared_index_3d.z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] = -1;
}
}
__syncthreads();
}
// Stencil processing here
float result = sharedMemSrc[at(threadIdx.x + halo, threadIdx.y + halo, threadIdx.z + halo, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)];
float imageWeight, colorWeight, weight, totalWeight = 0.0f, pixel = 0.0f;
for(int z=threadIdx.z+halo-radius; z<=threadIdx.z+halo+radius; z++)
{
for(int y=threadIdx.y+halo-radius; y<=threadIdx.y+halo+radius; y++)
{
for(int x=threadIdx.x+halo-radius; x<=threadIdx.x+halo+radius; x++)
{
imageWeight = expf(-0.5f* ( (threadIdx.z+halo-z)*(threadIdx.z+halo-z) +
(threadIdx.y+halo-y)*(threadIdx.y+halo-y) +
(threadIdx.x+halo-x)*(threadIdx.x+halo-x) ) /(imageDensity*imageDensity));
colorWeight = expf(-0.5f* ( (sharedMemSrc[at(threadIdx.x+halo, threadIdx.y+halo, threadIdx.z+halo,
sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] -
sharedMemSrc[at(x, y, z,
sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)]) *
(sharedMemSrc[at(threadIdx.x+halo, threadIdx.y+halo, threadIdx.z+halo,
sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] -
sharedMemSrc[at(x, y, z,
sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)]) ) /(colorDensity*colorDensity));
weight = imageWeight * colorWeight;
result += (weight)* sharedMemSrc[at(x, y, z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)];
totalWeight += weight;
}
}
}
result /= totalWeight;
// Single pass writing here
index_3d = make_int3(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y,
blockIdx.z * blockDim.z + threadIdx.z);
index_1d = index_3d.z * dimy * dimx +
index_3d.y * dimx +
index_3d.x;
if (index_3d.z < dimz &&
index_3d.y < dimy &&
index_3d.x < dimx)
{
deviceDst[index_1d] = result;
}
}
|
9c90f1e2f037b76d1c4867dc73862ee219e74786.cu
|
#include "bilateral_3d.hpp"
#include "helper_math.h"
void bilateral_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, float imageDensity, float colorDensity, int radius, int halo, cudaStream_t stream);
__global__
void __bilateral_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, float imageDensity, float colorDensity, int radius, int halo);
void bilateral_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, float imageDensity, float colorDensity, int radius, int halo, cudaStream_t stream)
{
dim3 blockDim(8, 8, 8);
dim3 gridDim(
(dimx/blockDim.x + ((dimx%blockDim.x)?1:0)),
(dimy/blockDim.y + ((dimy%blockDim.y)?1:0)),
(dimz/blockDim.z + ((dimz%blockDim.z)?1:0)) );
size_t sharedMemSize = (blockDim.x+2*halo)*(blockDim.y+2*halo)*(blockDim.z+2*halo)*sizeof(float);
__bilateral_3d<<<gridDim, blockDim, sharedMemSize, stream>>>
(deviceSrc, deviceDst, dimx, dimy, dimz, imageDensity, colorDensity, radius, halo);
}
inline __device__ __host__ int clamp_mirror(int f, int a, int b)
{
if(f<a) return (a+(a-f));
if(f>b) return (b-(f-b));
return f;
}
#define at(x, y, z, dimx, dimy, dimz) ( clamp_mirror((int)z, 0, dimz-1)*dimy*dimx + \
clamp_mirror((int)y, 0, dimy-1)*dimx + \
clamp_mirror((int)x, 0, dimx-1) )
__global__
void __bilateral_3d(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, float imageDensity, float colorDensity, int radius, int halo)
{
extern __shared__ float sharedMemSrc[];
int shared_index_1d, global_index_1d, index_1d;
int3 shared_index_3d, global_index_3d, index_3d;
// Multi batch reading here
int3 sharedMemDim = make_int3(blockDim.x+2*halo,
blockDim.y+2*halo,
blockDim.z+2*halo);
int sharedMemSize = sharedMemDim.x*sharedMemDim.y*sharedMemDim.z;
int3 blockSizeDim = make_int3(blockDim.x+0*halo,
blockDim.y+0*halo,
blockDim.z+0*halo);
int blockSize = blockSizeDim.x*blockSizeDim.y*blockSizeDim.z;
int numBatches = sharedMemSize/blockSize + ((sharedMemSize%blockSize)?1:0);
for(int batch=0; batch<numBatches; batch++)
{
shared_index_1d = threadIdx.z * blockDim.y * blockDim.x +
threadIdx.y * blockDim.x +
threadIdx.x +
                          blockSize*batch; // Magic is here
shared_index_3d = make_int3((shared_index_1d % ((blockDim.y+2*halo)*(blockDim.x+2*halo))) % (blockDim.x+2*halo),
(shared_index_1d % ((blockDim.y+2*halo)*(blockDim.x+2*halo))) / (blockDim.x+2*halo),
(shared_index_1d / ((blockDim.y+2*halo)*(blockDim.x+2*halo))) );
global_index_3d = make_int3(clamp_mirror(blockIdx.x * blockDim.x + shared_index_3d.x - halo, 0, dimx-1),
clamp_mirror(blockIdx.y * blockDim.y + shared_index_3d.y - halo, 0, dimy-1),
clamp_mirror(blockIdx.z * blockDim.z + shared_index_3d.z - halo, 0, dimz-1) );
global_index_1d = global_index_3d.z * dimy * dimx +
global_index_3d.y * dimx +
global_index_3d.x;
if (shared_index_3d.z < (blockDim.z + 2*halo))
{
if(global_index_3d.z >= 0 && global_index_3d.z < dimz &&
global_index_3d.y >= 0 && global_index_3d.y < dimy &&
global_index_3d.x >= 0 && global_index_3d.x < dimx)
{
sharedMemSrc[at(shared_index_3d.x, shared_index_3d.y, shared_index_3d.z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] = deviceSrc[global_index_1d];
}
else
{
sharedMemSrc[at(shared_index_3d.x, shared_index_3d.y, shared_index_3d.z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] = -1;
}
}
__syncthreads();
}
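    // Bilateral weight = spatial Gaussian (sigma = imageDensity) on the
    // distance to the centre voxel times a range Gaussian (sigma =
    // colorDensity) on the intensity difference, accumulated over a
    // (2*radius+1)^3 window and normalised by the total weight.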
// Stencil processing here
float result = sharedMemSrc[at(threadIdx.x + halo, threadIdx.y + halo, threadIdx.z + halo, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)];
float imageWeight, colorWeight, weight, totalWeight = 0.0f, pixel = 0.0f;
for(int z=threadIdx.z+halo-radius; z<=threadIdx.z+halo+radius; z++)
{
for(int y=threadIdx.y+halo-radius; y<=threadIdx.y+halo+radius; y++)
{
for(int x=threadIdx.x+halo-radius; x<=threadIdx.x+halo+radius; x++)
{
imageWeight = expf(-0.5f* ( (threadIdx.z+halo-z)*(threadIdx.z+halo-z) +
(threadIdx.y+halo-y)*(threadIdx.y+halo-y) +
(threadIdx.x+halo-x)*(threadIdx.x+halo-x) ) /(imageDensity*imageDensity));
colorWeight = expf(-0.5f* ( (sharedMemSrc[at(threadIdx.x+halo, threadIdx.y+halo, threadIdx.z+halo,
sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] -
sharedMemSrc[at(x, y, z,
sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)]) *
(sharedMemSrc[at(threadIdx.x+halo, threadIdx.y+halo, threadIdx.z+halo,
sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] -
sharedMemSrc[at(x, y, z,
sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)]) ) /(colorDensity*colorDensity));
weight = imageWeight * colorWeight;
result += (weight)* sharedMemSrc[at(x, y, z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)];
totalWeight += weight;
}
}
}
result /= totalWeight;
// Single pass writing here
index_3d = make_int3(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y,
blockIdx.z * blockDim.z + threadIdx.z);
index_1d = index_3d.z * dimy * dimx +
index_3d.y * dimx +
index_3d.x;
if (index_3d.z < dimz &&
index_3d.y < dimy &&
index_3d.x < dimx)
{
deviceDst[index_1d] = result;
}
}
|
a6a117cb623862fe7145e46a08ec52aa54ff02af.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#ifndef NDARRAY_CPP
#define NDARRAY_CPP
#include "../NDArray.h"
#include "../NDArrayFactory.h"
#include "NativeOpExecutioner.h"
#include <memory/Workspace.h>
#include <memory/MemoryRegistrator.h>
#include <ops.h>
#include <ops/gemm.h>
#include <pointercast.h>
#include <stdexcept>
#include <memory>
#include <helpers/logger.h>
#include <loops/pairwise_transform.h>
#include <loops/transform_same.h>
#include <loops/random.h>
#include <loops/broadcasting.h>
#include <indexing/NDIndex.h>
#include <indexing/IndicesList.h>
#include <helpers/ShapeUtils.h>
#include <sstream>
#include <helpers/ArrayUtils.h>
#include <MmulHelper.h>
#include <helpers/threshold.h>
#include <exceptions/datatype_exception.h>
#include <exceptions/cuda_exception.h>
#include <specials_cuda.h>
#include <loops/special_kernels.h>
#include <PointersManager.h>
#include "../NDArray.hpp"
#include <ConstantShapeHelper.h>
namespace nd4j {
void* NDArray::platformBuffer() { return specialBuffer(); }
void* NDArray::getPlatformBuffer() const { return getSpecialBuffer(); }
Nd4jLong* NDArray::getPlatformShapeInfo() const { return getSpecialShapeInfo(); }
Nd4jLong* NDArray::platformShapeInfo() { return specialShapeInfo(); }
void NDArray::syncToDevice() const {
auto currentDeviceId = AffinityManager::currentDeviceId();
if (currentDeviceId != _deviceId) {
// first of all we update shapeInfo
const_cast<NDArray*>(this)->setShapeInfo(this->getShapeInfo());
// now we actually migrate data buffer
_buffer->migrate();
}
_buffer->syncToSpecial();
}
void NDArray::syncToHost() const { _buffer->syncToPrimary(getContext()); }
void NDArray::tickWriteHost() const { _buffer->writePrimary(); }
void NDArray::tickWriteDevice() const { _buffer->writeSpecial(); }
void NDArray::tickReadHost() const { _buffer->readPrimary(); }
void NDArray::tickReadDevice() const { _buffer->readSpecial(); }
void NDArray::tickBothActual() const { _buffer->writePrimary(); _buffer->readSpecial(); }
bool NDArray::isActualOnHostSide() const { return _buffer->isPrimaryActual(); }
bool NDArray::isActualOnDeviceSide() const { return _buffer->isSpecialActual(); }
void NDArray::makeBothBuffersActual() const { if(!isActualOnHostSide()) syncToHost(); if(!isActualOnDeviceSide()) syncToDevice(); }
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void fillAsTriangularCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const T val, const int lower, const int upper) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ int zRank, xRank, areSameOffsets; // xRank == zRank always, except when xRank = 1, in this case zRank = 2
__shared__ Nd4jLong zLen, totalThreads, *sharedMem; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * zRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
// if( (row + upper < col) || (row + lower > col) )
if((coords[zRank - 2] + upper < coords[zRank - 1]) || (coords[zRank - 2] + lower > coords[zRank - 1]))
z[zOffset] = val;
else if(vx != vz) { // when x and z are different arrays
if(xRank != zRank)
coords[0] = coords[1];
const auto xOffset = areSameOffsets ? zOffset : shape::getOffset(xShapeInfo, coords);
z[zOffset] = x[xOffset];
}
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
void NDArray::fillAsTriangular(const float val, int lower, int upper, const char direction, NDArray* target) {
if (isS())
throw std::runtime_error("NDArray::fillAsTriangular: you can't use this method on String array!");
if(target == nullptr)
target = this;
if(!isSameShape(target) && !(rankOf() == 1 && target->rankOf() == 2 && sizeAt(0) == target->sizeAt(0) && sizeAt(0) == target->sizeAt(1)))
throw std::runtime_error("NDArray::fillAsTriangular method: wrong shape of target array !");
if (direction == 'u')
lower = -target->sizeAt(-2);
else if (direction == 'l')
upper = target->sizeAt(-1);
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (target->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(decltype(*target->getShapeInfo())) * target->rankOf() + 128;
PointersManager manager(getContext(), "NDArray::fillAsTriangular");
NDArray::prepareSpecialUse({target}, {this});
hipLaunchKernelGGL(( fillAsTriangularCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *getContext()->getCudaStream(), getPlatformBuffer(), getPlatformShapeInfo(), target->getPlatformBuffer(), target->getPlatformShapeInfo(), static_cast<T>(val), lower, upper);
NDArray::registerSpecialUse({target}, {this});
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT void NDArray::fillAsTriangular, (const float val, int lower, int upper, const char direction, NDArray* target), LIBND4J_TYPES);
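// Editorial note (derived from the kernel above, not an original comment):
// fillAsTriangularCuda writes `val` wherever the last two coordinates fall outside
// the band lower <= col - row <= upper, and copies/keeps the source value inside it.
// For example, with lower = upper = 0 only the main diagonal of each trailing 2D
// slice is left untouched and every off-diagonal element becomes `val`.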
////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void identityMatrixCuda(void* vx, const Nd4jLong* xShapeInfo, const T val) {
auto x = reinterpret_cast<T*>(vx);
__shared__ int rank;
__shared__ Nd4jLong len, totalThreads, *sharedMem; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(xShapeInfo);
len = shape::length(xShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < len; i += totalThreads) {
shape::index2coords(i, xShapeInfo, coords);
const auto offset = shape::getOffset(xShapeInfo, coords);
if(coords[rank - 2] == coords[rank - 1]) // row == col -> on diagonal
x[offset] = val;
else
x[offset] = static_cast<T>(0);
}
}
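// Editorial note: because only `coords[rank - 2] == coords[rank - 1]` is tested,
// for inputs of rank > 2 this kernel writes an identity pattern into every trailing
// 2D slice (ones on each slice's diagonal, zeros elsewhere), not just into a single
// matrix.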
///////////////////////////////////////////////////////////////////
template<typename T>
static void identityMatrixCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, void* vx, const Nd4jLong *xShapeInfo, const float val) {
hipLaunchKernelGGL(( identityMatrixCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, static_cast<T>(val));
}
BUILD_SINGLE_TEMPLATE(template void identityMatrixCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, void* vx, const Nd4jLong *xShapeInfo, const float val), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
void NDArray::setIdentity() {
if (isS())
throw std::runtime_error("NDArray::setIdentity: you can't use this method on String array!");
// if (rankOf() != 2)
// throw std::runtime_error("NDArray::setIdentity: method should work only for 2D tensors. But " + toStringValue(rankOf()) + " was given.");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(decltype(getShapeInfo())) * rankOf() + 128;
PointersManager manager(getContext(), "NDArray::setIdentity");
syncToDevice();
BUILD_SINGLE_SELECTOR(dataType(), identityMatrixCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), getPlatformBuffer(), getPlatformShapeInfo(), 1.f), LIBND4J_TYPES);
tickWriteDevice();
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void NDArray::swapUnsafe(NDArray& other) {
auto xType = this->dataType();
if (xType != other.dataType())
throw std::runtime_error("NDArray::swapUnsafe method: both arrays must have the same data type");
if(specialBuffer() == nullptr || other.specialBuffer() == nullptr)
throw std::runtime_error("NDArray::swapUnsafe method: input array should not be empty!");
if(lengthOf() != other.lengthOf())
throw std::runtime_error("NDArray::swapUnsafe method: input arrays should have the same length!");
BUILD_SINGLE_SELECTOR(xType, templatedSwapUnsafe, (specialBuffer(), specialShapeInfo(), other.specialBuffer(), other.specialShapeInfo(), getContext()->getCudaStream()), LIBND4J_TYPES);
}
////////////////////////////////////////////////////////////////////////
void NDArray::synchronize(const char* msg) const {
auto res = hipStreamSynchronize(*(getContext()->getCudaStream()));
if (res != 0)
throw std::runtime_error(msg + std::string(": synchronization failed !"));
}
////////////////////////////////////////////////////////////////////////
void NDArray::prepareSpecialUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if(a != nullptr)
a->syncToDevice();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocateSpecial();
if (synchronizeWritables)
a->syncToDevice();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerSpecialUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList) {
for (const auto& p : readList)
if(p != nullptr)
p->tickReadDevice();
for (const auto& p : writeList)
if (p != nullptr)
p->tickWriteDevice();
}
////////////////////////////////////////////////////////////////////////
void NDArray::preparePrimaryUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if(a != nullptr)
a->syncToHost();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocatePrimary();
if (synchronizeWritables)
a->syncToHost();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerPrimaryUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList) {
for (const auto& p : readList)
if(p != nullptr)
p->tickReadHost();
for (const auto& p : writeList)
if (p != nullptr)
p->tickWriteHost();
}
//////////////////////////////////////////////////////////////////////////
void NDArray::syncShape() const {
hipMemcpy(getSpecialShapeInfo(), getShapeInfo(), shape::shapeInfoByteLength(getShapeInfo()), hipMemcpyHostToDevice);
}
//////////////////////////////////////////////////////////////////////////
void* NDArray::specialBufferWithOffset(Nd4jLong offset) const {
return getSpecialBuffer() != nullptr ? static_cast<int8_t*>(getSpecialBuffer()) + (offset * sizeOfT()) : nullptr;
}
//////////////////////////////////////////////////////////////////////////
// create a new array by repeating this one the number of times given by reps.
NDArray NDArray::tile(const std::vector<Nd4jLong>& reps) const {
int dim = reps.size();
Nd4jLong product = 1;
for(const auto& item : reps)
product *= item;
if(product < 1)
throw std::runtime_error("NDArray::tile method: one of the elements in reps array is zero !");
int rankOld = rankOf();
int diff = rankOld - dim;
if(product==1) { // in this case 2 possibilities are present: just reshape or nothing to do
NDArray result(*this);
if(diff < 0) { // reshape to higher dimension
std::vector<Nd4jLong> shapeNew = reps; // need to have unities at first "diff" positions of new shape
memcpy(&shapeNew[-diff], result.getShapeInfo()+1, rankOld * sizeof(Nd4jLong)); // put old shape numbers at rest of positions
result.reshapei(ordering(), shapeNew);
}
return result; // nothing to do, if diff >= 0 -> identity tile
}
// evaluate shapeInfo for resulting array
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
// create new buffer; in any case the amount of memory the new buffer points to is bigger than that of the old _buffer
std::shared_ptr<DataBuffer> newBuff = std::make_shared<DataBuffer>(shape::length(newShapeInfo) * sizeOfT(), dataType(), getContext()->getWorkspace(), true);
// assign new shape and new buffer to resulting array
NDArray result(newBuff, ShapeDescriptor(newShapeInfo), getContext());
// fill newBuff by looping through all of its elements;
// iteration over getBuffer() happens automatically by means of applying getSubArrayIndex
const auto resultLen = result.lengthOf();
auto xType = this->dataType();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&result}, {this});
BUILD_SINGLE_SELECTOR(xType, tileKernelH, (this->getSpecialBuffer(), this->getSpecialShapeInfo(), result.getSpecialBuffer(), result.getSpecialShapeInfo(), resultLen, stream), LIBND4J_TYPES);
registerSpecialUse({&result}, {this});
return result;
}
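// Editorial usage sketch (hypothetical values; assumes the usual numpy-style tile
// semantics of this method):
//   NDArray a('c', {2, 3}, nd4j::DataType::FLOAT32);
//   auto t = a.tile({2, 2});            // expected result shape {4, 6}
// When the product of reps is 1 the branch above only reshapes (or copies) instead
// of launching the tile kernel.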
//////////////////////////////////////////////////////////////////////////
// change an array by repeating it the number of times given by reps.
void NDArray::tile(const std::vector<Nd4jLong>& reps, NDArray& target) const {
auto repProd = shape::prodLong(reps.data(), reps.size());
if (repProd < 1)
throw std::runtime_error("NDArray::tile: reps can't contain 0s");
// evaluate true tile shapeInfo for comparison with target shapeInfo
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
if(!shape::equalsSoft(newShapeInfo, target.getShapeInfo())) {
throw std::runtime_error("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
}
// fill target's buffer by looping through all of its elements;
// iteration over getBuffer() happens automatically by means of applying getSubArrayIndex
const int ews = target.ews();
const int targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_SINGLE_SELECTOR_TWICE(target.dataType(), tileKernelHH, (getSpecialBuffer(), getSpecialShapeInfo(), target.getSpecialBuffer(), target.getSpecialShapeInfo(), targetLen, ews, stream), LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
}
//////////////////////////////////////////////////////////////////////////
void NDArray::tile(NDArray& target) const {
if(rankOf() > target.rankOf())
throw std::runtime_error("NDArray::tile method - rank of target array must be bigger or equal to the rank of this array !");
if(!ShapeUtils::areShapesBroadcastable(*this, target))
throw std::runtime_error("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
// fill target's buffer by looping through all of its elements;
// iteration over getBuffer() happens automatically by means of applying getSubArrayIndex
const auto ews = target.ews();
const auto targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_SINGLE_SELECTOR_TWICE(target.dataType(), tileKernelHH, (getSpecialBuffer(), getSpecialShapeInfo(), target.getSpecialBuffer(), target.getSpecialShapeInfo(), targetLen, ews, stream), LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
__global__ static void repeatCuda(const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int* repeats, const int repSize,
const int axis) {
const X* x = reinterpret_cast<const X*>(vx);
Z* z = reinterpret_cast<Z*>(vz);
__shared__ int rank;
__shared__ Nd4jLong zLen, totalThreads, *sharedMem; // xLen = zLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(zShapeInfo); // xRank = zRank
zLen = shape::length(zShapeInfo); // xLen <= zLen
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
if(repSize > 1) {
for (uint j = 0; j < repSize; ++j) {
coords[axis] -= repeats[j];
if (coords[axis] < 0) {
coords[axis] = j;
break;
}
}
}
else
coords[axis] /= repeats[0];
z[zOffset] = x[shape::getOffset(xShapeInfo, coords)];
}
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
static void repeatCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int* repeats, const int repSize,
const int axis) {
hipLaunchKernelGGL(( repeatCuda<X,Z>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, repeats, repSize, axis);
}
BUILD_DOUBLE_TEMPLATE(template void repeatCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int* repeats, const int repSize, const int axis), LIBND4J_TYPES, LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////////
// create new array by repeating it the number of times given by repeats
NDArray* NDArray::repeat(const int axis, const std::vector<int>& repeats) const {
auto output = new NDArray('c', ShapeUtils::evalRepeatShape(axis, repeats, *this), dataType(), getContext());
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = output->rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
PointersManager manager(getContext(), "NDArray::repeat(const int axis, const std::vector<int>& repeats)");
const int* reps = reinterpret_cast<int*>(manager.replicatePointer(repeats.data(), repeats.size() * sizeof(int)));
prepareSpecialUse({output}, {this});
BUILD_SINGLE_SELECTOR_TWICE(dataType(), repeatCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), getSpecialBuffer(), getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), reps, repeats.size(), axis), LIBND4J_TYPES);
registerSpecialUse({output}, {this});
manager.synchronize();
return output;
}
//////////////////////////////////////////////////////////////////////////
// fill array by repeating it the number of times given by repeats
void NDArray::repeat(const int axis, const std::vector<int>& repeats, NDArray& target) const {
if(!target.isSameShape(ShapeUtils::evalRepeatShape(axis, repeats, *this)))
throw std::invalid_argument("NDArray::repeat(const int axis, const std::vector<int>& repeats, NDArray& target) method: wrong shape of target array!");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (target.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = target.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
PointersManager manager(getContext(), "NDArray::repeat(const int axis, const std::vector<int>& repeats)");
const int* reps = reinterpret_cast<int*>(manager.replicatePointer(repeats.data(), repeats.size() * sizeof(int)));
prepareSpecialUse({&target}, {this});
BUILD_DOUBLE_SELECTOR(dataType(), target.dataType(), repeatCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), getSpecialBuffer(), getSpecialShapeInfo(), target.specialBuffer(), target.specialShapeInfo(), reps, repeats.size(), axis), LIBND4J_TYPES, LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////
void* NDArray::specialBuffer() {
if (_buffer->special() == nullptr)
return getBuffer();
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
////////////////////////////////////////////////////////////////////////
void* NDArray::getSpecialBuffer() const {
if (_buffer->special() == nullptr)
return getBuffer();
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
void NDArray::printCurrentBuffer(const bool host, const char* msg, const int precision) const {
if(_length == 0)
{ printf("NDArray::printActualBuffer: array length is zero !\n"); return; }
if(msg)
printf("%s", msg);
if(host) {
if(getBuffer() == nullptr || _length == 0)
{ printf("NDArray::printActualBuffer: host buffer is nullptr !\n"); return; }
const T* buff = bufferAsT<T>();
for (uint i = 0; i < _length; i++)
printf("%.*f, ", precision, (double)buff[getOffset(i)]);
printf("\n");
}
else {
if(getSpecialBuffer() == nullptr || _length == 0)
{ printf("NDArray::printSpecialBuffer: special buffer is nullptr !\n"); return; }
void* pHost = operator new(sizeof(T) * _length);
if (ews() != 1) {
for (uint i = 0; i < _length; i++)
hipMemcpyAsync(reinterpret_cast<T*>(pHost) + i, specialBufferWithOffset(i), sizeof(T), hipMemcpyDeviceToHost, *(getContext()->getCudaStream()));
}
else
hipMemcpyAsync(pHost, getSpecialBuffer(), sizeOfT() * _length, hipMemcpyDeviceToHost, *getContext()->getCudaStream());
hipError_t cudaResult = hipStreamSynchronize(*getContext()->getCudaStream());
if(cudaResult != 0)
throw std::runtime_error("NDArray::printSpecialBuffer: hipStreamSynchronize failed!");
for (uint i = 0; i < _length; i++)
printf("%.*f, ", precision, (double)reinterpret_cast<T*>(pHost)[i]);
printf("\n");
operator delete(pHost);
}
}
template void NDArray::printCurrentBuffer<int>(const bool host,const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<float>(const bool host, const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<double>(const bool host, const char* msg, const int precision) const;
#if defined(__HIPCC__) && !defined(BUILD_TESTS)
//#include <cpu/NDArrayLambda.hpp>
#endif
} // end namespace nd4j
#endif
|
a6a117cb623862fe7145e46a08ec52aa54ff02af.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#ifndef NDARRAY_CPP
#define NDARRAY_CPP
#include "../NDArray.h"
#include "../NDArrayFactory.h"
#include "NativeOpExecutioner.h"
#include <memory/Workspace.h>
#include <memory/MemoryRegistrator.h>
#include <ops.h>
#include <ops/gemm.h>
#include <pointercast.h>
#include <stdexcept>
#include <memory>
#include <helpers/logger.h>
#include <loops/pairwise_transform.h>
#include <loops/transform_same.h>
#include <loops/random.h>
#include <loops/broadcasting.h>
#include <indexing/NDIndex.h>
#include <indexing/IndicesList.h>
#include <helpers/ShapeUtils.h>
#include <sstream>
#include <helpers/ArrayUtils.h>
#include <MmulHelper.h>
#include <helpers/threshold.h>
#include <exceptions/datatype_exception.h>
#include <exceptions/cuda_exception.h>
#include <specials_cuda.h>
#include <loops/special_kernels.h>
#include <PointersManager.h>
#include "../NDArray.hpp"
#include <ConstantShapeHelper.h>
namespace nd4j {
void* NDArray::platformBuffer() { return specialBuffer(); }
void* NDArray::getPlatformBuffer() const { return getSpecialBuffer(); }
Nd4jLong* NDArray::getPlatformShapeInfo() const { return getSpecialShapeInfo(); }
Nd4jLong* NDArray::platformShapeInfo() { return specialShapeInfo(); }
void NDArray::syncToDevice() const {
auto currentDeviceId = AffinityManager::currentDeviceId();
if (currentDeviceId != _deviceId) {
// first of all we update shapeInfo
const_cast<NDArray*>(this)->setShapeInfo(this->getShapeInfo());
// now we actually migrate data buffer
_buffer->migrate();
}
_buffer->syncToSpecial();
}
void NDArray::syncToHost() const { _buffer->syncToPrimary(getContext()); }
void NDArray::tickWriteHost() const { _buffer->writePrimary(); }
void NDArray::tickWriteDevice() const { _buffer->writeSpecial(); }
void NDArray::tickReadHost() const { _buffer->readPrimary(); }
void NDArray::tickReadDevice() const { _buffer->readSpecial(); }
void NDArray::tickBothActual() const { _buffer->writePrimary(); _buffer->readSpecial(); }
bool NDArray::isActualOnHostSide() const { return _buffer->isPrimaryActual(); }
bool NDArray::isActualOnDeviceSide() const { return _buffer->isSpecialActual(); }
void NDArray::makeBothBuffersActual() const { if(!isActualOnHostSide()) syncToHost(); if(!isActualOnDeviceSide()) syncToDevice(); }
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void fillAsTriangularCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const T val, const int lower, const int upper) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ int zRank, xRank, areSameOffsets; // xRank == zRank always, except when xRank = 1, in this case zRank = 2
__shared__ Nd4jLong zLen, totalThreads, *sharedMem; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * zRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
// if( (row + upper < col) || (row + lower > col) )
if((coords[zRank - 2] + upper < coords[zRank - 1]) || (coords[zRank - 2] + lower > coords[zRank - 1]))
z[zOffset] = val;
else if(vx != vz) { // when x and z are different arrays
if(xRank != zRank)
coords[0] = coords[1];
const auto xOffset = areSameOffsets ? zOffset : shape::getOffset(xShapeInfo, coords);
z[zOffset] = x[xOffset];
}
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
void NDArray::fillAsTriangular(const float val, int lower, int upper, const char direction, NDArray* target) {
if (isS())
throw std::runtime_error("NDArray::fillAsTriangular: you can't use this method on String array!");
if(target == nullptr)
target = this;
if(!isSameShape(target) && !(rankOf() == 1 && target->rankOf() == 2 && sizeAt(0) == target->sizeAt(0) && sizeAt(0) == target->sizeAt(1)))
throw std::runtime_error("NDArray::fillAsTriangular method: wrong shape of target array !");
if (direction == 'u')
lower = -target->sizeAt(-2);
else if (direction == 'l')
upper = target->sizeAt(-1);
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (target->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(decltype(*target->getShapeInfo())) * target->rankOf() + 128;
PointersManager manager(getContext(), "NDArray::fillAsTriangular");
NDArray::prepareSpecialUse({target}, {this});
fillAsTriangularCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *getContext()->getCudaStream()>>>(getPlatformBuffer(), getPlatformShapeInfo(), target->getPlatformBuffer(), target->getPlatformShapeInfo(), static_cast<T>(val), lower, upper);
NDArray::registerSpecialUse({target}, {this});
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT void NDArray::fillAsTriangular, (const float val, int lower, int upper, const char direction, NDArray* target), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void identityMatrixCuda(void* vx, const Nd4jLong* xShapeInfo, const T val) {
auto x = reinterpret_cast<T*>(vx);
__shared__ int rank;
__shared__ Nd4jLong len, totalThreads, *sharedMem; // xLen == zLen, except when xRank = 1, in this case zLen = 2*xLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(xShapeInfo);
len = shape::length(xShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < len; i += totalThreads) {
shape::index2coords(i, xShapeInfo, coords);
const auto offset = shape::getOffset(xShapeInfo, coords);
if(coords[rank - 2] == coords[rank - 1]) // row == col -> on diagonal
x[offset] = val;
else
x[offset] = static_cast<T>(0);
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void identityMatrixCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, void* vx, const Nd4jLong *xShapeInfo, const float val) {
identityMatrixCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, static_cast<T>(val));
}
BUILD_SINGLE_TEMPLATE(template void identityMatrixCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, void* vx, const Nd4jLong *xShapeInfo, const float val), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
void NDArray::setIdentity() {
if (isS())
throw std::runtime_error("NDArray::setIdentity: you can't use this method on String array!");
// if (rankOf() != 2)
// throw std::runtime_error("NDArray::setIdentity: method should work only for 2D tensors. But " + toStringValue(rankOf()) + " was given.");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(decltype(getShapeInfo())) * rankOf() + 128;
PointersManager manager(getContext(), "NDArray::setIdentity");
syncToDevice();
BUILD_SINGLE_SELECTOR(dataType(), identityMatrixCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), getPlatformBuffer(), getPlatformShapeInfo(), 1.f), LIBND4J_TYPES);
tickWriteDevice();
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void NDArray::swapUnsafe(NDArray& other) {
auto xType = this->dataType();
if (xType != other.dataType())
throw std::runtime_error("NDArray::swapUnsafe method: both arrays must have the same data type");
if(specialBuffer() == nullptr || other.specialBuffer() == nullptr)
throw std::runtime_error("NDArray::swapUnsafe method: input array should not be empty!");
if(lengthOf() != other.lengthOf())
throw std::runtime_error("NDArray::swapUnsafe method: input arrays should have the same length!");
BUILD_SINGLE_SELECTOR(xType, templatedSwapUnsafe, (specialBuffer(), specialShapeInfo(), other.specialBuffer(), other.specialShapeInfo(), getContext()->getCudaStream()), LIBND4J_TYPES);
}
////////////////////////////////////////////////////////////////////////
void NDArray::synchronize(const char* msg) const {
auto res = cudaStreamSynchronize(*(getContext()->getCudaStream()));
if (res != 0)
throw std::runtime_error(msg + std::string(": synchronization failed !"));
}
////////////////////////////////////////////////////////////////////////
void NDArray::prepareSpecialUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if(a != nullptr)
a->syncToDevice();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocateSpecial();
if (synchronizeWritables)
a->syncToDevice();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerSpecialUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList) {
for (const auto& p : readList)
if(p != nullptr)
p->tickReadDevice();
for (const auto& p : writeList)
if (p != nullptr)
p->tickWriteDevice();
}
////////////////////////////////////////////////////////////////////////
void NDArray::preparePrimaryUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList, bool synchronizeWritables) {
for (const auto& a : readList)
if(a != nullptr)
a->syncToHost();
for (const auto& a : writeList) {
if (a != nullptr) {
a->getDataBuffer()->allocatePrimary();
if (synchronizeWritables)
a->syncToHost();
}
}
}
////////////////////////////////////////////////////////////////////////
void NDArray::registerPrimaryUse(const std::initializer_list<const NDArray*>& writeList, const std::initializer_list<const NDArray*>& readList) {
for (const auto& p : readList)
if(p != nullptr)
p->tickReadHost();
for (const auto& p : writeList)
if (p != nullptr)
p->tickWriteHost();
}
//////////////////////////////////////////////////////////////////////////
void NDArray::syncShape() const {
cudaMemcpy(getSpecialShapeInfo(), getShapeInfo(), shape::shapeInfoByteLength(getShapeInfo()), cudaMemcpyHostToDevice);
}
//////////////////////////////////////////////////////////////////////////
void* NDArray::specialBufferWithOffset(Nd4jLong offset) const {
return getSpecialBuffer() != nullptr ? static_cast<int8_t*>(getSpecialBuffer()) + (offset * sizeOfT()) : nullptr;
}
//////////////////////////////////////////////////////////////////////////
// create a new array by repeating this one the number of times given by reps.
NDArray NDArray::tile(const std::vector<Nd4jLong>& reps) const {
int dim = reps.size();
Nd4jLong product = 1;
for(const auto& item : reps)
product *= item;
if(product < 1)
throw std::runtime_error("NDArray::tile method: one of the elements in reps array is zero !");
int rankOld = rankOf();
int diff = rankOld - dim;
if(product==1) { // in this case 2 possibilities are present: just reshape or nothing to do
NDArray result(*this);
if(diff < 0) { // reshape to higher dimension
std::vector<Nd4jLong> shapeNew = reps; // need to have unities at first "diff" positions of new shape
memcpy(&shapeNew[-diff], result.getShapeInfo()+1, rankOld * sizeof(Nd4jLong)); // put old shape numbers at rest of positions
result.reshapei(ordering(), shapeNew);
}
return result; // nothing to do, if diff >= 0 -> identity tile
}
// evaluate shapeInfo for resulting array
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
// create new buffer; in any case the amount of memory the new buffer points to is bigger than that of the old _buffer
std::shared_ptr<DataBuffer> newBuff = std::make_shared<DataBuffer>(shape::length(newShapeInfo) * sizeOfT(), dataType(), getContext()->getWorkspace(), true);
// assign new shape and new buffer to resulting array
NDArray result(newBuff, ShapeDescriptor(newShapeInfo), getContext());
// fill newBuff by looping through all of its elements;
// iteration over getBuffer() happens automatically by means of applying getSubArrayIndex
const auto resultLen = result.lengthOf();
auto xType = this->dataType();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&result}, {this});
BUILD_SINGLE_SELECTOR(xType, tileKernelH, (this->getSpecialBuffer(), this->getSpecialShapeInfo(), result.getSpecialBuffer(), result.getSpecialShapeInfo(), resultLen, stream), LIBND4J_TYPES);
registerSpecialUse({&result}, {this});
return result;
}
//////////////////////////////////////////////////////////////////////////
// change an array by repeating it the number of times given by reps.
void NDArray::tile(const std::vector<Nd4jLong>& reps, NDArray& target) const {
auto repProd = shape::prodLong(reps.data(), reps.size());
if (repProd < 1)
throw std::runtime_error("NDArray::tile: reps can't contain 0s");
// evaluate true tile shapeInfo for comparison with target shapeInfo
auto newShapeInfo = ShapeUtils::evalTileShapeInfo(*this, reps, getContext()->getWorkspace());
if(!shape::equalsSoft(newShapeInfo, target.getShapeInfo())) {
throw std::runtime_error("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
}
// fill target's buffer by looping through all of its elements;
// iteration over getBuffer() happens automatically by means of applying getSubArrayIndex
const int ews = target.ews();
const int targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_SINGLE_SELECTOR_TWICE(target.dataType(), tileKernelHH, (getSpecialBuffer(), getSpecialShapeInfo(), target.getSpecialBuffer(), target.getSpecialShapeInfo(), targetLen, ews, stream), LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
}
//////////////////////////////////////////////////////////////////////////
void NDArray::tile(NDArray& target) const {
if(rankOf() > target.rankOf())
throw std::runtime_error("NDArray::tile method - rank of target array must be bigger or equal to the rank of this array !");
if(!ShapeUtils::areShapesBroadcastable(*this, target))
throw std::runtime_error("NDArray::tile method - shapeInfo of target array is not suitable for tile operation !");
// fill target's buffer by looping through all of its elements;
// iteration over getBuffer() happens automatically by means of applying getSubArrayIndex
const auto ews = target.ews();
const auto targetLen = target.lengthOf();
auto stream = getContext()->getCudaStream();
prepareSpecialUse({&target}, {this});
BUILD_SINGLE_SELECTOR_TWICE(target.dataType(), tileKernelHH, (getSpecialBuffer(), getSpecialShapeInfo(), target.getSpecialBuffer(), target.getSpecialShapeInfo(), targetLen, ews, stream), LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
}
////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
__global__ static void repeatCuda(const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int* repeats, const int repSize,
const int axis) {
const X* x = reinterpret_cast<const X*>(vx);
Z* z = reinterpret_cast<Z*>(vz);
__shared__ int rank;
__shared__ Nd4jLong zLen, totalThreads, *sharedMem; // xLen = zLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
rank = shape::rank(zShapeInfo); // xRank = zRank
zLen = shape::length(zShapeInfo); // xLen <= zLen
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * rank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
if(repSize > 1) {
for (uint j = 0; j < repSize; ++j) {
coords[axis] -= repeats[j];
if (coords[axis] < 0) {
coords[axis] = j;
break;
}
}
}
else
coords[axis] /= repeats[0];
z[zOffset] = x[shape::getOffset(xShapeInfo, coords)];
}
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
static void repeatCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int* repeats, const int repSize,
const int axis) {
repeatCuda<X,Z><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, repeats, repSize, axis);
}
BUILD_DOUBLE_TEMPLATE(template void repeatCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int* repeats, const int repSize, const int axis), LIBND4J_TYPES, LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////////
// create new array by repeating it the number of times given by repeats
NDArray* NDArray::repeat(const int axis, const std::vector<int>& repeats) const {
auto output = new NDArray('c', ShapeUtils::evalRepeatShape(axis, repeats, *this), dataType(), getContext());
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = output->rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
PointersManager manager(getContext(), "NDArray::repeat(const int axis, const std::vector<int>& repeats)");
const int* reps = reinterpret_cast<int*>(manager.replicatePointer(repeats.data(), repeats.size() * sizeof(int)));
prepareSpecialUse({output}, {this});
BUILD_SINGLE_SELECTOR_TWICE(dataType(), repeatCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), getSpecialBuffer(), getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), reps, repeats.size(), axis), LIBND4J_TYPES);
registerSpecialUse({output}, {this});
manager.synchronize();
return output;
}
//////////////////////////////////////////////////////////////////////////
// fill array by repeating it the number of times given by repeats
void NDArray::repeat(const int axis, const std::vector<int>& repeats, NDArray& target) const {
if(!target.isSameShape(ShapeUtils::evalRepeatShape(axis, repeats, *this)))
throw std::invalid_argument("NDArray::repeat(const int axis, const std::vector<int>& repeats, NDArray& target) method: wrong shape of target array!");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (target.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = target.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
PointersManager manager(getContext(), "NDArray::repeat(const int axis, const std::vector<int>& repeats)");
const int* reps = reinterpret_cast<int*>(manager.replicatePointer(repeats.data(), repeats.size() * sizeof(int)));
prepareSpecialUse({&target}, {this});
BUILD_DOUBLE_SELECTOR(dataType(), target.dataType(), repeatCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, getContext()->getCudaStream(), getSpecialBuffer(), getSpecialShapeInfo(), target.specialBuffer(), target.specialShapeInfo(), reps, repeats.size(), axis), LIBND4J_TYPES, LIBND4J_TYPES);
registerSpecialUse({&target}, {this});
manager.synchronize();
}
////////////////////////////////////////////////////////////////////////
void* NDArray::specialBuffer() {
if (_buffer->special() == nullptr)
return getBuffer();
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
////////////////////////////////////////////////////////////////////////
void* NDArray::getSpecialBuffer() const {
if (_buffer->special() == nullptr)
return getBuffer();
// FIXME: this should be fixed once CUDA backend added
return static_cast<int8_t*>(_buffer->special()) + (_offset * sizeOfT());
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
void NDArray::printCurrentBuffer(const bool host, const char* msg, const int precision) const {
if(_length == 0)
{ printf("NDArray::printActualBuffer: array length is zero !\n"); return; }
if(msg)
printf("%s", msg);
if(host) {
if(getBuffer() == nullptr || _length == 0)
{ printf("NDArray::printActualBuffer: host buffer is nullptr !\n"); return; }
const T* buff = bufferAsT<T>();
for (uint i = 0; i < _length; i++)
printf("%.*f, ", precision, (double)buff[getOffset(i)]);
printf("\n");
}
else {
if(getSpecialBuffer() == nullptr || _length == 0)
{ printf("NDArray::printSpecialBuffer: special buffer is nullptr !\n"); return; }
void* pHost = operator new(sizeof(T) * _length);
if (ews() != 1) {
for (uint i = 0; i < _length; i++)
cudaMemcpyAsync(reinterpret_cast<T*>(pHost) + i, specialBufferWithOffset(i), sizeof(T), cudaMemcpyDeviceToHost, *(getContext()->getCudaStream()));
}
else
cudaMemcpyAsync(pHost, getSpecialBuffer(), sizeOfT() * _length, cudaMemcpyDeviceToHost, *getContext()->getCudaStream());
cudaError_t cudaResult = cudaStreamSynchronize(*getContext()->getCudaStream());
if(cudaResult != 0)
throw std::runtime_error("NDArray::printSpecialBuffer: cudaStreamSynchronize failed!");
for (uint i = 0; i < _length; i++)
printf("%.*f, ", precision, (double)reinterpret_cast<T*>(pHost)[i]);
printf("\n");
operator delete(pHost);
}
}
template void NDArray::printCurrentBuffer<int>(const bool host,const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<float>(const bool host, const char* msg, const int precision) const;
template void NDArray::printCurrentBuffer<double>(const bool host, const char* msg, const int precision) const;
#if defined(__CUDACC__) && !defined(BUILD_TESTS)
//#include <cpu/NDArrayLambda.hpp>
#endif
} // end namespace nd4j
#endif
|
4c7cf3e3a5bd72875df53d468351a28daf11dcef.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file writer_impl.cu
* @brief cuDF-IO ORC writer class implementation
*/
#include "writer_impl.hpp"
#include <io/statistics/column_statistics.cuh>
#include <io/utilities/column_utils.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <algorithm>
#include <cstring>
#include <numeric>
#include <utility>
namespace cudf {
namespace io {
namespace detail {
namespace orc {
using namespace cudf::io::orc;
using namespace cudf::io;
struct row_group_index_info {
int32_t pos = -1; // Position
int32_t blk_pos = -1; // Block Position
int32_t comp_pos = -1; // Compressed Position
int32_t comp_size = -1; // Compressed size
};
namespace {
/**
* @brief Helper for pinned host memory
*/
template <typename T>
using pinned_buffer = std::unique_ptr<T, decltype(&hipHostFree)>;
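// Editorial sketch (not part of the original writer): constructing such a pinned
// buffer. hipHostMalloc/hipHostFree are the page-locked host-memory calls this
// alias is built around; `num_bytes` is a placeholder and error handling is omitted.
//   uint8_t* p = nullptr;
//   hipHostMalloc(reinterpret_cast<void**>(&p), num_bytes);
//   pinned_buffer<uint8_t> buf{p, hipHostFree};  // released via hipHostFree on destruction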
/**
* @brief Function that translates GDF compression to ORC compression
*/
orc::CompressionKind to_orc_compression(compression_type compression)
{
switch (compression) {
case compression_type::AUTO:
case compression_type::SNAPPY: return orc::CompressionKind::SNAPPY;
case compression_type::NONE: return orc::CompressionKind::NONE;
default: CUDF_EXPECTS(false, "Unsupported compression type"); return orc::CompressionKind::NONE;
}
}
/**
* @brief Function that translates GDF dtype to ORC datatype
*/
constexpr orc::TypeKind to_orc_type(cudf::type_id id)
{
switch (id) {
case cudf::type_id::INT8: return TypeKind::BYTE;
case cudf::type_id::INT16: return TypeKind::SHORT;
case cudf::type_id::INT32: return TypeKind::INT;
case cudf::type_id::INT64: return TypeKind::LONG;
case cudf::type_id::FLOAT32: return TypeKind::FLOAT;
case cudf::type_id::FLOAT64: return TypeKind::DOUBLE;
case cudf::type_id::BOOL8: return TypeKind::BOOLEAN;
case cudf::type_id::TIMESTAMP_DAYS: return TypeKind::DATE;
case cudf::type_id::TIMESTAMP_SECONDS:
case cudf::type_id::TIMESTAMP_MICROSECONDS:
case cudf::type_id::TIMESTAMP_MILLISECONDS:
case cudf::type_id::TIMESTAMP_NANOSECONDS: return TypeKind::TIMESTAMP;
case cudf::type_id::STRING: return TypeKind::STRING;
case cudf::type_id::DECIMAL32:
case cudf::type_id::DECIMAL64: return TypeKind::DECIMAL;
case cudf::type_id::LIST: return TypeKind::LIST;
default: return TypeKind::INVALID_TYPE_KIND;
}
}
/**
* @brief Translates time unit to nanoscale multiple.
*/
constexpr int32_t to_clockscale(cudf::type_id timestamp_id)
{
switch (timestamp_id) {
case cudf::type_id::TIMESTAMP_SECONDS: return 9;
case cudf::type_id::TIMESTAMP_MILLISECONDS: return 6;
case cudf::type_id::TIMESTAMP_MICROSECONDS: return 3;
case cudf::type_id::TIMESTAMP_NANOSECONDS:
default: return 0;
}
}
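// Editorial note: the returned value is the power of ten that scales the source
// resolution up to nanoseconds, e.g. TIMESTAMP_MILLISECONDS -> 6 because
// 1 ms = 10^6 ns; nanosecond timestamps need no scaling and return 0.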
/**
* @brief Returns the precision of the given decimal type.
*/
constexpr auto orc_precision(cudf::type_id decimal_id)
{
switch (decimal_id) {
case cudf::type_id::DECIMAL32: return 9;
case cudf::type_id::DECIMAL64: return 18;
default: return 0;
}
}
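// Editorial note: 9 and 18 are the maximum counts of full base-10 digits that fit
// in a signed 32-bit and 64-bit integer respectively, which is what gets recorded
// as the ORC precision for DECIMAL32/DECIMAL64 columns.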
} // namespace
/**
* @brief Helper class that adds ORC-specific column info
*/
class orc_column_view {
public:
/**
* @brief Constructor that extracts out the string position + length pairs
* for building dictionaries for string columns
*/
explicit orc_column_view(uint32_t index,
int str_idx,
int index_in_table,
column_view const& col,
const table_metadata* metadata)
: cudf_column{col},
_index{index},
_str_idx{str_idx},
_is_child{index_in_table < 0},
_type_width{cudf::is_fixed_width(col.type()) ? cudf::size_of(col.type()) : 0},
_scale{(to_orc_type(col.type().id()) == TypeKind::DECIMAL) ? -col.type().scale()
: to_clockscale(col.type().id())},
_precision{orc_precision(col.type().id())},
_type_kind{to_orc_type(col.type().id())}
{
// Don't assign names to child columns
if (index_in_table >= 0) {
if (metadata != nullptr && index_in_table < static_cast<int>(metadata->column_names.size())) {
_name = metadata->column_names[index_in_table];
} else {
// Generating default name if name isn't present in metadata
_name = "_col" + std::to_string(index_in_table);
}
}
}
auto is_string() const noexcept { return cudf_column.type().id() == type_id::STRING; }
void set_dict_stride(size_t stride) noexcept { _dict_stride = stride; }
auto dict_stride() const noexcept { return _dict_stride; }
/**
* @brief Function that associates an existing dictionary chunk allocation
*/
void attach_dict_chunk(gpu::DictionaryChunk const* host_dict,
gpu::DictionaryChunk const* dev_dict)
{
dict = host_dict;
d_dict = dev_dict;
}
auto host_dict_chunk(size_t rowgroup) const
{
CUDF_EXPECTS(is_string(), "Dictionary chunks are only present in string columns.");
return &dict[rowgroup * _dict_stride + _str_idx];
}
auto device_dict_chunk() const { return d_dict; }
auto const& decimal_offsets() const { return d_decimal_offsets; }
void attach_decimal_offsets(uint32_t* sizes_ptr) { d_decimal_offsets = sizes_ptr; }
/**
* @brief Function that associates an existing stripe dictionary allocation
*/
void attach_stripe_dict(gpu::StripeDictionary* host_stripe_dict,
gpu::StripeDictionary* dev_stripe_dict)
{
stripe_dict = host_stripe_dict;
d_stripe_dict = dev_stripe_dict;
}
auto host_stripe_dict(size_t stripe) const
{
CUDF_EXPECTS(is_string(), "Stripe dictionary is only present in string columns.");
return &stripe_dict[stripe * _dict_stride + _str_idx];
}
auto device_stripe_dict() const noexcept { return d_stripe_dict; }
// Index in the table
auto index() const noexcept { return _index; }
// Id in the ORC file
auto id() const noexcept { return _index + 1; }
auto is_child() const noexcept { return _is_child; }
auto type_width() const noexcept { return _type_width; }
auto size() const noexcept { return cudf_column.size(); }
auto null_count() const noexcept { return cudf_column.null_count(); }
auto null_mask() const noexcept { return cudf_column.null_mask(); }
bool nullable() const noexcept { return null_mask() != nullptr; }
auto scale() const noexcept { return _scale; }
auto precision() const noexcept { return _precision; }
void set_orc_encoding(ColumnEncodingKind e) noexcept { _encoding_kind = e; }
auto orc_kind() const noexcept { return _type_kind; }
auto orc_encoding() const noexcept { return _encoding_kind; }
auto orc_name() const noexcept { return _name; }
private:
column_view cudf_column;
// Identifier within the set of columns
uint32_t _index = 0;
// Identifier within the set of string columns
int _str_idx;
bool _is_child = false;
size_t _type_width = 0;
int32_t _scale = 0;
int32_t _precision = 0;
// ORC-related members
std::string _name{};
TypeKind _type_kind;
ColumnEncodingKind _encoding_kind;
// String dictionary-related members
size_t _dict_stride = 0;
gpu::DictionaryChunk const* dict = nullptr;
gpu::StripeDictionary const* stripe_dict = nullptr;
gpu::DictionaryChunk const* d_dict = nullptr;
gpu::StripeDictionary const* d_stripe_dict = nullptr;
// Offsets for encoded decimal elements. Used to enable direct writing of encoded decimal elements
// into the output stream.
uint32_t* d_decimal_offsets = nullptr;
};
size_type orc_table_view::num_rows() const noexcept
{
return columns.empty() ? 0 : columns.front().size();
}
/**
* @brief Gathers stripe information.
*
* @param columns List of columns
* @param rowgroup_bounds Ranges of rows in each rowgroup [rowgroup][column]
* @param max_stripe_bytes Maximum size of each stripe, in bytes
* @return List of stripe descriptors
*/
file_segmentation calculate_segmentation(host_span<orc_column_view const> columns,
hostdevice_2dvector<rowgroup_rows>&& rowgroup_bounds,
uint32_t max_stripe_bytes)
{
auto const is_any_column_string =
std::any_of(columns.begin(), columns.end(), [](auto const& col) { return col.is_string(); });
// Apply rows per stripe limit to limit string dictionaries
size_t const max_stripe_rows = is_any_column_string ? 1000000 : 5000000;
std::vector<stripe_rowgroups> infos;
auto const num_rowgroups = rowgroup_bounds.size().first;
size_t stripe_start = 0;
size_t stripe_bytes = 0;
size_t stripe_rows = 0;
for (size_t rg_idx = 0; rg_idx < num_rowgroups; ++rg_idx) {
auto const rowgroup_total_bytes =
std::accumulate(columns.begin(), columns.end(), 0ul, [&](size_t total_size, auto const& col) {
auto const rows = rowgroup_bounds[rg_idx][col.index()].size();
if (col.is_string()) {
const auto dt = col.host_dict_chunk(rg_idx);
return total_size + rows + dt->string_char_count;
} else {
return total_size + col.type_width() * rows;
}
});
auto const rowgroup_rows_max =
std::max_element(rowgroup_bounds[rg_idx].begin(),
rowgroup_bounds[rg_idx].end(),
[](auto& l, auto& r) { return l.size() < r.size(); })
->size();
// Check if adding the current rowgroup to the stripe will make the stripe too large or long
if ((rg_idx > stripe_start) && (stripe_bytes + rowgroup_total_bytes > max_stripe_bytes ||
stripe_rows + rowgroup_rows_max > max_stripe_rows)) {
infos.emplace_back(infos.size(), stripe_start, rg_idx - stripe_start);
stripe_start = rg_idx;
stripe_bytes = 0;
stripe_rows = 0;
}
stripe_bytes += rowgroup_total_bytes;
stripe_rows += rowgroup_rows_max;
if (rg_idx + 1 == num_rowgroups) {
infos.emplace_back(infos.size(), stripe_start, num_rowgroups - stripe_start);
}
}
return {std::move(rowgroup_bounds), std::move(infos)};
}
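// Editorial sketch of the splitting rule above, with made-up sizes: if
// max_stripe_bytes were 64 MiB and three consecutive rowgroups cost 30 MiB, 30 MiB
// and 10 MiB, the first two would form one 60 MiB stripe and the third would start
// a new one, because adding it would exceed the byte limit; the row limit
// (1'000'000 rows when any column is a string column, 5'000'000 otherwise) is
// applied in the same way.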
/**
* @brief Builds up column dictionaries indices
*
* @param orc_table Non-owning view of a cuDF table w/ ORC-related info
* @param rowgroup_bounds Ranges of rows in each rowgroup [rowgroup][column]
* @param dict_data Dictionary data memory
* @param dict_index Dictionary index memory
* @param dict List of dictionary chunks
* @param stream CUDA stream used for device memory operations and kernel launches
*/
void init_dictionaries(orc_table_view& orc_table,
device_2dspan<rowgroup_rows const> rowgroup_bounds,
device_span<device_span<uint32_t>> dict_data,
device_span<device_span<uint32_t>> dict_index,
hostdevice_2dvector<gpu::DictionaryChunk>* dict,
rmm::cuda_stream_view stream)
{
// Setup per-rowgroup dictionary indexes for each dictionary-aware column
for (auto col_idx : orc_table.string_column_indices) {
auto& str_column = orc_table.column(col_idx);
str_column.set_dict_stride(orc_table.num_string_columns());
str_column.attach_dict_chunk(dict->base_host_ptr(), dict->base_device_ptr());
}
// Allocate temporary memory for dictionary indices
std::vector<rmm::device_uvector<uint32_t>> dict_indices;
dict_indices.reserve(orc_table.num_string_columns());
std::transform(orc_table.string_column_indices.cbegin(),
orc_table.string_column_indices.cend(),
std::back_inserter(dict_indices),
[&](auto& col_idx) {
auto& str_column = orc_table.column(col_idx);
return cudf::detail::make_zeroed_device_uvector_async<uint32_t>(
str_column.size(), stream);
});
// Create views of the temporary buffers in device memory
std::vector<device_span<uint32_t>> dict_indices_views;
dict_indices_views.reserve(dict_indices.size());
std::transform(
dict_indices.begin(), dict_indices.end(), std::back_inserter(dict_indices_views), [](auto& di) {
return device_span<uint32_t>{di};
});
auto d_dict_indices_views = cudf::detail::make_device_uvector_async(dict_indices_views, stream);
gpu::InitDictionaryIndices(orc_table.d_columns,
*dict,
dict_data,
dict_index,
d_dict_indices_views,
rowgroup_bounds,
orc_table.d_string_column_indices,
stream);
dict->device_to_host(stream, true);
}
void writer::impl::build_dictionaries(orc_table_view& orc_table,
host_span<stripe_rowgroups const> stripe_bounds,
hostdevice_2dvector<gpu::DictionaryChunk> const& dict,
host_span<rmm::device_uvector<uint32_t>> dict_index,
host_span<bool const> dictionary_enabled,
hostdevice_2dvector<gpu::StripeDictionary>& stripe_dict)
{
const auto num_rowgroups = dict.size().first;
for (size_t dict_idx = 0; dict_idx < orc_table.num_string_columns(); ++dict_idx) {
auto& str_column = orc_table.string_column(dict_idx);
str_column.attach_stripe_dict(stripe_dict.base_host_ptr(), stripe_dict.base_device_ptr());
for (auto const& stripe : stripe_bounds) {
auto& sd = stripe_dict[stripe.id][dict_idx];
sd.dict_data = str_column.host_dict_chunk(stripe.first)->dict_data;
sd.dict_index = dict_index[dict_idx].data(); // Indexed by abs row
sd.column_id = orc_table.string_column_indices[dict_idx];
sd.start_chunk = stripe.first;
sd.num_chunks = stripe.size;
sd.dict_char_count = 0;
sd.num_strings =
std::accumulate(stripe.cbegin(), stripe.cend(), 0, [&](auto dt_str_cnt, auto rg_idx) {
const auto& dt = dict[rg_idx][dict_idx];
return dt_str_cnt + dt.num_dict_strings;
});
sd.leaf_column = dict[0][dict_idx].leaf_column;
}
if (enable_dictionary_) {
struct string_column_cost {
size_t direct = 0;
size_t dictionary = 0;
};
auto const col_cost =
std::accumulate(stripe_bounds.front().cbegin(),
stripe_bounds.back().cend(),
string_column_cost{},
[&](auto cost, auto rg_idx) -> string_column_cost {
const auto& dt = dict[rg_idx][dict_idx];
return {cost.direct + dt.string_char_count,
cost.dictionary + dt.dict_char_count + dt.num_dict_strings};
});
// Disable dictionary if it does not reduce the output size
if (!dictionary_enabled[orc_table.string_column(dict_idx).index()] ||
col_cost.dictionary >= col_cost.direct) {
for (auto const& stripe : stripe_bounds) {
stripe_dict[stripe.id][dict_idx].dict_data = nullptr;
}
}
}
}
stripe_dict.host_to_device(stream);
gpu::BuildStripeDictionaries(stripe_dict, stripe_dict, dict, stream);
stripe_dict.device_to_host(stream, true);
}
constexpr size_t RLE_stream_size(TypeKind kind, size_t count)
{
using cudf::util::div_rounding_up_unsafe;
constexpr auto byte_rle_max_len = 128;
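  // Worst-case stream sizes: byte-RLE emits at most one control byte per 128
  // literal bytes (booleans are bit-packed 8 per byte first); integer RLE is
  // bounded by the raw values plus a 2-byte header per encode block.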
switch (kind) {
case TypeKind::BOOLEAN:
return div_rounding_up_unsafe(count, byte_rle_max_len * 8) * (byte_rle_max_len + 1);
case TypeKind::BYTE:
return div_rounding_up_unsafe(count, byte_rle_max_len) * (byte_rle_max_len + 1);
case TypeKind::SHORT:
return div_rounding_up_unsafe(count, gpu::encode_block_size) *
(gpu::encode_block_size * sizeof(int16_t) + 2);
case TypeKind::FLOAT:
case TypeKind::INT:
case TypeKind::DATE:
return div_rounding_up_unsafe(count, gpu::encode_block_size) *
(gpu::encode_block_size * sizeof(int32_t) + 2);
case TypeKind::LONG:
case TypeKind::DOUBLE:
return div_rounding_up_unsafe(count, gpu::encode_block_size) *
(gpu::encode_block_size * sizeof(int64_t) + 2);
default: CUDF_FAIL("Unsupported ORC type for RLE stream size");
}
}
orc_streams writer::impl::create_streams(host_span<orc_column_view> columns,
file_segmentation const& segmentation,
std::map<uint32_t, size_t> const& decimal_column_sizes)
{
// 'column 0' row index stream
std::vector<Stream> streams{{ROW_INDEX, 0}}; // TODO: Separate index and data streams?
// First n + 1 streams are row index streams
streams.reserve(columns.size() + 1);
std::transform(columns.begin(), columns.end(), std::back_inserter(streams), [](auto const& col) {
return Stream{ROW_INDEX, col.id()};
});
std::vector<int32_t> ids(columns.size() * gpu::CI_NUM_STREAMS, -1);
std::vector<TypeKind> types(streams.size(), INVALID_TYPE_KIND);
for (auto& column : columns) {
auto const is_nullable = [&]() -> bool {
if (single_write_mode) {
return column.nullable();
} else {
if (user_metadata_with_nullability.column_nullable.empty()) return true;
CUDF_EXPECTS(user_metadata_with_nullability.column_nullable.size() > column.index(),
"When passing values in user_metadata_with_nullability, data for all columns "
"must be specified");
return user_metadata_with_nullability.column_nullable[column.index()];
}
}();
auto RLE_column_size = [&](TypeKind type_kind) {
return std::accumulate(
thrust::make_counting_iterator(0ul),
thrust::make_counting_iterator(segmentation.num_rowgroups()),
0ul,
[&](auto data_size, auto rg_idx) {
return data_size +
RLE_stream_size(type_kind, segmentation.rowgroups[rg_idx][column.index()].size());
});
};
auto const kind = column.orc_kind();
auto add_stream =
[&](gpu::StreamIndexType index_type, StreamKind kind, TypeKind type_kind, size_t size) {
const auto base = column.index() * gpu::CI_NUM_STREAMS;
ids[base + index_type] = streams.size();
streams.push_back(orc::Stream{kind, column.id(), size});
types.push_back(type_kind);
};
auto add_RLE_stream = [&](
gpu::StreamIndexType index_type, StreamKind kind, TypeKind type_kind) {
add_stream(index_type, kind, type_kind, RLE_column_size(type_kind));
};
if (is_nullable) { add_RLE_stream(gpu::CI_PRESENT, PRESENT, TypeKind::BOOLEAN); }
switch (kind) {
case TypeKind::BOOLEAN:
case TypeKind::BYTE:
add_RLE_stream(gpu::CI_DATA, DATA, kind);
column.set_orc_encoding(DIRECT);
break;
case TypeKind::SHORT:
case TypeKind::INT:
case TypeKind::LONG:
case TypeKind::DATE:
add_RLE_stream(gpu::CI_DATA, DATA, kind);
column.set_orc_encoding(DIRECT_V2);
break;
case TypeKind::FLOAT:
case TypeKind::DOUBLE:
// Pass through if no nulls (no RLE encoding for floating point)
add_stream(
gpu::CI_DATA, DATA, kind, (column.null_count() != 0) ? RLE_column_size(kind) : 0);
column.set_orc_encoding(DIRECT);
break;
case TypeKind::STRING: {
bool enable_dict = enable_dictionary_;
size_t dict_data_size = 0;
size_t dict_strings = 0;
size_t dict_lengths_div512 = 0;
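        // Accumulate the dictionary cost across stripes; dictionary encoding is
        // only kept if every stripe built a dictionary (dict_data != nullptr).
        // String lengths are RLE-encoded in blocks of up to 512 values.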
for (auto const& stripe : segmentation.stripes) {
const auto sd = column.host_stripe_dict(stripe.id);
enable_dict = (enable_dict && sd->dict_data != nullptr);
if (enable_dict) {
dict_strings += sd->num_strings;
dict_lengths_div512 += (sd->num_strings + 0x1ff) >> 9;
dict_data_size += sd->dict_char_count;
}
}
auto const direct_data_size =
std::accumulate(segmentation.stripes.front().cbegin(),
segmentation.stripes.back().cend(),
size_t{0},
[&](auto data_size, auto rg_idx) {
return data_size + column.host_dict_chunk(rg_idx)->string_char_count;
});
if (enable_dict) {
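          // Pick the smallest index width (1, 2, 4, 8 or 16 bits) that can address
          // every dictionary entry, then add the bit-packed index stream size.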
uint32_t dict_bits = 0;
for (dict_bits = 1; dict_bits < 32; dict_bits <<= 1) {
if (dict_strings <= (1ull << dict_bits)) break;
}
const auto valid_count = column.size() - column.null_count();
dict_data_size += (dict_bits * valid_count + 7) >> 3;
}
// Decide between direct or dictionary encoding
if (enable_dict && dict_data_size < direct_data_size) {
add_RLE_stream(gpu::CI_DATA, DATA, TypeKind::INT);
add_stream(gpu::CI_DATA2, LENGTH, TypeKind::INT, dict_lengths_div512 * (512 * 4 + 2));
add_stream(
            gpu::CI_DICTIONARY, DICTIONARY_DATA, TypeKind::CHAR, std::max<size_t>(dict_data_size, 1ul));
column.set_orc_encoding(DICTIONARY_V2);
} else {
add_stream(gpu::CI_DATA, DATA, TypeKind::CHAR, std::max<size_t>(direct_data_size, 1));
add_RLE_stream(gpu::CI_DATA2, LENGTH, TypeKind::INT);
column.set_orc_encoding(DIRECT_V2);
}
break;
}
case TypeKind::TIMESTAMP:
add_RLE_stream(gpu::CI_DATA, DATA, TypeKind::LONG);
add_RLE_stream(gpu::CI_DATA2, SECONDARY, TypeKind::LONG);
column.set_orc_encoding(DIRECT_V2);
break;
case TypeKind::DECIMAL:
// varint values (NO RLE)
// data_stream_size = decimal_column_sizes.at(column.index());
add_stream(gpu::CI_DATA, DATA, TypeKind::DECIMAL, decimal_column_sizes.at(column.index()));
// scale stream TODO: compute exact size since all elems are equal
add_RLE_stream(gpu::CI_DATA2, SECONDARY, TypeKind::INT);
column.set_orc_encoding(DIRECT_V2);
break;
case TypeKind::LIST:
// no data stream, only lengths
add_RLE_stream(gpu::CI_DATA2, LENGTH, TypeKind::INT);
column.set_orc_encoding(DIRECT_V2);
break;
default: CUDF_FAIL("Unsupported ORC type kind");
}
}
return {std::move(streams), std::move(ids), std::move(types)};
}
orc_streams::orc_stream_offsets orc_streams::compute_offsets(
host_span<orc_column_view const> columns, size_t num_rowgroups) const
{
std::vector<size_t> strm_offsets(streams.size());
size_t non_rle_data_size = 0;
size_t rle_data_size = 0;
for (size_t i = 0; i < streams.size(); ++i) {
const auto& stream = streams[i];
auto const is_rle_data = [&]() {
// First stream is an index stream, don't check types, etc.
if (!stream.column_index().has_value()) return true;
auto const& column = columns[stream.column_index().value()];
// Dictionary encoded string column - dictionary characters or
// directly encoded string - column characters
if (column.orc_kind() == TypeKind::STRING &&
((stream.kind == DICTIONARY_DATA && column.orc_encoding() == DICTIONARY_V2) ||
(stream.kind == DATA && column.orc_encoding() == DIRECT_V2)))
return false;
// Decimal data
if (column.orc_kind() == TypeKind::DECIMAL && stream.kind == DATA) return false;
// Everything else uses RLE
return true;
}();
// non-RLE and RLE streams are separated in the buffer that stores encoded data
// The computed offsets do not take the streams of the other type into account
if (is_rle_data) {
strm_offsets[i] = rle_data_size;
rle_data_size += (stream.length + 7) & ~7;
} else {
strm_offsets[i] = non_rle_data_size;
non_rle_data_size += stream.length;
}
}
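  // Pad the non-RLE region to an 8-byte boundary; RLE streams are laid out
  // immediately after it, and their offsets assume this alignment.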
non_rle_data_size = (non_rle_data_size + 7) & ~7;
return {std::move(strm_offsets), non_rle_data_size, rle_data_size};
}
struct segmented_valid_cnt_input {
bitmask_type const* mask;
std::vector<size_type> indices;
};
encoded_data writer::impl::encode_columns(orc_table_view const& orc_table,
string_dictionaries&& dictionaries,
encoder_decimal_info&& dec_chunk_sizes,
file_segmentation const& segmentation,
orc_streams const& streams)
{
auto const num_columns = orc_table.num_columns();
hostdevice_2dvector<gpu::EncChunk> chunks(num_columns, segmentation.num_rowgroups(), stream);
auto const stream_offsets =
streams.compute_offsets(orc_table.columns, segmentation.num_rowgroups());
rmm::device_uvector<uint8_t> encoded_data(stream_offsets.data_size(), stream);
// Initialize column chunks' descriptions
std::map<size_type, segmented_valid_cnt_input> validity_check_inputs;
for (auto const& column : orc_table.columns) {
for (auto const& stripe : segmentation.stripes) {
for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend(); ++rg_idx_it) {
auto const rg_idx = *rg_idx_it;
auto& ck = chunks[column.index()][rg_idx];
ck.start_row = segmentation.rowgroups[rg_idx][column.index()].begin;
ck.num_rows = segmentation.rowgroups[rg_idx][column.index()].size();
ck.encoding_kind = column.orc_encoding();
ck.type_kind = column.orc_kind();
if (ck.type_kind == TypeKind::STRING) {
ck.dict_index = (ck.encoding_kind == DICTIONARY_V2)
? column.host_stripe_dict(stripe.id)->dict_index
: nullptr;
ck.dtype_len = 1;
} else {
ck.dtype_len = column.type_width();
}
ck.scale = column.scale();
if (ck.type_kind == TypeKind::DECIMAL) {
ck.decimal_offsets = device_span<uint32_t>{column.decimal_offsets(), ck.num_rows};
}
}
}
}
auto validity_check_indices = [&](size_t col_idx) {
std::vector<size_type> indices;
for (auto const& stripe : segmentation.stripes) {
for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend() - 1; ++rg_idx_it) {
auto const& chunk = chunks[col_idx][*rg_idx_it];
indices.push_back(chunk.start_row);
indices.push_back(chunk.start_row + chunk.num_rows);
}
}
return indices;
};
for (auto const& column : orc_table.columns) {
if (column.orc_kind() == TypeKind::BOOLEAN && column.nullable()) {
validity_check_inputs[column.index()] = {column.null_mask(),
validity_check_indices(column.index())};
}
}
for (auto& cnt_in : validity_check_inputs) {
auto const valid_counts = segmented_count_set_bits(cnt_in.second.mask, cnt_in.second.indices);
CUDF_EXPECTS(
std::none_of(valid_counts.cbegin(),
valid_counts.cend(),
[](auto valid_count) { return valid_count % 8; }),
"There's currently a bug in encoding boolean columns. Suggested workaround is to convert "
"to int8 type."
" Please see https://github.com/rapidsai/cudf/issues/6763 for more information.");
}
hostdevice_2dvector<gpu::encoder_chunk_streams> chunk_streams(
num_columns, segmentation.num_rowgroups(), stream);
for (size_t col_idx = 0; col_idx < num_columns; col_idx++) {
auto const& column = orc_table.column(col_idx);
auto col_streams = chunk_streams[col_idx];
for (auto const& stripe : segmentation.stripes) {
for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend(); ++rg_idx_it) {
auto const rg_idx = *rg_idx_it;
auto const& ck = chunks[col_idx][rg_idx];
auto& strm = col_streams[rg_idx];
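        // Lay out each stream of this chunk inside the shared encoded_data buffer:
        // dictionary and dictionary-length streams are allocated once per stripe,
        // direct string/decimal data follows the previous rowgroup's data, and
        // RLE streams live in the region that starts after all non-RLE data.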
for (int strm_type = 0; strm_type < gpu::CI_NUM_STREAMS; ++strm_type) {
auto const strm_id = streams.id(col_idx * gpu::CI_NUM_STREAMS + strm_type);
strm.ids[strm_type] = strm_id;
if (strm_id >= 0) {
if ((strm_type == gpu::CI_DICTIONARY) ||
(strm_type == gpu::CI_DATA2 && ck.encoding_kind == DICTIONARY_V2)) {
if (rg_idx_it == stripe.cbegin()) {
const int32_t dict_stride = column.dict_stride();
const auto stripe_dict = column.host_stripe_dict(stripe.id);
strm.lengths[strm_type] =
(strm_type == gpu::CI_DICTIONARY)
? stripe_dict->dict_char_count
: (((stripe_dict->num_strings + 0x1ff) >> 9) * (512 * 4 + 2));
if (stripe.id == 0) {
strm.data_ptrs[strm_type] = encoded_data.data() + stream_offsets.offsets[strm_id];
                  // Dictionary lengths are RLE-encoded, and RLE streams are stored after all
                  // non-RLE data: include the non-RLE data size in the offset only in that case
if (strm_type == gpu::CI_DATA2 && ck.encoding_kind == DICTIONARY_V2)
strm.data_ptrs[strm_type] += stream_offsets.non_rle_data_size;
} else {
auto const& strm_up = col_streams[stripe_dict[-dict_stride].start_chunk];
strm.data_ptrs[strm_type] =
strm_up.data_ptrs[strm_type] + strm_up.lengths[strm_type];
}
} else {
strm.lengths[strm_type] = 0;
strm.data_ptrs[strm_type] = col_streams[rg_idx - 1].data_ptrs[strm_type];
}
} else if (strm_type == gpu::CI_DATA && ck.type_kind == TypeKind::STRING &&
ck.encoding_kind == DIRECT_V2) {
strm.lengths[strm_type] = column.host_dict_chunk(rg_idx)->string_char_count;
strm.data_ptrs[strm_type] = (rg_idx == 0)
? encoded_data.data() + stream_offsets.offsets[strm_id]
: (col_streams[rg_idx - 1].data_ptrs[strm_type] +
col_streams[rg_idx - 1].lengths[strm_type]);
} else if (strm_type == gpu::CI_DATA && streams[strm_id].length == 0 &&
(ck.type_kind == DOUBLE || ck.type_kind == FLOAT)) {
// Pass-through
strm.lengths[strm_type] = ck.num_rows * ck.dtype_len;
strm.data_ptrs[strm_type] = nullptr;
} else if (ck.type_kind == DECIMAL && strm_type == gpu::CI_DATA) {
strm.lengths[strm_type] = dec_chunk_sizes.rg_sizes.at(col_idx)[rg_idx];
strm.data_ptrs[strm_type] = (rg_idx == 0)
? encoded_data.data() + stream_offsets.offsets[strm_id]
: (col_streams[rg_idx - 1].data_ptrs[strm_type] +
col_streams[rg_idx - 1].lengths[strm_type]);
} else {
strm.lengths[strm_type] = RLE_stream_size(streams.type(strm_id), ck.num_rows);
// RLE encoded streams are stored after all non-RLE streams
strm.data_ptrs[strm_type] =
(rg_idx == 0) ? (encoded_data.data() + stream_offsets.non_rle_data_size +
stream_offsets.offsets[strm_id])
: (col_streams[rg_idx - 1].data_ptrs[strm_type] +
col_streams[rg_idx - 1].lengths[strm_type]);
}
} else {
strm.lengths[strm_type] = 0;
strm.data_ptrs[strm_type] = nullptr;
}
}
}
}
}
chunks.host_to_device(stream);
chunk_streams.host_to_device(stream);
gpu::set_chunk_columns(orc_table.d_columns, chunks, stream);
if (orc_table.num_string_columns() != 0) {
auto d_stripe_dict = orc_table.string_column(0).device_stripe_dict();
gpu::EncodeStripeDictionaries(d_stripe_dict,
chunks,
orc_table.num_string_columns(),
segmentation.num_stripes(),
chunk_streams,
stream);
}
gpu::EncodeOrcColumnData(chunks, chunk_streams, stream);
dictionaries.data.clear();
dictionaries.index.clear();
stream.synchronize();
return {std::move(encoded_data), std::move(chunk_streams)};
}
std::vector<StripeInformation> writer::impl::gather_stripes(
size_t num_index_streams,
file_segmentation const& segmentation,
hostdevice_2dvector<gpu::encoder_chunk_streams>* enc_streams,
hostdevice_2dvector<gpu::StripeStream>* strm_desc)
{
std::vector<StripeInformation> stripes(segmentation.num_stripes());
for (auto const& stripe : segmentation.stripes) {
for (size_t col_idx = 0; col_idx < enc_streams->size().first; col_idx++) {
const auto& strm = (*enc_streams)[col_idx][stripe.first];
// Assign stream data of column data stream(s)
for (int k = 0; k < gpu::CI_INDEX; k++) {
const auto stream_id = strm.ids[k];
if (stream_id != -1) {
auto* ss = &(*strm_desc)[stripe.id][stream_id - num_index_streams];
ss->stream_size = 0;
ss->first_chunk_id = stripe.first;
ss->num_chunks = stripe.size;
ss->column_id = col_idx;
ss->stream_type = k;
}
}
}
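    // The stripe's row count spans from the first row of its first rowgroup to
    // the last row of its last rowgroup (measured on the first column).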
stripes[stripe.id].numberOfRows =
stripe.size == 0 ? 0
: segmentation.rowgroups[stripe.first + stripe.size - 1][0].end -
segmentation.rowgroups[stripe.first][0].begin;
}
strm_desc->host_to_device(stream);
gpu::CompactOrcDataStreams(*strm_desc, *enc_streams, stream);
strm_desc->device_to_host(stream);
enc_streams->device_to_host(stream, true);
return stripes;
}
void set_stat_desc_leaf_cols(device_span<orc_column_device_view const> columns,
device_span<stats_column_desc> stat_desc,
rmm::cuda_stream_view stream)
{
thrust::for_each(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0ul),
thrust::make_counting_iterator(stat_desc.size()),
[=] __device__(auto idx) { stat_desc[idx].leaf_column = &columns[idx].cudf_column; });
}
std::vector<std::vector<uint8_t>> writer::impl::gather_statistic_blobs(
orc_table_view const& orc_table, file_segmentation const& segmentation)
{
auto const num_stat_blobs = (1 + segmentation.num_stripes()) * orc_table.num_columns();
hostdevice_vector<stats_column_desc> stat_desc(orc_table.num_columns(), stream);
hostdevice_vector<statistics_merge_group> stat_merge(num_stat_blobs, stream);
for (auto const& column : orc_table.columns) {
stats_column_desc* desc = &stat_desc[column.index()];
switch (column.orc_kind()) {
case TypeKind::BYTE: desc->stats_dtype = dtype_int8; break;
case TypeKind::SHORT: desc->stats_dtype = dtype_int16; break;
case TypeKind::INT: desc->stats_dtype = dtype_int32; break;
case TypeKind::LONG: desc->stats_dtype = dtype_int64; break;
case TypeKind::FLOAT: desc->stats_dtype = dtype_float32; break;
case TypeKind::DOUBLE: desc->stats_dtype = dtype_float64; break;
case TypeKind::BOOLEAN: desc->stats_dtype = dtype_bool; break;
case TypeKind::DATE: desc->stats_dtype = dtype_int32; break;
case TypeKind::DECIMAL: desc->stats_dtype = dtype_decimal64; break;
case TypeKind::TIMESTAMP: desc->stats_dtype = dtype_timestamp64; break;
case TypeKind::STRING: desc->stats_dtype = dtype_string; break;
default: desc->stats_dtype = dtype_none; break;
}
desc->num_rows = column.size();
desc->num_values = column.size();
if (desc->stats_dtype == dtype_timestamp64) {
// Timestamp statistics are in milliseconds
switch (column.scale()) {
case 9: desc->ts_scale = 1000; break;
case 6: desc->ts_scale = 0; break;
case 3: desc->ts_scale = -1000; break;
case 0: desc->ts_scale = -1000000; break;
default: desc->ts_scale = 0; break;
}
} else {
desc->ts_scale = 0;
}
for (auto const& stripe : segmentation.stripes) {
auto grp = &stat_merge[column.index() * segmentation.num_stripes() + stripe.id];
grp->col = stat_desc.device_ptr(column.index());
grp->start_chunk =
static_cast<uint32_t>(column.index() * segmentation.num_rowgroups() + stripe.first);
grp->num_chunks = stripe.size;
}
statistics_merge_group* col_stats =
&stat_merge[segmentation.num_stripes() * orc_table.num_columns() + column.index()];
col_stats->col = stat_desc.device_ptr(column.index());
col_stats->start_chunk = static_cast<uint32_t>(column.index() * segmentation.num_stripes());
col_stats->num_chunks = static_cast<uint32_t>(segmentation.num_stripes());
}
stat_desc.host_to_device(stream);
stat_merge.host_to_device(stream);
set_stat_desc_leaf_cols(orc_table.d_columns, stat_desc, stream);
auto const num_chunks = segmentation.rowgroups.count();
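  // stat_chunks layout: per-rowgroup chunks, followed by per-stripe merged
  // statistics, followed by per-column file-level statistics.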
rmm::device_uvector<statistics_chunk> stat_chunks(num_chunks + num_stat_blobs, stream);
rmm::device_uvector<statistics_group> stat_groups(num_chunks, stream);
gpu::orc_init_statistics_groups(
stat_groups.data(), stat_desc.device_ptr(), segmentation.rowgroups, stream);
detail::calculate_group_statistics<detail::io_file_format::ORC>(
stat_chunks.data(), stat_groups.data(), num_chunks, stream);
detail::merge_group_statistics<detail::io_file_format::ORC>(
stat_chunks.data() + num_chunks,
stat_chunks.data(),
stat_merge.device_ptr(),
segmentation.num_stripes() * orc_table.num_columns(),
stream);
detail::merge_group_statistics<detail::io_file_format::ORC>(
stat_chunks.data() + num_chunks + segmentation.num_stripes() * orc_table.num_columns(),
stat_chunks.data() + num_chunks,
stat_merge.device_ptr(segmentation.num_stripes() * orc_table.num_columns()),
orc_table.num_columns(),
stream);
gpu::orc_init_statistics_buffersize(
stat_merge.device_ptr(), stat_chunks.data() + num_chunks, num_stat_blobs, stream);
stat_merge.device_to_host(stream, true);
hostdevice_vector<uint8_t> blobs(
stat_merge[num_stat_blobs - 1].start_chunk + stat_merge[num_stat_blobs - 1].num_chunks, stream);
gpu::orc_encode_statistics(blobs.device_ptr(),
stat_merge.device_ptr(),
stat_chunks.data() + num_chunks,
num_stat_blobs,
stream);
stat_merge.device_to_host(stream);
blobs.device_to_host(stream, true);
std::vector<std::vector<uint8_t>> stat_blobs(num_stat_blobs);
for (size_t i = 0; i < num_stat_blobs; i++) {
const uint8_t* stat_begin = blobs.host_ptr(stat_merge[i].start_chunk);
const uint8_t* stat_end = stat_begin + stat_merge[i].num_chunks;
stat_blobs[i].assign(stat_begin, stat_end);
}
return stat_blobs;
}
void writer::impl::write_index_stream(int32_t stripe_id,
int32_t stream_id,
host_span<orc_column_view const> columns,
stripe_rowgroups const& rowgroups_range,
host_2dspan<gpu::encoder_chunk_streams const> enc_streams,
host_2dspan<gpu::StripeStream const> strm_desc,
host_span<gpu_inflate_status_s const> comp_out,
StripeInformation* stripe,
orc_streams* streams,
ProtobufWriter* pbw)
{
row_group_index_info present;
row_group_index_info data;
row_group_index_info data2;
auto kind = TypeKind::STRUCT;
auto const column_id = stream_id - 1;
auto find_record = [=, &strm_desc](gpu::encoder_chunk_streams const& stream,
gpu::StreamIndexType type) {
row_group_index_info record;
if (stream.ids[type] > 0) {
record.pos = 0;
if (compression_kind_ != NONE) {
auto const& ss = strm_desc[stripe_id][stream.ids[type] - (columns.size() + 1)];
record.blk_pos = ss.first_block;
record.comp_pos = 0;
record.comp_size = ss.stream_size;
}
}
return record;
};
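  // Advances the index positions past the given stream; with compression,
  // completed compression blocks (3-byte header + compressed bytes) are folded
  // into comp_pos so that pos stays relative to the current block.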
auto scan_record = [=, &comp_out](gpu::encoder_chunk_streams const& stream,
gpu::StreamIndexType type,
row_group_index_info& record) {
if (record.pos >= 0) {
record.pos += stream.lengths[type];
while ((record.pos >= 0) && (record.blk_pos >= 0) &&
(static_cast<size_t>(record.pos) >= compression_blocksize_) &&
(record.comp_pos + 3 + comp_out[record.blk_pos].bytes_written <
static_cast<size_t>(record.comp_size))) {
record.pos -= compression_blocksize_;
record.comp_pos += 3 + comp_out[record.blk_pos].bytes_written;
record.blk_pos += 1;
}
}
};
// TBD: Not sure we need an empty index stream for column 0
if (stream_id != 0) {
const auto& strm = enc_streams[column_id][0];
present = find_record(strm, gpu::CI_PRESENT);
data = find_record(strm, gpu::CI_DATA);
data2 = find_record(strm, gpu::CI_DATA2);
    // Treat dictionary-encoded string columns as INT from the row index's point of view
kind = columns[column_id].orc_kind();
if (kind == TypeKind::STRING && columns[column_id].orc_encoding() == DICTIONARY_V2) {
kind = TypeKind::INT;
}
}
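  // Reserve 3 bytes for the compressed-block header that is patched in below
  // once the index entries have been serialized.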
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
// Add row index entries
std::for_each(rowgroups_range.cbegin(), rowgroups_range.cend(), [&](auto rowgroup) {
pbw->put_row_index_entry(
present.comp_pos, present.pos, data.comp_pos, data.pos, data2.comp_pos, data2.pos, kind);
if (stream_id != 0) {
const auto& strm = enc_streams[column_id][rowgroup];
scan_record(strm, gpu::CI_PRESENT, present);
scan_record(strm, gpu::CI_DATA, data);
scan_record(strm, gpu::CI_DATA2, data2);
}
});
(*streams)[stream_id].length = buffer_.size();
if (compression_kind_ != NONE) {
uint32_t uncomp_ix_len = (uint32_t)((*streams)[stream_id].length - 3) * 2 + 1;
buffer_[0] = static_cast<uint8_t>(uncomp_ix_len >> 0);
buffer_[1] = static_cast<uint8_t>(uncomp_ix_len >> 8);
buffer_[2] = static_cast<uint8_t>(uncomp_ix_len >> 16);
}
out_sink_->host_write(buffer_.data(), buffer_.size());
stripe->indexLength += buffer_.size();
}
void writer::impl::write_data_stream(gpu::StripeStream const& strm_desc,
gpu::encoder_chunk_streams const& enc_stream,
uint8_t const* compressed_data,
uint8_t* stream_out,
StripeInformation* stripe,
orc_streams* streams)
{
const auto length = strm_desc.stream_size;
(*streams)[enc_stream.ids[strm_desc.stream_type]].length = length;
if (length == 0) { return; }
const auto* stream_in = (compression_kind_ == NONE) ? enc_stream.data_ptrs[strm_desc.stream_type]
: (compressed_data + strm_desc.bfr_offset);
if (out_sink_->is_device_write_preferred(length)) {
out_sink_->device_write(stream_in, length, stream);
} else {
CUDA_TRY(
hipMemcpyAsync(stream_out, stream_in, length, hipMemcpyDeviceToHost, stream.value()));
stream.synchronize();
out_sink_->host_write(stream_out, length);
}
stripe->dataLength += length;
}
void writer::impl::add_uncompressed_block_headers(std::vector<uint8_t>& v)
{
if (compression_kind_ != NONE) {
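    // ORC splits compressed streams into blocks with a 3-byte little-endian
    // header; (length << 1) | 1 marks a block that is stored uncompressed.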
size_t uncomp_len = v.size() - 3, pos = 0, block_len;
while (uncomp_len > compression_blocksize_) {
block_len = compression_blocksize_ * 2 + 1;
v[pos + 0] = static_cast<uint8_t>(block_len >> 0);
v[pos + 1] = static_cast<uint8_t>(block_len >> 8);
v[pos + 2] = static_cast<uint8_t>(block_len >> 16);
pos += 3 + compression_blocksize_;
v.insert(v.begin() + pos, 3, 0);
uncomp_len -= compression_blocksize_;
}
block_len = uncomp_len * 2 + 1;
v[pos + 0] = static_cast<uint8_t>(block_len >> 0);
v[pos + 1] = static_cast<uint8_t>(block_len >> 8);
v[pos + 2] = static_cast<uint8_t>(block_len >> 16);
}
}
writer::impl::impl(std::unique_ptr<data_sink> sink,
orc_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: compression_kind_(to_orc_compression(options.get_compression())),
enable_statistics_(options.enable_statistics()),
out_sink_(std::move(sink)),
single_write_mode(mode == SingleWriteMode::YES),
user_metadata(options.get_metadata()),
stream(stream),
_mr(mr)
{
init_state();
}
writer::impl::impl(std::unique_ptr<data_sink> sink,
chunked_orc_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: compression_kind_(to_orc_compression(options.get_compression())),
enable_statistics_(options.enable_statistics()),
out_sink_(std::move(sink)),
single_write_mode(mode == SingleWriteMode::YES),
stream(stream),
_mr(mr)
{
if (options.get_metadata() != nullptr) {
user_metadata_with_nullability = *options.get_metadata();
user_metadata = &user_metadata_with_nullability;
}
init_state();
}
writer::impl::~impl() { close(); }
void writer::impl::init_state()
{
// Write file header
out_sink_->host_write(MAGIC, std::strlen(MAGIC));
}
/**
 * @brief Appends ORC device columns in pre-order (parent before children)
*/
void __device__ append_orc_device_column(uint32_t& idx,
thrust::optional<uint32_t> parent_idx,
device_span<orc_column_device_view> cols,
column_device_view col)
{
auto const current_idx = idx;
cols[current_idx] = orc_column_device_view{col, parent_idx};
idx++;
if (col.type().id() == type_id::LIST) {
append_orc_device_column(
idx, current_idx, cols, col.child(lists_column_view::child_column_index));
}
if (col.type().id() == type_id::STRUCT) {
for (auto child_idx = 0; child_idx < col.num_child_columns(); ++child_idx) {
append_orc_device_column(idx, current_idx, cols, col.child(child_idx));
}
}
};
orc_table_view make_orc_table_view(table_view const& table,
table_device_view const& d_table,
table_metadata const* user_metadata,
rmm::cuda_stream_view stream)
{
std::vector<orc_column_view> orc_columns;
std::vector<uint32_t> str_col_indexes;
std::function<void(column_view const&, int)> append_orc_column = [&](column_view const& col,
int index_in_table) {
int const str_idx =
(col.type().id() == type_id::STRING) ? static_cast<int>(str_col_indexes.size()) : -1;
auto const& new_col =
orc_columns.emplace_back(orc_columns.size(), str_idx, index_in_table, col, user_metadata);
if (new_col.is_string()) { str_col_indexes.push_back(new_col.index()); }
if (col.type().id() == type_id::LIST)
append_orc_column(col.child(lists_column_view::child_column_index), -1);
if (col.type().id() == type_id::STRUCT)
for (auto child = col.child_begin(); child != col.child_end(); ++child)
append_orc_column(*child, -1);
};
for (auto col_idx = 0; col_idx < table.num_columns(); ++col_idx) {
append_orc_column(table.column(col_idx), col_idx);
}
rmm::device_uvector<orc_column_device_view> d_orc_columns(orc_columns.size(), stream);
cudf::detail::device_single_thread(
[d_orc_cols = device_span<orc_column_device_view>{d_orc_columns},
d_table = d_table] __device__() mutable {
uint32_t idx = 0;
for (auto const& column : d_table) {
append_orc_device_column(idx, thrust::nullopt, d_orc_cols, column);
}
},
stream);
return {std::move(orc_columns),
std::move(d_orc_columns),
str_col_indexes,
cudf::detail::make_device_uvector_sync(str_col_indexes, stream)};
}
hostdevice_2dvector<rowgroup_rows> calculate_rowgroup_bounds(orc_table_view const& orc_table,
size_type rowgroup_size,
rmm::cuda_stream_view stream)
{
auto const num_rowgroups =
cudf::util::div_rounding_up_unsafe<size_t, size_t>(orc_table.num_rows(), rowgroup_size);
hostdevice_2dvector<rowgroup_rows> rowgroup_bounds(
num_rowgroups, orc_table.num_columns(), stream);
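  // Compute per-rowgroup row ranges for every column: root columns get fixed-size
  // slices of the table, children of list columns map the parent's range through
  // the list offsets, and other nested children simply reuse the parent's range.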
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0ul),
num_rowgroups,
[cols = device_span<orc_column_device_view const>{orc_table.d_columns},
rg_bounds = device_2dspan<rowgroup_rows>{rowgroup_bounds},
rowgroup_size] __device__(auto rg_idx) mutable {
thrust::transform(
thrust::seq, cols.begin(), cols.end(), rg_bounds[rg_idx].begin(), [&](auto const& col) {
// Root column
if (!col.parent_index.has_value()) {
size_type const rows_begin = rg_idx * rowgroup_size;
auto const rows_end =
thrust::min<size_type>((rg_idx + 1) * rowgroup_size, col.cudf_column.size());
return rowgroup_rows{rows_begin, rows_end};
} else {
// Child column
auto const parent_index = *col.parent_index;
column_device_view parent_col = cols[parent_index].cudf_column;
if (parent_col.type().id() != type_id::LIST) return rg_bounds[rg_idx][parent_index];
auto parent_offsets = parent_col.child(lists_column_view::offsets_column_index);
auto const& parent_rowgroup_rows = rg_bounds[rg_idx][parent_index];
auto const rows_begin = parent_offsets.element<size_type>(parent_rowgroup_rows.begin);
auto const rows_end = parent_offsets.element<size_type>(parent_rowgroup_rows.end);
return rowgroup_rows{rows_begin, rows_end};
}
});
});
rowgroup_bounds.device_to_host(stream, true);
return rowgroup_bounds;
}
// returns host vector of per-rowgroup sizes
encoder_decimal_info decimal_chunk_sizes(orc_table_view& orc_table,
file_segmentation const& segmentation,
rmm::cuda_stream_view stream)
{
std::map<uint32_t, rmm::device_uvector<uint32_t>> elem_sizes;
// Compute per-element offsets (within each row group) on the device
for (auto& orc_col : orc_table.columns) {
if (orc_col.orc_kind() == DECIMAL) {
auto& current_sizes =
elem_sizes.insert({orc_col.index(), rmm::device_uvector<uint32_t>(orc_col.size(), stream)})
.first->second;
thrust::tabulate(rmm::exec_policy(stream),
current_sizes.begin(),
current_sizes.end(),
[d_cols = device_span<orc_column_device_view const>{orc_table.d_columns},
col_idx = orc_col.index()] __device__(auto idx) {
auto const& col = d_cols[col_idx].cudf_column;
if (col.is_null(idx)) return 0u;
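                     // ORC stores decimals as zigzag-encoded varints; count the
                     // number of 7-bit groups needed for the zigzagged value.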
int64_t const element = (col.type().id() == type_id::DECIMAL32)
? col.element<int32_t>(idx)
: col.element<int64_t>(idx);
int64_t const sign = (element < 0) ? 1 : 0;
uint64_t zigzaged_value = ((element ^ -sign) * 2) + sign;
uint32_t encoded_length = 1;
while (zigzaged_value > 127) {
zigzaged_value >>= 7u;
++encoded_length;
}
return encoded_length;
});
// Compute element offsets within each row group
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator(0ul),
segmentation.num_rowgroups(),
[sizes = device_span<uint32_t>{current_sizes},
rg_bounds = device_2dspan<rowgroup_rows const>{segmentation.rowgroups},
col_idx = orc_col.index()] __device__(auto rg_idx) {
auto const& range = rg_bounds[rg_idx][col_idx];
thrust::inclusive_scan(thrust::seq,
sizes.begin() + range.begin,
sizes.begin() + range.end,
sizes.begin() + range.begin);
});
orc_col.attach_decimal_offsets(current_sizes.data());
}
}
if (elem_sizes.empty()) return {};
// Gather the row group sizes and copy to host
auto d_tmp_rowgroup_sizes = rmm::device_uvector<uint32_t>(segmentation.num_rowgroups(), stream);
std::map<uint32_t, std::vector<uint32_t>> rg_sizes;
for (auto const& [col_idx, esizes] : elem_sizes) {
// Copy last elem in each row group - equal to row group size
thrust::tabulate(rmm::exec_policy(stream),
d_tmp_rowgroup_sizes.begin(),
d_tmp_rowgroup_sizes.end(),
[src = esizes.data(),
col_idx = col_idx,
rg_bounds = device_2dspan<rowgroup_rows const>{
segmentation.rowgroups}] __device__(auto idx) {
return src[rg_bounds[idx][col_idx].end - 1];
});
rg_sizes[col_idx] = cudf::detail::make_std_vector_async(d_tmp_rowgroup_sizes, stream);
}
return {std::move(elem_sizes), std::move(rg_sizes)};
}
std::map<uint32_t, size_t> decimal_column_sizes(
std::map<uint32_t, std::vector<uint32_t>> const& chunk_sizes)
{
std::map<uint32_t, size_t> column_sizes;
std::transform(chunk_sizes.cbegin(),
chunk_sizes.cend(),
std::inserter(column_sizes, column_sizes.end()),
[](auto const& chunk_size) -> std::pair<uint32_t, size_t> {
return {
chunk_size.first,
std::accumulate(chunk_size.second.cbegin(), chunk_size.second.cend(), 0lu)};
});
return column_sizes;
}
string_dictionaries allocate_dictionaries(orc_table_view const& orc_table,
host_2dspan<rowgroup_rows const> rowgroup_bounds,
rmm::cuda_stream_view stream)
{
thrust::host_vector<bool> is_dict_enabled(orc_table.num_columns());
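  // Dictionary encoding is only attempted for a string column if every rowgroup
  // fits in 16-bit dictionary indices (fewer than 65535 rows per rowgroup).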
for (auto col_idx : orc_table.string_column_indices)
is_dict_enabled[col_idx] = std::all_of(
thrust::make_counting_iterator(0ul),
thrust::make_counting_iterator(rowgroup_bounds.size().first),
[&](auto rg_idx) {
return rowgroup_bounds[rg_idx][col_idx].size() < std::numeric_limits<uint16_t>::max();
});
std::vector<rmm::device_uvector<uint32_t>> data;
std::transform(orc_table.string_column_indices.begin(),
orc_table.string_column_indices.end(),
std::back_inserter(data),
[&](auto& idx) {
return cudf::detail::make_zeroed_device_uvector_async<uint32_t>(
orc_table.columns[idx].size(), stream);
});
std::vector<rmm::device_uvector<uint32_t>> index;
std::transform(orc_table.string_column_indices.begin(),
orc_table.string_column_indices.end(),
std::back_inserter(index),
[&](auto& idx) {
return cudf::detail::make_zeroed_device_uvector_async<uint32_t>(
orc_table.columns[idx].size(), stream);
});
stream.synchronize();
std::vector<device_span<uint32_t>> data_ptrs;
std::transform(data.begin(), data.end(), std::back_inserter(data_ptrs), [](auto& uvec) {
return device_span<uint32_t>{uvec};
});
std::vector<device_span<uint32_t>> index_ptrs;
std::transform(index.begin(), index.end(), std::back_inserter(index_ptrs), [](auto& uvec) {
return device_span<uint32_t>{uvec};
});
return {std::move(data),
std::move(index),
cudf::detail::make_device_uvector_sync(data_ptrs, stream),
cudf::detail::make_device_uvector_sync(index_ptrs, stream),
std::move(is_dict_enabled)};
}
void writer::impl::write(table_view const& table)
{
CUDF_EXPECTS(not closed, "Data has already been flushed to out and closed");
auto const num_rows = table.num_rows();
auto const d_table = table_device_view::create(table, stream);
auto orc_table = make_orc_table_view(table, *d_table, user_metadata, stream);
auto rowgroup_bounds = calculate_rowgroup_bounds(orc_table, row_index_stride_, stream);
// Build per-column dictionary indices
auto dictionaries = allocate_dictionaries(orc_table, rowgroup_bounds, stream);
hostdevice_2dvector<gpu::DictionaryChunk> dict(
rowgroup_bounds.size().first, orc_table.num_string_columns(), stream);
if (orc_table.num_string_columns() != 0) {
init_dictionaries(orc_table,
rowgroup_bounds,
dictionaries.d_data_view,
dictionaries.d_index_view,
&dict,
stream);
}
// Decide stripe boundaries based on rowgroups and dict chunks
auto const segmentation =
calculate_segmentation(orc_table.columns, std::move(rowgroup_bounds), max_stripe_size_);
// Build stripe-level dictionaries
hostdevice_2dvector<gpu::StripeDictionary> stripe_dict(
segmentation.num_stripes(), orc_table.num_string_columns(), stream);
if (orc_table.num_string_columns() != 0) {
build_dictionaries(orc_table,
segmentation.stripes,
dict,
dictionaries.index,
dictionaries.dictionary_enabled,
stripe_dict);
}
auto dec_chunk_sizes = decimal_chunk_sizes(orc_table, segmentation, stream);
auto streams =
create_streams(orc_table.columns, segmentation, decimal_column_sizes(dec_chunk_sizes.rg_sizes));
auto enc_data = encode_columns(
orc_table, std::move(dictionaries), std::move(dec_chunk_sizes), segmentation, streams);
// Assemble individual disparate column chunks into contiguous data streams
size_type const num_index_streams = (orc_table.num_columns() + 1);
const auto num_data_streams = streams.size() - num_index_streams;
hostdevice_2dvector<gpu::StripeStream> strm_descs(
segmentation.num_stripes(), num_data_streams, stream);
auto stripes = gather_stripes(num_index_streams, segmentation, &enc_data.streams, &strm_descs);
// Gather column statistics
std::vector<ColStatsBlob> column_stats;
if (enable_statistics_ && table.num_columns() > 0 && num_rows > 0) {
column_stats = gather_statistic_blobs(orc_table, segmentation);
}
// Allocate intermediate output stream buffer
size_t compressed_bfr_size = 0;
size_t num_compressed_blocks = 0;
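  // Size the compression buffer and find the largest single stream; a pinned
  // bounce buffer is only allocated when the sink cannot accept device writes
  // for every stream.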
auto stream_output = [&]() {
size_t max_stream_size = 0;
bool all_device_write = true;
for (size_t stripe_id = 0; stripe_id < segmentation.num_stripes(); stripe_id++) {
for (size_t i = 0; i < num_data_streams; i++) { // TODO range for (at least)
gpu::StripeStream* ss = &strm_descs[stripe_id][i];
if (!out_sink_->is_device_write_preferred(ss->stream_size)) { all_device_write = false; }
size_t stream_size = ss->stream_size;
if (compression_kind_ != NONE) {
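          // Each compression block is preceded by a 3-byte header, so over-allocate
          // the compressed buffer accordingly.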
ss->first_block = num_compressed_blocks;
ss->bfr_offset = compressed_bfr_size;
auto num_blocks = std::max<uint32_t>(
(stream_size + compression_blocksize_ - 1) / compression_blocksize_, 1);
stream_size += num_blocks * 3;
num_compressed_blocks += num_blocks;
compressed_bfr_size += stream_size;
}
        max_stream_size = std::max(max_stream_size, stream_size);
}
}
if (all_device_write) {
return pinned_buffer<uint8_t>{nullptr, hipHostFree};
} else {
return pinned_buffer<uint8_t>{[](size_t size) {
uint8_t* ptr = nullptr;
CUDA_TRY(hipHostMalloc(&ptr, size));
return ptr;
}(max_stream_size),
hipHostFree};
}
}();
// Compress the data streams
rmm::device_buffer compressed_data(compressed_bfr_size, stream);
hostdevice_vector<gpu_inflate_status_s> comp_out(num_compressed_blocks, stream);
hostdevice_vector<gpu_inflate_input_s> comp_in(num_compressed_blocks, stream);
if (compression_kind_ != NONE) {
strm_descs.host_to_device(stream);
gpu::CompressOrcDataStreams(static_cast<uint8_t*>(compressed_data.data()),
num_compressed_blocks,
compression_kind_,
compression_blocksize_,
strm_descs,
enc_data.streams,
comp_in.device_ptr(),
comp_out.device_ptr(),
stream);
strm_descs.device_to_host(stream);
comp_out.device_to_host(stream, true);
}
ProtobufWriter pbw_(&buffer_);
// Write stripes
for (size_t stripe_id = 0; stripe_id < stripes.size(); ++stripe_id) {
auto const& rowgroups_range = segmentation.stripes[stripe_id];
auto& stripe = stripes[stripe_id];
stripe.offset = out_sink_->bytes_written();
// Column (skippable) index streams appear at the start of the stripe
for (size_type stream_id = 0; stream_id < num_index_streams; ++stream_id) {
write_index_stream(stripe_id,
stream_id,
orc_table.columns,
rowgroups_range,
enc_data.streams,
strm_descs,
comp_out,
&stripe,
&streams,
&pbw_);
}
    // Column data consisting of one or more separate streams
for (auto const& strm_desc : strm_descs[stripe_id]) {
write_data_stream(strm_desc,
enc_data.streams[strm_desc.column_id][rowgroups_range.first],
static_cast<uint8_t*>(compressed_data.data()),
stream_output.get(),
&stripe,
&streams);
}
// Write stripefooter consisting of stream information
StripeFooter sf;
sf.streams = streams;
sf.columns.resize(orc_table.num_columns() + 1);
sf.columns[0].kind = DIRECT;
for (size_t i = 1; i < sf.columns.size(); ++i) {
sf.columns[i].kind = orc_table.column(i - 1).orc_encoding();
sf.columns[i].dictionarySize =
(sf.columns[i].kind == DICTIONARY_V2)
? orc_table.column(i - 1).host_stripe_dict(stripe_id)->num_strings
: 0;
if (orc_table.column(i - 1).orc_kind() == TIMESTAMP) { sf.writerTimezone = "UTC"; }
}
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
pbw_.write(sf);
stripe.footerLength = buffer_.size();
if (compression_kind_ != NONE) {
uint32_t uncomp_sf_len = (stripe.footerLength - 3) * 2 + 1;
buffer_[0] = static_cast<uint8_t>(uncomp_sf_len >> 0);
buffer_[1] = static_cast<uint8_t>(uncomp_sf_len >> 8);
buffer_[2] = static_cast<uint8_t>(uncomp_sf_len >> 16);
}
out_sink_->host_write(buffer_.data(), buffer_.size());
}
if (column_stats.size() != 0) {
// File-level statistics
// NOTE: Excluded from chunked write mode to avoid the need for merging stats across calls
if (single_write_mode) {
// First entry contains total number of rows
buffer_.resize(0);
pbw_.putb(1 * 8 + PB_TYPE_VARINT);
pbw_.put_uint(num_rows);
ff.statistics.reserve(1 + orc_table.num_columns());
ff.statistics.emplace_back(std::move(buffer_));
// Add file stats, stored after stripe stats in `column_stats`
ff.statistics.insert(
ff.statistics.end(),
std::make_move_iterator(column_stats.begin()) + stripes.size() * orc_table.num_columns(),
std::make_move_iterator(column_stats.end()));
}
// Stripe-level statistics
size_t first_stripe = md.stripeStats.size();
md.stripeStats.resize(first_stripe + stripes.size());
for (size_t stripe_id = 0; stripe_id < stripes.size(); stripe_id++) {
md.stripeStats[first_stripe + stripe_id].colStats.resize(1 + orc_table.num_columns());
buffer_.resize(0);
pbw_.putb(1 * 8 + PB_TYPE_VARINT);
pbw_.put_uint(stripes[stripe_id].numberOfRows);
md.stripeStats[first_stripe + stripe_id].colStats[0] = std::move(buffer_);
for (size_t col_idx = 0; col_idx < orc_table.num_columns(); col_idx++) {
size_t idx = stripes.size() * col_idx + stripe_id;
if (idx < column_stats.size()) {
md.stripeStats[first_stripe + stripe_id].colStats[1 + col_idx] =
std::move(column_stats[idx]);
}
}
}
}
if (ff.headerLength == 0) {
// First call
ff.headerLength = std::strlen(MAGIC);
ff.rowIndexStride = row_index_stride_;
ff.types.resize(1 + orc_table.num_columns());
ff.types[0].kind = STRUCT;
for (auto const& column : orc_table.columns) {
if (!column.is_child()) {
ff.types[0].subtypes.emplace_back(column.id());
ff.types[0].fieldNames.emplace_back(column.orc_name());
}
}
for (auto const& column : orc_table.columns) {
auto& schema_type = ff.types[column.id()];
schema_type.kind = column.orc_kind();
if (column.orc_kind() == DECIMAL) {
schema_type.scale = static_cast<uint32_t>(column.scale());
schema_type.precision = column.precision();
}
// In preorder traversal the column after a list column is always the child column
if (column.orc_kind() == LIST) { schema_type.subtypes.emplace_back(column.id() + 1); }
}
} else {
// verify the user isn't passing mismatched tables
CUDF_EXPECTS(ff.types.size() == 1 + orc_table.num_columns(),
"Mismatch in table structure between multiple calls to write");
CUDF_EXPECTS(
std::all_of(orc_table.columns.cbegin(),
orc_table.columns.cend(),
[&](auto const& col) { return ff.types[col.id()].kind == col.orc_kind(); }),
"Mismatch in column types between multiple calls to write");
}
ff.stripes.insert(ff.stripes.end(),
std::make_move_iterator(stripes.begin()),
std::make_move_iterator(stripes.end()));
ff.numberOfRows += num_rows;
}
void writer::impl::close()
{
if (closed) { return; }
closed = true;
ProtobufWriter pbw_(&buffer_);
PostScript ps;
ff.contentLength = out_sink_->bytes_written();
if (user_metadata) {
for (auto it = user_metadata->user_data.begin(); it != user_metadata->user_data.end(); it++) {
ff.metadata.push_back({it->first, it->second});
}
}
// Write statistics metadata
if (md.stripeStats.size() != 0) {
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
pbw_.write(md);
add_uncompressed_block_headers(buffer_);
ps.metadataLength = buffer_.size();
out_sink_->host_write(buffer_.data(), buffer_.size());
} else {
ps.metadataLength = 0;
}
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
pbw_.write(ff);
add_uncompressed_block_headers(buffer_);
// Write postscript metadata
ps.footerLength = buffer_.size();
ps.compression = compression_kind_;
ps.compressionBlockSize = compression_blocksize_;
ps.version = {0, 12};
ps.magic = MAGIC;
const auto ps_length = static_cast<uint8_t>(pbw_.write(ps));
buffer_.push_back(ps_length);
out_sink_->host_write(buffer_.data(), buffer_.size());
out_sink_->flush();
}
// Forward to implementation
writer::writer(std::unique_ptr<data_sink> sink,
orc_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _impl(std::make_unique<impl>(std::move(sink), options, mode, stream, mr))
{
}
// Forward to implementation
writer::writer(std::unique_ptr<data_sink> sink,
chunked_orc_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _impl(std::make_unique<impl>(std::move(sink), options, mode, stream, mr))
{
}
// Destructor within this translation unit
writer::~writer() = default;
// Forward to implementation
void writer::write(table_view const& table) { _impl->write(table); }
// Forward to implementation
void writer::close() { _impl->close(); }
} // namespace orc
} // namespace detail
} // namespace io
} // namespace cudf
|
4c7cf3e3a5bd72875df53d468351a28daf11dcef.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file writer_impl.cu
* @brief cuDF-IO ORC writer class implementation
*/
#include "writer_impl.hpp"
#include <io/statistics/column_statistics.cuh>
#include <io/utilities/column_utils.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <algorithm>
#include <cstring>
#include <numeric>
#include <utility>
namespace cudf {
namespace io {
namespace detail {
namespace orc {
using namespace cudf::io::orc;
using namespace cudf::io;
struct row_group_index_info {
int32_t pos = -1; // Position
int32_t blk_pos = -1; // Block Position
int32_t comp_pos = -1; // Compressed Position
int32_t comp_size = -1; // Compressed size
};
namespace {
/**
* @brief Helper for pinned host memory
*/
template <typename T>
using pinned_buffer = std::unique_ptr<T, decltype(&cudaFreeHost)>;
/**
* @brief Function that translates GDF compression to ORC compression
*/
orc::CompressionKind to_orc_compression(compression_type compression)
{
switch (compression) {
case compression_type::AUTO:
case compression_type::SNAPPY: return orc::CompressionKind::SNAPPY;
case compression_type::NONE: return orc::CompressionKind::NONE;
default: CUDF_EXPECTS(false, "Unsupported compression type"); return orc::CompressionKind::NONE;
}
}
/**
* @brief Function that translates GDF dtype to ORC datatype
*/
constexpr orc::TypeKind to_orc_type(cudf::type_id id)
{
switch (id) {
case cudf::type_id::INT8: return TypeKind::BYTE;
case cudf::type_id::INT16: return TypeKind::SHORT;
case cudf::type_id::INT32: return TypeKind::INT;
case cudf::type_id::INT64: return TypeKind::LONG;
case cudf::type_id::FLOAT32: return TypeKind::FLOAT;
case cudf::type_id::FLOAT64: return TypeKind::DOUBLE;
case cudf::type_id::BOOL8: return TypeKind::BOOLEAN;
case cudf::type_id::TIMESTAMP_DAYS: return TypeKind::DATE;
case cudf::type_id::TIMESTAMP_SECONDS:
case cudf::type_id::TIMESTAMP_MICROSECONDS:
case cudf::type_id::TIMESTAMP_MILLISECONDS:
case cudf::type_id::TIMESTAMP_NANOSECONDS: return TypeKind::TIMESTAMP;
case cudf::type_id::STRING: return TypeKind::STRING;
case cudf::type_id::DECIMAL32:
case cudf::type_id::DECIMAL64: return TypeKind::DECIMAL;
case cudf::type_id::LIST: return TypeKind::LIST;
default: return TypeKind::INVALID_TYPE_KIND;
}
}
/**
* @brief Translates time unit to nanoscale multiple.
*/
constexpr int32_t to_clockscale(cudf::type_id timestamp_id)
{
switch (timestamp_id) {
case cudf::type_id::TIMESTAMP_SECONDS: return 9;
case cudf::type_id::TIMESTAMP_MILLISECONDS: return 6;
case cudf::type_id::TIMESTAMP_MICROSECONDS: return 3;
case cudf::type_id::TIMESTAMP_NANOSECONDS:
default: return 0;
}
}
/**
* @brief Returns the precision of the given decimal type.
*/
constexpr auto orc_precision(cudf::type_id decimal_id)
{
switch (decimal_id) {
case cudf::type_id::DECIMAL32: return 9;
case cudf::type_id::DECIMAL64: return 18;
default: return 0;
}
}
} // namespace
/**
* @brief Helper class that adds ORC-specific column info
*/
class orc_column_view {
public:
/**
* @brief Constructor that extracts out the string position + length pairs
* for building dictionaries for string columns
*/
explicit orc_column_view(uint32_t index,
int str_idx,
int index_in_table,
column_view const& col,
const table_metadata* metadata)
: cudf_column{col},
_index{index},
_str_idx{str_idx},
_is_child{index_in_table < 0},
_type_width{cudf::is_fixed_width(col.type()) ? cudf::size_of(col.type()) : 0},
_scale{(to_orc_type(col.type().id()) == TypeKind::DECIMAL) ? -col.type().scale()
: to_clockscale(col.type().id())},
_precision{orc_precision(col.type().id())},
_type_kind{to_orc_type(col.type().id())}
{
// Don't assign names to child columns
if (index_in_table >= 0) {
if (metadata != nullptr && index_in_table < static_cast<int>(metadata->column_names.size())) {
_name = metadata->column_names[index_in_table];
} else {
// Generating default name if name isn't present in metadata
_name = "_col" + std::to_string(index_in_table);
}
}
}
auto is_string() const noexcept { return cudf_column.type().id() == type_id::STRING; }
void set_dict_stride(size_t stride) noexcept { _dict_stride = stride; }
auto dict_stride() const noexcept { return _dict_stride; }
/**
* @brief Function that associates an existing dictionary chunk allocation
*/
void attach_dict_chunk(gpu::DictionaryChunk const* host_dict,
gpu::DictionaryChunk const* dev_dict)
{
dict = host_dict;
d_dict = dev_dict;
}
auto host_dict_chunk(size_t rowgroup) const
{
CUDF_EXPECTS(is_string(), "Dictionary chunks are only present in string columns.");
return &dict[rowgroup * _dict_stride + _str_idx];
}
auto device_dict_chunk() const { return d_dict; }
auto const& decimal_offsets() const { return d_decimal_offsets; }
void attach_decimal_offsets(uint32_t* sizes_ptr) { d_decimal_offsets = sizes_ptr; }
/**
* @brief Function that associates an existing stripe dictionary allocation
*/
void attach_stripe_dict(gpu::StripeDictionary* host_stripe_dict,
gpu::StripeDictionary* dev_stripe_dict)
{
stripe_dict = host_stripe_dict;
d_stripe_dict = dev_stripe_dict;
}
auto host_stripe_dict(size_t stripe) const
{
CUDF_EXPECTS(is_string(), "Stripe dictionary is only present in string columns.");
return &stripe_dict[stripe * _dict_stride + _str_idx];
}
auto device_stripe_dict() const noexcept { return d_stripe_dict; }
// Index in the table
auto index() const noexcept { return _index; }
// Id in the ORC file
auto id() const noexcept { return _index + 1; }
auto is_child() const noexcept { return _is_child; }
auto type_width() const noexcept { return _type_width; }
auto size() const noexcept { return cudf_column.size(); }
auto null_count() const noexcept { return cudf_column.null_count(); }
auto null_mask() const noexcept { return cudf_column.null_mask(); }
bool nullable() const noexcept { return null_mask() != nullptr; }
auto scale() const noexcept { return _scale; }
auto precision() const noexcept { return _precision; }
void set_orc_encoding(ColumnEncodingKind e) noexcept { _encoding_kind = e; }
auto orc_kind() const noexcept { return _type_kind; }
auto orc_encoding() const noexcept { return _encoding_kind; }
auto orc_name() const noexcept { return _name; }
private:
column_view cudf_column;
// Identifier within the set of columns
uint32_t _index = 0;
// Identifier within the set of string columns
int _str_idx;
bool _is_child = false;
size_t _type_width = 0;
int32_t _scale = 0;
int32_t _precision = 0;
// ORC-related members
std::string _name{};
TypeKind _type_kind;
ColumnEncodingKind _encoding_kind;
// String dictionary-related members
size_t _dict_stride = 0;
gpu::DictionaryChunk const* dict = nullptr;
gpu::StripeDictionary const* stripe_dict = nullptr;
gpu::DictionaryChunk const* d_dict = nullptr;
gpu::StripeDictionary const* d_stripe_dict = nullptr;
// Offsets for encoded decimal elements. Used to enable direct writing of encoded decimal elements
// into the output stream.
uint32_t* d_decimal_offsets = nullptr;
};
size_type orc_table_view::num_rows() const noexcept
{
return columns.empty() ? 0 : columns.front().size();
}
/**
* @brief Gathers stripe information.
*
* @param columns List of columns
* @param rowgroup_bounds Ranges of rows in each rowgroup [rowgroup][column]
* @param max_stripe_bytes Maximum size of each stripe, in bytes
* @return List of stripe descriptors
*/
file_segmentation calculate_segmentation(host_span<orc_column_view const> columns,
hostdevice_2dvector<rowgroup_rows>&& rowgroup_bounds,
uint32_t max_stripe_bytes)
{
auto const is_any_column_string =
std::any_of(columns.begin(), columns.end(), [](auto const& col) { return col.is_string(); });
// Apply rows per stripe limit to limit string dictionaries
size_t const max_stripe_rows = is_any_column_string ? 1000000 : 5000000;
std::vector<stripe_rowgroups> infos;
auto const num_rowgroups = rowgroup_bounds.size().first;
size_t stripe_start = 0;
size_t stripe_bytes = 0;
size_t stripe_rows = 0;
for (size_t rg_idx = 0; rg_idx < num_rowgroups; ++rg_idx) {
auto const rowgroup_total_bytes =
std::accumulate(columns.begin(), columns.end(), 0ul, [&](size_t total_size, auto const& col) {
auto const rows = rowgroup_bounds[rg_idx][col.index()].size();
if (col.is_string()) {
const auto dt = col.host_dict_chunk(rg_idx);
return total_size + rows + dt->string_char_count;
} else {
return total_size + col.type_width() * rows;
}
});
auto const rowgroup_rows_max =
std::max_element(rowgroup_bounds[rg_idx].begin(),
rowgroup_bounds[rg_idx].end(),
[](auto& l, auto& r) { return l.size() < r.size(); })
->size();
// Check if adding the current rowgroup to the stripe will make the stripe too large or long
if ((rg_idx > stripe_start) && (stripe_bytes + rowgroup_total_bytes > max_stripe_bytes ||
stripe_rows + rowgroup_rows_max > max_stripe_rows)) {
infos.emplace_back(infos.size(), stripe_start, rg_idx - stripe_start);
stripe_start = rg_idx;
stripe_bytes = 0;
stripe_rows = 0;
}
stripe_bytes += rowgroup_total_bytes;
stripe_rows += rowgroup_rows_max;
if (rg_idx + 1 == num_rowgroups) {
infos.emplace_back(infos.size(), stripe_start, num_rowgroups - stripe_start);
}
}
return {std::move(rowgroup_bounds), std::move(infos)};
}
/**
 * @brief Builds up per-column dictionary indices
*
* @param orc_table Non-owning view of a cuDF table w/ ORC-related info
* @param rowgroup_bounds Ranges of rows in each rowgroup [rowgroup][column]
* @param dict_data Dictionary data memory
* @param dict_index Dictionary index memory
* @param dict List of dictionary chunks
* @param stream CUDA stream used for device memory operations and kernel launches
*/
void init_dictionaries(orc_table_view& orc_table,
device_2dspan<rowgroup_rows const> rowgroup_bounds,
device_span<device_span<uint32_t>> dict_data,
device_span<device_span<uint32_t>> dict_index,
hostdevice_2dvector<gpu::DictionaryChunk>* dict,
rmm::cuda_stream_view stream)
{
// Setup per-rowgroup dictionary indexes for each dictionary-aware column
for (auto col_idx : orc_table.string_column_indices) {
auto& str_column = orc_table.column(col_idx);
str_column.set_dict_stride(orc_table.num_string_columns());
str_column.attach_dict_chunk(dict->base_host_ptr(), dict->base_device_ptr());
}
// Allocate temporary memory for dictionary indices
std::vector<rmm::device_uvector<uint32_t>> dict_indices;
dict_indices.reserve(orc_table.num_string_columns());
std::transform(orc_table.string_column_indices.cbegin(),
orc_table.string_column_indices.cend(),
std::back_inserter(dict_indices),
[&](auto& col_idx) {
auto& str_column = orc_table.column(col_idx);
return cudf::detail::make_zeroed_device_uvector_async<uint32_t>(
str_column.size(), stream);
});
// Create views of the temporary buffers in device memory
std::vector<device_span<uint32_t>> dict_indices_views;
dict_indices_views.reserve(dict_indices.size());
std::transform(
dict_indices.begin(), dict_indices.end(), std::back_inserter(dict_indices_views), [](auto& di) {
return device_span<uint32_t>{di};
});
auto d_dict_indices_views = cudf::detail::make_device_uvector_async(dict_indices_views, stream);
gpu::InitDictionaryIndices(orc_table.d_columns,
*dict,
dict_data,
dict_index,
d_dict_indices_views,
rowgroup_bounds,
orc_table.d_string_column_indices,
stream);
dict->device_to_host(stream, true);
}
void writer::impl::build_dictionaries(orc_table_view& orc_table,
host_span<stripe_rowgroups const> stripe_bounds,
hostdevice_2dvector<gpu::DictionaryChunk> const& dict,
host_span<rmm::device_uvector<uint32_t>> dict_index,
host_span<bool const> dictionary_enabled,
hostdevice_2dvector<gpu::StripeDictionary>& stripe_dict)
{
const auto num_rowgroups = dict.size().first;
for (size_t dict_idx = 0; dict_idx < orc_table.num_string_columns(); ++dict_idx) {
auto& str_column = orc_table.string_column(dict_idx);
str_column.attach_stripe_dict(stripe_dict.base_host_ptr(), stripe_dict.base_device_ptr());
for (auto const& stripe : stripe_bounds) {
auto& sd = stripe_dict[stripe.id][dict_idx];
sd.dict_data = str_column.host_dict_chunk(stripe.first)->dict_data;
sd.dict_index = dict_index[dict_idx].data(); // Indexed by abs row
sd.column_id = orc_table.string_column_indices[dict_idx];
sd.start_chunk = stripe.first;
sd.num_chunks = stripe.size;
sd.dict_char_count = 0;
sd.num_strings =
std::accumulate(stripe.cbegin(), stripe.cend(), 0, [&](auto dt_str_cnt, auto rg_idx) {
const auto& dt = dict[rg_idx][dict_idx];
return dt_str_cnt + dt.num_dict_strings;
});
sd.leaf_column = dict[0][dict_idx].leaf_column;
}
if (enable_dictionary_) {
struct string_column_cost {
size_t direct = 0;
size_t dictionary = 0;
};
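      // Rough cost model: direct encoding pays for every character of every string, while
      // dictionary encoding pays for each unique string's characters plus about one byte of
      // length/index overhead per dictionary entry.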
auto const col_cost =
std::accumulate(stripe_bounds.front().cbegin(),
stripe_bounds.back().cend(),
string_column_cost{},
[&](auto cost, auto rg_idx) -> string_column_cost {
const auto& dt = dict[rg_idx][dict_idx];
return {cost.direct + dt.string_char_count,
cost.dictionary + dt.dict_char_count + dt.num_dict_strings};
});
// Disable dictionary if it does not reduce the output size
if (!dictionary_enabled[orc_table.string_column(dict_idx).index()] ||
col_cost.dictionary >= col_cost.direct) {
for (auto const& stripe : stripe_bounds) {
stripe_dict[stripe.id][dict_idx].dict_data = nullptr;
}
}
}
}
stripe_dict.host_to_device(stream);
gpu::BuildStripeDictionaries(stripe_dict, stripe_dict, dict, stream);
stripe_dict.device_to_host(stream, true);
}
constexpr size_t RLE_stream_size(TypeKind kind, size_t count)
{
using cudf::util::div_rounding_up_unsafe;
constexpr auto byte_rle_max_len = 128;
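  // Worst-case sizes: byte RLE emits literal runs of at most 128 bytes, each with a 1-byte
  // header (booleans are bit-packed 8 values per byte first); integer RLE is bounded by raw
  // values in encode_block_size-element blocks plus a 2-byte header per block.
  // Example: 1024 BOOLEAN values -> ceil(1024 / (128 * 8)) * (128 + 1) = 129 bytes.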
switch (kind) {
case TypeKind::BOOLEAN:
return div_rounding_up_unsafe(count, byte_rle_max_len * 8) * (byte_rle_max_len + 1);
case TypeKind::BYTE:
return div_rounding_up_unsafe(count, byte_rle_max_len) * (byte_rle_max_len + 1);
case TypeKind::SHORT:
return div_rounding_up_unsafe(count, gpu::encode_block_size) *
(gpu::encode_block_size * sizeof(int16_t) + 2);
case TypeKind::FLOAT:
case TypeKind::INT:
case TypeKind::DATE:
return div_rounding_up_unsafe(count, gpu::encode_block_size) *
(gpu::encode_block_size * sizeof(int32_t) + 2);
case TypeKind::LONG:
case TypeKind::DOUBLE:
return div_rounding_up_unsafe(count, gpu::encode_block_size) *
(gpu::encode_block_size * sizeof(int64_t) + 2);
default: CUDF_FAIL("Unsupported ORC type for RLE stream size");
}
}
orc_streams writer::impl::create_streams(host_span<orc_column_view> columns,
file_segmentation const& segmentation,
std::map<uint32_t, size_t> const& decimal_column_sizes)
{
// 'column 0' row index stream
std::vector<Stream> streams{{ROW_INDEX, 0}}; // TODO: Separate index and data streams?
// First n + 1 streams are row index streams
streams.reserve(columns.size() + 1);
std::transform(columns.begin(), columns.end(), std::back_inserter(streams), [](auto const& col) {
return Stream{ROW_INDEX, col.id()};
});
std::vector<int32_t> ids(columns.size() * gpu::CI_NUM_STREAMS, -1);
std::vector<TypeKind> types(streams.size(), INVALID_TYPE_KIND);
for (auto& column : columns) {
auto const is_nullable = [&]() -> bool {
if (single_write_mode) {
return column.nullable();
} else {
if (user_metadata_with_nullability.column_nullable.empty()) return true;
CUDF_EXPECTS(user_metadata_with_nullability.column_nullable.size() > column.index(),
"When passing values in user_metadata_with_nullability, data for all columns "
"must be specified");
return user_metadata_with_nullability.column_nullable[column.index()];
}
}();
auto RLE_column_size = [&](TypeKind type_kind) {
return std::accumulate(
thrust::make_counting_iterator(0ul),
thrust::make_counting_iterator(segmentation.num_rowgroups()),
0ul,
[&](auto data_size, auto rg_idx) {
return data_size +
RLE_stream_size(type_kind, segmentation.rowgroups[rg_idx][column.index()].size());
});
};
auto const kind = column.orc_kind();
auto add_stream =
[&](gpu::StreamIndexType index_type, StreamKind kind, TypeKind type_kind, size_t size) {
const auto base = column.index() * gpu::CI_NUM_STREAMS;
ids[base + index_type] = streams.size();
streams.push_back(orc::Stream{kind, column.id(), size});
types.push_back(type_kind);
};
auto add_RLE_stream = [&](
gpu::StreamIndexType index_type, StreamKind kind, TypeKind type_kind) {
add_stream(index_type, kind, type_kind, RLE_column_size(type_kind));
};
if (is_nullable) { add_RLE_stream(gpu::CI_PRESENT, PRESENT, TypeKind::BOOLEAN); }
switch (kind) {
case TypeKind::BOOLEAN:
case TypeKind::BYTE:
add_RLE_stream(gpu::CI_DATA, DATA, kind);
column.set_orc_encoding(DIRECT);
break;
case TypeKind::SHORT:
case TypeKind::INT:
case TypeKind::LONG:
case TypeKind::DATE:
add_RLE_stream(gpu::CI_DATA, DATA, kind);
column.set_orc_encoding(DIRECT_V2);
break;
case TypeKind::FLOAT:
case TypeKind::DOUBLE:
// Pass through if no nulls (no RLE encoding for floating point)
add_stream(
gpu::CI_DATA, DATA, kind, (column.null_count() != 0) ? RLE_column_size(kind) : 0);
column.set_orc_encoding(DIRECT);
break;
case TypeKind::STRING: {
bool enable_dict = enable_dictionary_;
size_t dict_data_size = 0;
size_t dict_strings = 0;
size_t dict_lengths_div512 = 0;
for (auto const& stripe : segmentation.stripes) {
const auto sd = column.host_stripe_dict(stripe.id);
enable_dict = (enable_dict && sd->dict_data != nullptr);
if (enable_dict) {
dict_strings += sd->num_strings;
dict_lengths_div512 += (sd->num_strings + 0x1ff) >> 9;
dict_data_size += sd->dict_char_count;
}
}
auto const direct_data_size =
std::accumulate(segmentation.stripes.front().cbegin(),
segmentation.stripes.back().cend(),
size_t{0},
[&](auto data_size, auto rg_idx) {
return data_size + column.host_dict_chunk(rg_idx)->string_char_count;
});
if (enable_dict) {
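          // Estimate the size of the bit-packed dictionary indices: the index width is
          // rounded up to a power-of-two bit count large enough to address every
          // dictionary entry.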
uint32_t dict_bits = 0;
for (dict_bits = 1; dict_bits < 32; dict_bits <<= 1) {
if (dict_strings <= (1ull << dict_bits)) break;
}
const auto valid_count = column.size() - column.null_count();
dict_data_size += (dict_bits * valid_count + 7) >> 3;
}
// Decide between direct or dictionary encoding
if (enable_dict && dict_data_size < direct_data_size) {
add_RLE_stream(gpu::CI_DATA, DATA, TypeKind::INT);
add_stream(gpu::CI_DATA2, LENGTH, TypeKind::INT, dict_lengths_div512 * (512 * 4 + 2));
add_stream(
gpu::CI_DICTIONARY, DICTIONARY_DATA, TypeKind::CHAR, std::max(dict_data_size, 1ul));
column.set_orc_encoding(DICTIONARY_V2);
} else {
add_stream(gpu::CI_DATA, DATA, TypeKind::CHAR, std::max<size_t>(direct_data_size, 1));
add_RLE_stream(gpu::CI_DATA2, LENGTH, TypeKind::INT);
column.set_orc_encoding(DIRECT_V2);
}
break;
}
case TypeKind::TIMESTAMP:
add_RLE_stream(gpu::CI_DATA, DATA, TypeKind::LONG);
add_RLE_stream(gpu::CI_DATA2, SECONDARY, TypeKind::LONG);
column.set_orc_encoding(DIRECT_V2);
break;
case TypeKind::DECIMAL:
        // Varint-encoded values (no RLE); the per-column size was precomputed in
        // decimal_column_sizes
add_stream(gpu::CI_DATA, DATA, TypeKind::DECIMAL, decimal_column_sizes.at(column.index()));
        // Scale stream. TODO: compute the exact size, since all elements are equal
add_RLE_stream(gpu::CI_DATA2, SECONDARY, TypeKind::INT);
column.set_orc_encoding(DIRECT_V2);
break;
case TypeKind::LIST:
// no data stream, only lengths
add_RLE_stream(gpu::CI_DATA2, LENGTH, TypeKind::INT);
column.set_orc_encoding(DIRECT_V2);
break;
default: CUDF_FAIL("Unsupported ORC type kind");
}
}
return {std::move(streams), std::move(ids), std::move(types)};
}
orc_streams::orc_stream_offsets orc_streams::compute_offsets(
host_span<orc_column_view const> columns, size_t num_rowgroups) const
{
std::vector<size_t> strm_offsets(streams.size());
size_t non_rle_data_size = 0;
size_t rle_data_size = 0;
for (size_t i = 0; i < streams.size(); ++i) {
const auto& stream = streams[i];
auto const is_rle_data = [&]() {
// First stream is an index stream, don't check types, etc.
if (!stream.column_index().has_value()) return true;
auto const& column = columns[stream.column_index().value()];
// Dictionary encoded string column - dictionary characters or
// directly encoded string - column characters
if (column.orc_kind() == TypeKind::STRING &&
((stream.kind == DICTIONARY_DATA && column.orc_encoding() == DICTIONARY_V2) ||
(stream.kind == DATA && column.orc_encoding() == DIRECT_V2)))
return false;
// Decimal data
if (column.orc_kind() == TypeKind::DECIMAL && stream.kind == DATA) return false;
// Everything else uses RLE
return true;
}();
    // Non-RLE and RLE streams are kept in separate regions of the buffer that stores encoded
    // data; each stream's offset is computed relative to its own region.
if (is_rle_data) {
strm_offsets[i] = rle_data_size;
rle_data_size += (stream.length + 7) & ~7;
} else {
strm_offsets[i] = non_rle_data_size;
non_rle_data_size += stream.length;
}
}
non_rle_data_size = (non_rle_data_size + 7) & ~7;
return {std::move(strm_offsets), non_rle_data_size, rle_data_size};
}
struct segmented_valid_cnt_input {
bitmask_type const* mask;
std::vector<size_type> indices;
};
encoded_data writer::impl::encode_columns(orc_table_view const& orc_table,
string_dictionaries&& dictionaries,
encoder_decimal_info&& dec_chunk_sizes,
file_segmentation const& segmentation,
orc_streams const& streams)
{
auto const num_columns = orc_table.num_columns();
hostdevice_2dvector<gpu::EncChunk> chunks(num_columns, segmentation.num_rowgroups(), stream);
auto const stream_offsets =
streams.compute_offsets(orc_table.columns, segmentation.num_rowgroups());
rmm::device_uvector<uint8_t> encoded_data(stream_offsets.data_size(), stream);
// Initialize column chunks' descriptions
std::map<size_type, segmented_valid_cnt_input> validity_check_inputs;
for (auto const& column : orc_table.columns) {
for (auto const& stripe : segmentation.stripes) {
for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend(); ++rg_idx_it) {
auto const rg_idx = *rg_idx_it;
auto& ck = chunks[column.index()][rg_idx];
ck.start_row = segmentation.rowgroups[rg_idx][column.index()].begin;
ck.num_rows = segmentation.rowgroups[rg_idx][column.index()].size();
ck.encoding_kind = column.orc_encoding();
ck.type_kind = column.orc_kind();
if (ck.type_kind == TypeKind::STRING) {
ck.dict_index = (ck.encoding_kind == DICTIONARY_V2)
? column.host_stripe_dict(stripe.id)->dict_index
: nullptr;
ck.dtype_len = 1;
} else {
ck.dtype_len = column.type_width();
}
ck.scale = column.scale();
if (ck.type_kind == TypeKind::DECIMAL) {
ck.decimal_offsets = device_span<uint32_t>{column.decimal_offsets(), ck.num_rows};
}
}
}
}
auto validity_check_indices = [&](size_t col_idx) {
std::vector<size_type> indices;
for (auto const& stripe : segmentation.stripes) {
for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend() - 1; ++rg_idx_it) {
auto const& chunk = chunks[col_idx][*rg_idx_it];
indices.push_back(chunk.start_row);
indices.push_back(chunk.start_row + chunk.num_rows);
}
}
return indices;
};
for (auto const& column : orc_table.columns) {
if (column.orc_kind() == TypeKind::BOOLEAN && column.nullable()) {
validity_check_inputs[column.index()] = {column.null_mask(),
validity_check_indices(column.index())};
}
}
for (auto& cnt_in : validity_check_inputs) {
auto const valid_counts = segmented_count_set_bits(cnt_in.second.mask, cnt_in.second.indices);
CUDF_EXPECTS(
std::none_of(valid_counts.cbegin(),
valid_counts.cend(),
[](auto valid_count) { return valid_count % 8; }),
"There's currently a bug in encoding boolean columns. Suggested workaround is to convert "
"to int8 type."
" Please see https://github.com/rapidsai/cudf/issues/6763 for more information.");
}
hostdevice_2dvector<gpu::encoder_chunk_streams> chunk_streams(
num_columns, segmentation.num_rowgroups(), stream);
for (size_t col_idx = 0; col_idx < num_columns; col_idx++) {
auto const& column = orc_table.column(col_idx);
auto col_streams = chunk_streams[col_idx];
for (auto const& stripe : segmentation.stripes) {
for (auto rg_idx_it = stripe.cbegin(); rg_idx_it < stripe.cend(); ++rg_idx_it) {
auto const rg_idx = *rg_idx_it;
auto const& ck = chunks[col_idx][rg_idx];
auto& strm = col_streams[rg_idx];
for (int strm_type = 0; strm_type < gpu::CI_NUM_STREAMS; ++strm_type) {
auto const strm_id = streams.id(col_idx * gpu::CI_NUM_STREAMS + strm_type);
strm.ids[strm_type] = strm_id;
if (strm_id >= 0) {
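            // Lay out this stream's chunk within the shared encoded_data buffer:
            //  - dictionary character/length streams are sized once per stripe,
            //  - direct string DATA and decimal DATA chunks are packed back-to-back in the
            //    non-RLE region at the start of the buffer,
            //  - all other streams get a worst-case RLE slot in the RLE region that follows.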
if ((strm_type == gpu::CI_DICTIONARY) ||
(strm_type == gpu::CI_DATA2 && ck.encoding_kind == DICTIONARY_V2)) {
if (rg_idx_it == stripe.cbegin()) {
const int32_t dict_stride = column.dict_stride();
const auto stripe_dict = column.host_stripe_dict(stripe.id);
strm.lengths[strm_type] =
(strm_type == gpu::CI_DICTIONARY)
? stripe_dict->dict_char_count
: (((stripe_dict->num_strings + 0x1ff) >> 9) * (512 * 4 + 2));
if (stripe.id == 0) {
strm.data_ptrs[strm_type] = encoded_data.data() + stream_offsets.offsets[strm_id];
                  // Dictionary lengths are RLE-encoded, and RLE streams are stored after all
                  // non-RLE data, so include the non-RLE data size in the offset only in that case
if (strm_type == gpu::CI_DATA2 && ck.encoding_kind == DICTIONARY_V2)
strm.data_ptrs[strm_type] += stream_offsets.non_rle_data_size;
} else {
auto const& strm_up = col_streams[stripe_dict[-dict_stride].start_chunk];
strm.data_ptrs[strm_type] =
strm_up.data_ptrs[strm_type] + strm_up.lengths[strm_type];
}
} else {
strm.lengths[strm_type] = 0;
strm.data_ptrs[strm_type] = col_streams[rg_idx - 1].data_ptrs[strm_type];
}
} else if (strm_type == gpu::CI_DATA && ck.type_kind == TypeKind::STRING &&
ck.encoding_kind == DIRECT_V2) {
strm.lengths[strm_type] = column.host_dict_chunk(rg_idx)->string_char_count;
strm.data_ptrs[strm_type] = (rg_idx == 0)
? encoded_data.data() + stream_offsets.offsets[strm_id]
: (col_streams[rg_idx - 1].data_ptrs[strm_type] +
col_streams[rg_idx - 1].lengths[strm_type]);
} else if (strm_type == gpu::CI_DATA && streams[strm_id].length == 0 &&
(ck.type_kind == DOUBLE || ck.type_kind == FLOAT)) {
// Pass-through
strm.lengths[strm_type] = ck.num_rows * ck.dtype_len;
strm.data_ptrs[strm_type] = nullptr;
} else if (ck.type_kind == DECIMAL && strm_type == gpu::CI_DATA) {
strm.lengths[strm_type] = dec_chunk_sizes.rg_sizes.at(col_idx)[rg_idx];
strm.data_ptrs[strm_type] = (rg_idx == 0)
? encoded_data.data() + stream_offsets.offsets[strm_id]
: (col_streams[rg_idx - 1].data_ptrs[strm_type] +
col_streams[rg_idx - 1].lengths[strm_type]);
} else {
strm.lengths[strm_type] = RLE_stream_size(streams.type(strm_id), ck.num_rows);
// RLE encoded streams are stored after all non-RLE streams
strm.data_ptrs[strm_type] =
(rg_idx == 0) ? (encoded_data.data() + stream_offsets.non_rle_data_size +
stream_offsets.offsets[strm_id])
: (col_streams[rg_idx - 1].data_ptrs[strm_type] +
col_streams[rg_idx - 1].lengths[strm_type]);
}
} else {
strm.lengths[strm_type] = 0;
strm.data_ptrs[strm_type] = nullptr;
}
}
}
}
}
chunks.host_to_device(stream);
chunk_streams.host_to_device(stream);
gpu::set_chunk_columns(orc_table.d_columns, chunks, stream);
if (orc_table.num_string_columns() != 0) {
auto d_stripe_dict = orc_table.string_column(0).device_stripe_dict();
gpu::EncodeStripeDictionaries(d_stripe_dict,
chunks,
orc_table.num_string_columns(),
segmentation.num_stripes(),
chunk_streams,
stream);
}
gpu::EncodeOrcColumnData(chunks, chunk_streams, stream);
dictionaries.data.clear();
dictionaries.index.clear();
stream.synchronize();
return {std::move(encoded_data), std::move(chunk_streams)};
}
std::vector<StripeInformation> writer::impl::gather_stripes(
size_t num_index_streams,
file_segmentation const& segmentation,
hostdevice_2dvector<gpu::encoder_chunk_streams>* enc_streams,
hostdevice_2dvector<gpu::StripeStream>* strm_desc)
{
std::vector<StripeInformation> stripes(segmentation.num_stripes());
for (auto const& stripe : segmentation.stripes) {
for (size_t col_idx = 0; col_idx < enc_streams->size().first; col_idx++) {
const auto& strm = (*enc_streams)[col_idx][stripe.first];
// Assign stream data of column data stream(s)
for (int k = 0; k < gpu::CI_INDEX; k++) {
const auto stream_id = strm.ids[k];
if (stream_id != -1) {
auto* ss = &(*strm_desc)[stripe.id][stream_id - num_index_streams];
ss->stream_size = 0;
ss->first_chunk_id = stripe.first;
ss->num_chunks = stripe.size;
ss->column_id = col_idx;
ss->stream_type = k;
}
}
}
stripes[stripe.id].numberOfRows =
stripe.size == 0 ? 0
: segmentation.rowgroups[stripe.first + stripe.size - 1][0].end -
segmentation.rowgroups[stripe.first][0].begin;
}
strm_desc->host_to_device(stream);
gpu::CompactOrcDataStreams(*strm_desc, *enc_streams, stream);
strm_desc->device_to_host(stream);
enc_streams->device_to_host(stream, true);
return stripes;
}
void set_stat_desc_leaf_cols(device_span<orc_column_device_view const> columns,
device_span<stats_column_desc> stat_desc,
rmm::cuda_stream_view stream)
{
thrust::for_each(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0ul),
thrust::make_counting_iterator(stat_desc.size()),
[=] __device__(auto idx) { stat_desc[idx].leaf_column = &columns[idx].cudf_column; });
}
std::vector<std::vector<uint8_t>> writer::impl::gather_statistic_blobs(
orc_table_view const& orc_table, file_segmentation const& segmentation)
{
auto const num_stat_blobs = (1 + segmentation.num_stripes()) * orc_table.num_columns();
hostdevice_vector<stats_column_desc> stat_desc(orc_table.num_columns(), stream);
hostdevice_vector<statistics_merge_group> stat_merge(num_stat_blobs, stream);
for (auto const& column : orc_table.columns) {
stats_column_desc* desc = &stat_desc[column.index()];
switch (column.orc_kind()) {
case TypeKind::BYTE: desc->stats_dtype = dtype_int8; break;
case TypeKind::SHORT: desc->stats_dtype = dtype_int16; break;
case TypeKind::INT: desc->stats_dtype = dtype_int32; break;
case TypeKind::LONG: desc->stats_dtype = dtype_int64; break;
case TypeKind::FLOAT: desc->stats_dtype = dtype_float32; break;
case TypeKind::DOUBLE: desc->stats_dtype = dtype_float64; break;
case TypeKind::BOOLEAN: desc->stats_dtype = dtype_bool; break;
case TypeKind::DATE: desc->stats_dtype = dtype_int32; break;
case TypeKind::DECIMAL: desc->stats_dtype = dtype_decimal64; break;
case TypeKind::TIMESTAMP: desc->stats_dtype = dtype_timestamp64; break;
case TypeKind::STRING: desc->stats_dtype = dtype_string; break;
default: desc->stats_dtype = dtype_none; break;
}
desc->num_rows = column.size();
desc->num_values = column.size();
if (desc->stats_dtype == dtype_timestamp64) {
// Timestamp statistics are in milliseconds
switch (column.scale()) {
case 9: desc->ts_scale = 1000; break;
case 6: desc->ts_scale = 0; break;
case 3: desc->ts_scale = -1000; break;
case 0: desc->ts_scale = -1000000; break;
default: desc->ts_scale = 0; break;
}
} else {
desc->ts_scale = 0;
}
for (auto const& stripe : segmentation.stripes) {
auto grp = &stat_merge[column.index() * segmentation.num_stripes() + stripe.id];
grp->col = stat_desc.device_ptr(column.index());
grp->start_chunk =
static_cast<uint32_t>(column.index() * segmentation.num_rowgroups() + stripe.first);
grp->num_chunks = stripe.size;
}
statistics_merge_group* col_stats =
&stat_merge[segmentation.num_stripes() * orc_table.num_columns() + column.index()];
col_stats->col = stat_desc.device_ptr(column.index());
col_stats->start_chunk = static_cast<uint32_t>(column.index() * segmentation.num_stripes());
col_stats->num_chunks = static_cast<uint32_t>(segmentation.num_stripes());
}
stat_desc.host_to_device(stream);
stat_merge.host_to_device(stream);
set_stat_desc_leaf_cols(orc_table.d_columns, stat_desc, stream);
auto const num_chunks = segmentation.rowgroups.count();
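  // stat_chunks layout: [0, num_chunks) holds per-rowgroup statistics, followed by
  // num_stripes * num_columns stripe-level merged chunks, then num_columns file-level chunks.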
rmm::device_uvector<statistics_chunk> stat_chunks(num_chunks + num_stat_blobs, stream);
rmm::device_uvector<statistics_group> stat_groups(num_chunks, stream);
gpu::orc_init_statistics_groups(
stat_groups.data(), stat_desc.device_ptr(), segmentation.rowgroups, stream);
detail::calculate_group_statistics<detail::io_file_format::ORC>(
stat_chunks.data(), stat_groups.data(), num_chunks, stream);
detail::merge_group_statistics<detail::io_file_format::ORC>(
stat_chunks.data() + num_chunks,
stat_chunks.data(),
stat_merge.device_ptr(),
segmentation.num_stripes() * orc_table.num_columns(),
stream);
detail::merge_group_statistics<detail::io_file_format::ORC>(
stat_chunks.data() + num_chunks + segmentation.num_stripes() * orc_table.num_columns(),
stat_chunks.data() + num_chunks,
stat_merge.device_ptr(segmentation.num_stripes() * orc_table.num_columns()),
orc_table.num_columns(),
stream);
gpu::orc_init_statistics_buffersize(
stat_merge.device_ptr(), stat_chunks.data() + num_chunks, num_stat_blobs, stream);
stat_merge.device_to_host(stream, true);
hostdevice_vector<uint8_t> blobs(
stat_merge[num_stat_blobs - 1].start_chunk + stat_merge[num_stat_blobs - 1].num_chunks, stream);
gpu::orc_encode_statistics(blobs.device_ptr(),
stat_merge.device_ptr(),
stat_chunks.data() + num_chunks,
num_stat_blobs,
stream);
stat_merge.device_to_host(stream);
blobs.device_to_host(stream, true);
std::vector<std::vector<uint8_t>> stat_blobs(num_stat_blobs);
for (size_t i = 0; i < num_stat_blobs; i++) {
const uint8_t* stat_begin = blobs.host_ptr(stat_merge[i].start_chunk);
const uint8_t* stat_end = stat_begin + stat_merge[i].num_chunks;
stat_blobs[i].assign(stat_begin, stat_end);
}
return stat_blobs;
}
void writer::impl::write_index_stream(int32_t stripe_id,
int32_t stream_id,
host_span<orc_column_view const> columns,
stripe_rowgroups const& rowgroups_range,
host_2dspan<gpu::encoder_chunk_streams const> enc_streams,
host_2dspan<gpu::StripeStream const> strm_desc,
host_span<gpu_inflate_status_s const> comp_out,
StripeInformation* stripe,
orc_streams* streams,
ProtobufWriter* pbw)
{
row_group_index_info present;
row_group_index_info data;
row_group_index_info data2;
auto kind = TypeKind::STRUCT;
auto const column_id = stream_id - 1;
auto find_record = [=, &strm_desc](gpu::encoder_chunk_streams const& stream,
gpu::StreamIndexType type) {
row_group_index_info record;
if (stream.ids[type] > 0) {
record.pos = 0;
if (compression_kind_ != NONE) {
auto const& ss = strm_desc[stripe_id][stream.ids[type] - (columns.size() + 1)];
record.blk_pos = ss.first_block;
record.comp_pos = 0;
record.comp_size = ss.stream_size;
}
}
return record;
};
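  // Advances a stream's running position across row groups; with compression enabled it also
  // steps over completed compressed blocks (3-byte header + payload), keeping 'pos' relative
  // to the current block and 'comp_pos' at that block's start.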
auto scan_record = [=, &comp_out](gpu::encoder_chunk_streams const& stream,
gpu::StreamIndexType type,
row_group_index_info& record) {
if (record.pos >= 0) {
record.pos += stream.lengths[type];
while ((record.pos >= 0) && (record.blk_pos >= 0) &&
(static_cast<size_t>(record.pos) >= compression_blocksize_) &&
(record.comp_pos + 3 + comp_out[record.blk_pos].bytes_written <
static_cast<size_t>(record.comp_size))) {
record.pos -= compression_blocksize_;
record.comp_pos += 3 + comp_out[record.blk_pos].bytes_written;
record.blk_pos += 1;
}
}
};
// TBD: Not sure we need an empty index stream for column 0
if (stream_id != 0) {
const auto& strm = enc_streams[column_id][0];
present = find_record(strm, gpu::CI_PRESENT);
data = find_record(strm, gpu::CI_DATA);
data2 = find_record(strm, gpu::CI_DATA2);
    // From the row index's point of view, a dictionary-encoded string column is treated as INT
kind = columns[column_id].orc_kind();
if (kind == TypeKind::STRING && columns[column_id].orc_encoding() == DICTIONARY_V2) {
kind = TypeKind::INT;
}
}
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
// Add row index entries
std::for_each(rowgroups_range.cbegin(), rowgroups_range.cend(), [&](auto rowgroup) {
pbw->put_row_index_entry(
present.comp_pos, present.pos, data.comp_pos, data.pos, data2.comp_pos, data2.pos, kind);
if (stream_id != 0) {
const auto& strm = enc_streams[column_id][rowgroup];
scan_record(strm, gpu::CI_PRESENT, present);
scan_record(strm, gpu::CI_DATA, data);
scan_record(strm, gpu::CI_DATA2, data2);
}
});
(*streams)[stream_id].length = buffer_.size();
if (compression_kind_ != NONE) {
uint32_t uncomp_ix_len = (uint32_t)((*streams)[stream_id].length - 3) * 2 + 1;
buffer_[0] = static_cast<uint8_t>(uncomp_ix_len >> 0);
buffer_[1] = static_cast<uint8_t>(uncomp_ix_len >> 8);
buffer_[2] = static_cast<uint8_t>(uncomp_ix_len >> 16);
}
out_sink_->host_write(buffer_.data(), buffer_.size());
stripe->indexLength += buffer_.size();
}
void writer::impl::write_data_stream(gpu::StripeStream const& strm_desc,
gpu::encoder_chunk_streams const& enc_stream,
uint8_t const* compressed_data,
uint8_t* stream_out,
StripeInformation* stripe,
orc_streams* streams)
{
const auto length = strm_desc.stream_size;
(*streams)[enc_stream.ids[strm_desc.stream_type]].length = length;
if (length == 0) { return; }
const auto* stream_in = (compression_kind_ == NONE) ? enc_stream.data_ptrs[strm_desc.stream_type]
: (compressed_data + strm_desc.bfr_offset);
if (out_sink_->is_device_write_preferred(length)) {
out_sink_->device_write(stream_in, length, stream);
} else {
CUDA_TRY(
cudaMemcpyAsync(stream_out, stream_in, length, cudaMemcpyDeviceToHost, stream.value()));
stream.synchronize();
out_sink_->host_write(stream_out, length);
}
stripe->dataLength += length;
}
void writer::impl::add_uncompressed_block_headers(std::vector<uint8_t>& v)
{
if (compression_kind_ != NONE) {
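    // ORC compression block header: 3 little-endian bytes holding (block length << 1) | is_original;
    // the set LSB below ('* 2 + 1') marks the block as stored uncompressed.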
size_t uncomp_len = v.size() - 3, pos = 0, block_len;
while (uncomp_len > compression_blocksize_) {
block_len = compression_blocksize_ * 2 + 1;
v[pos + 0] = static_cast<uint8_t>(block_len >> 0);
v[pos + 1] = static_cast<uint8_t>(block_len >> 8);
v[pos + 2] = static_cast<uint8_t>(block_len >> 16);
pos += 3 + compression_blocksize_;
v.insert(v.begin() + pos, 3, 0);
uncomp_len -= compression_blocksize_;
}
block_len = uncomp_len * 2 + 1;
v[pos + 0] = static_cast<uint8_t>(block_len >> 0);
v[pos + 1] = static_cast<uint8_t>(block_len >> 8);
v[pos + 2] = static_cast<uint8_t>(block_len >> 16);
}
}
writer::impl::impl(std::unique_ptr<data_sink> sink,
orc_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: compression_kind_(to_orc_compression(options.get_compression())),
enable_statistics_(options.enable_statistics()),
out_sink_(std::move(sink)),
single_write_mode(mode == SingleWriteMode::YES),
user_metadata(options.get_metadata()),
stream(stream),
_mr(mr)
{
init_state();
}
writer::impl::impl(std::unique_ptr<data_sink> sink,
chunked_orc_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: compression_kind_(to_orc_compression(options.get_compression())),
enable_statistics_(options.enable_statistics()),
out_sink_(std::move(sink)),
single_write_mode(mode == SingleWriteMode::YES),
stream(stream),
_mr(mr)
{
if (options.get_metadata() != nullptr) {
user_metadata_with_nullability = *options.get_metadata();
user_metadata = &user_metadata_with_nullability;
}
init_state();
}
writer::impl::~impl() { close(); }
void writer::impl::init_state()
{
// Write file header
out_sink_->host_write(MAGIC, std::strlen(MAGIC));
}
/**
 * @brief Appends ORC device columns in pre-order (each parent immediately before its children)
*/
void __device__ append_orc_device_column(uint32_t& idx,
thrust::optional<uint32_t> parent_idx,
device_span<orc_column_device_view> cols,
column_device_view col)
{
auto const current_idx = idx;
cols[current_idx] = orc_column_device_view{col, parent_idx};
idx++;
if (col.type().id() == type_id::LIST) {
append_orc_device_column(
idx, current_idx, cols, col.child(lists_column_view::child_column_index));
}
if (col.type().id() == type_id::STRUCT) {
for (auto child_idx = 0; child_idx < col.num_child_columns(); ++child_idx) {
append_orc_device_column(idx, current_idx, cols, col.child(child_idx));
}
}
};
orc_table_view make_orc_table_view(table_view const& table,
table_device_view const& d_table,
table_metadata const* user_metadata,
rmm::cuda_stream_view stream)
{
std::vector<orc_column_view> orc_columns;
std::vector<uint32_t> str_col_indexes;
std::function<void(column_view const&, int)> append_orc_column = [&](column_view const& col,
int index_in_table) {
int const str_idx =
(col.type().id() == type_id::STRING) ? static_cast<int>(str_col_indexes.size()) : -1;
auto const& new_col =
orc_columns.emplace_back(orc_columns.size(), str_idx, index_in_table, col, user_metadata);
if (new_col.is_string()) { str_col_indexes.push_back(new_col.index()); }
if (col.type().id() == type_id::LIST)
append_orc_column(col.child(lists_column_view::child_column_index), -1);
if (col.type().id() == type_id::STRUCT)
for (auto child = col.child_begin(); child != col.child_end(); ++child)
append_orc_column(*child, -1);
};
for (auto col_idx = 0; col_idx < table.num_columns(); ++col_idx) {
append_orc_column(table.column(col_idx), col_idx);
}
rmm::device_uvector<orc_column_device_view> d_orc_columns(orc_columns.size(), stream);
cudf::detail::device_single_thread(
[d_orc_cols = device_span<orc_column_device_view>{d_orc_columns},
d_table = d_table] __device__() mutable {
uint32_t idx = 0;
for (auto const& column : d_table) {
append_orc_device_column(idx, thrust::nullopt, d_orc_cols, column);
}
},
stream);
return {std::move(orc_columns),
std::move(d_orc_columns),
str_col_indexes,
cudf::detail::make_device_uvector_sync(str_col_indexes, stream)};
}
hostdevice_2dvector<rowgroup_rows> calculate_rowgroup_bounds(orc_table_view const& orc_table,
size_type rowgroup_size,
rmm::cuda_stream_view stream)
{
auto const num_rowgroups =
cudf::util::div_rounding_up_unsafe<size_t, size_t>(orc_table.num_rows(), rowgroup_size);
hostdevice_2dvector<rowgroup_rows> rowgroup_bounds(
num_rowgroups, orc_table.num_columns(), stream);
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0ul),
num_rowgroups,
[cols = device_span<orc_column_device_view const>{orc_table.d_columns},
rg_bounds = device_2dspan<rowgroup_rows>{rowgroup_bounds},
rowgroup_size] __device__(auto rg_idx) mutable {
thrust::transform(
thrust::seq, cols.begin(), cols.end(), rg_bounds[rg_idx].begin(), [&](auto const& col) {
// Root column
if (!col.parent_index.has_value()) {
size_type const rows_begin = rg_idx * rowgroup_size;
auto const rows_end =
thrust::min<size_type>((rg_idx + 1) * rowgroup_size, col.cudf_column.size());
return rowgroup_rows{rows_begin, rows_end};
} else {
// Child column
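              // LIST children map the parent's row range through the offsets child to get the
              // child's element range; children of other nested parents reuse the parent's range.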
auto const parent_index = *col.parent_index;
column_device_view parent_col = cols[parent_index].cudf_column;
if (parent_col.type().id() != type_id::LIST) return rg_bounds[rg_idx][parent_index];
auto parent_offsets = parent_col.child(lists_column_view::offsets_column_index);
auto const& parent_rowgroup_rows = rg_bounds[rg_idx][parent_index];
auto const rows_begin = parent_offsets.element<size_type>(parent_rowgroup_rows.begin);
auto const rows_end = parent_offsets.element<size_type>(parent_rowgroup_rows.end);
return rowgroup_rows{rows_begin, rows_end};
}
});
});
rowgroup_bounds.device_to_host(stream, true);
return rowgroup_bounds;
}
// Returns per-element encoded-size offsets (device) and per-rowgroup total sizes (host) for
// decimal columns
encoder_decimal_info decimal_chunk_sizes(orc_table_view& orc_table,
file_segmentation const& segmentation,
rmm::cuda_stream_view stream)
{
std::map<uint32_t, rmm::device_uvector<uint32_t>> elem_sizes;
// Compute per-element offsets (within each row group) on the device
for (auto& orc_col : orc_table.columns) {
if (orc_col.orc_kind() == DECIMAL) {
auto& current_sizes =
elem_sizes.insert({orc_col.index(), rmm::device_uvector<uint32_t>(orc_col.size(), stream)})
.first->second;
thrust::tabulate(rmm::exec_policy(stream),
current_sizes.begin(),
current_sizes.end(),
[d_cols = device_span<orc_column_device_view const>{orc_table.d_columns},
col_idx = orc_col.index()] __device__(auto idx) {
auto const& col = d_cols[col_idx].cudf_column;
if (col.is_null(idx)) return 0u;
int64_t const element = (col.type().id() == type_id::DECIMAL32)
? col.element<int32_t>(idx)
: col.element<int64_t>(idx);
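                           // Zigzag-encode the value (0 -> 0, -1 -> 1, 1 -> 2, ...) so the sign
                           // lands in the LSB, then count the base-128 varint bytes needed:
                           // one byte per 7 bits of remaining payload.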
int64_t const sign = (element < 0) ? 1 : 0;
uint64_t zigzaged_value = ((element ^ -sign) * 2) + sign;
uint32_t encoded_length = 1;
while (zigzaged_value > 127) {
zigzaged_value >>= 7u;
++encoded_length;
}
return encoded_length;
});
// Compute element offsets within each row group
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator(0ul),
segmentation.num_rowgroups(),
[sizes = device_span<uint32_t>{current_sizes},
rg_bounds = device_2dspan<rowgroup_rows const>{segmentation.rowgroups},
col_idx = orc_col.index()] __device__(auto rg_idx) {
auto const& range = rg_bounds[rg_idx][col_idx];
thrust::inclusive_scan(thrust::seq,
sizes.begin() + range.begin,
sizes.begin() + range.end,
sizes.begin() + range.begin);
});
orc_col.attach_decimal_offsets(current_sizes.data());
}
}
if (elem_sizes.empty()) return {};
// Gather the row group sizes and copy to host
auto d_tmp_rowgroup_sizes = rmm::device_uvector<uint32_t>(segmentation.num_rowgroups(), stream);
std::map<uint32_t, std::vector<uint32_t>> rg_sizes;
for (auto const& [col_idx, esizes] : elem_sizes) {
    // Copy the last element of each row group; after the inclusive scan it equals the row
    // group's total encoded size
thrust::tabulate(rmm::exec_policy(stream),
d_tmp_rowgroup_sizes.begin(),
d_tmp_rowgroup_sizes.end(),
[src = esizes.data(),
col_idx = col_idx,
rg_bounds = device_2dspan<rowgroup_rows const>{
segmentation.rowgroups}] __device__(auto idx) {
return src[rg_bounds[idx][col_idx].end - 1];
});
rg_sizes[col_idx] = cudf::detail::make_std_vector_async(d_tmp_rowgroup_sizes, stream);
}
return {std::move(elem_sizes), std::move(rg_sizes)};
}
std::map<uint32_t, size_t> decimal_column_sizes(
std::map<uint32_t, std::vector<uint32_t>> const& chunk_sizes)
{
std::map<uint32_t, size_t> column_sizes;
std::transform(chunk_sizes.cbegin(),
chunk_sizes.cend(),
std::inserter(column_sizes, column_sizes.end()),
[](auto const& chunk_size) -> std::pair<uint32_t, size_t> {
return {
chunk_size.first,
std::accumulate(chunk_size.second.cbegin(), chunk_size.second.cend(), 0lu)};
});
return column_sizes;
}
string_dictionaries allocate_dictionaries(orc_table_view const& orc_table,
host_2dspan<rowgroup_rows const> rowgroup_bounds,
rmm::cuda_stream_view stream)
{
thrust::host_vector<bool> is_dict_enabled(orc_table.num_columns());
for (auto col_idx : orc_table.string_column_indices)
is_dict_enabled[col_idx] = std::all_of(
thrust::make_counting_iterator(0ul),
thrust::make_counting_iterator(rowgroup_bounds.size().first),
[&](auto rg_idx) {
return rowgroup_bounds[rg_idx][col_idx].size() < std::numeric_limits<uint16_t>::max();
});
std::vector<rmm::device_uvector<uint32_t>> data;
std::transform(orc_table.string_column_indices.begin(),
orc_table.string_column_indices.end(),
std::back_inserter(data),
[&](auto& idx) {
return cudf::detail::make_zeroed_device_uvector_async<uint32_t>(
orc_table.columns[idx].size(), stream);
});
std::vector<rmm::device_uvector<uint32_t>> index;
std::transform(orc_table.string_column_indices.begin(),
orc_table.string_column_indices.end(),
std::back_inserter(index),
[&](auto& idx) {
return cudf::detail::make_zeroed_device_uvector_async<uint32_t>(
orc_table.columns[idx].size(), stream);
});
stream.synchronize();
std::vector<device_span<uint32_t>> data_ptrs;
std::transform(data.begin(), data.end(), std::back_inserter(data_ptrs), [](auto& uvec) {
return device_span<uint32_t>{uvec};
});
std::vector<device_span<uint32_t>> index_ptrs;
std::transform(index.begin(), index.end(), std::back_inserter(index_ptrs), [](auto& uvec) {
return device_span<uint32_t>{uvec};
});
return {std::move(data),
std::move(index),
cudf::detail::make_device_uvector_sync(data_ptrs, stream),
cudf::detail::make_device_uvector_sync(index_ptrs, stream),
std::move(is_dict_enabled)};
}
void writer::impl::write(table_view const& table)
{
CUDF_EXPECTS(not closed, "Data has already been flushed to out and closed");
auto const num_rows = table.num_rows();
auto const d_table = table_device_view::create(table, stream);
auto orc_table = make_orc_table_view(table, *d_table, user_metadata, stream);
auto rowgroup_bounds = calculate_rowgroup_bounds(orc_table, row_index_stride_, stream);
// Build per-column dictionary indices
auto dictionaries = allocate_dictionaries(orc_table, rowgroup_bounds, stream);
hostdevice_2dvector<gpu::DictionaryChunk> dict(
rowgroup_bounds.size().first, orc_table.num_string_columns(), stream);
if (orc_table.num_string_columns() != 0) {
init_dictionaries(orc_table,
rowgroup_bounds,
dictionaries.d_data_view,
dictionaries.d_index_view,
&dict,
stream);
}
// Decide stripe boundaries based on rowgroups and dict chunks
auto const segmentation =
calculate_segmentation(orc_table.columns, std::move(rowgroup_bounds), max_stripe_size_);
// Build stripe-level dictionaries
hostdevice_2dvector<gpu::StripeDictionary> stripe_dict(
segmentation.num_stripes(), orc_table.num_string_columns(), stream);
if (orc_table.num_string_columns() != 0) {
build_dictionaries(orc_table,
segmentation.stripes,
dict,
dictionaries.index,
dictionaries.dictionary_enabled,
stripe_dict);
}
auto dec_chunk_sizes = decimal_chunk_sizes(orc_table, segmentation, stream);
auto streams =
create_streams(orc_table.columns, segmentation, decimal_column_sizes(dec_chunk_sizes.rg_sizes));
auto enc_data = encode_columns(
orc_table, std::move(dictionaries), std::move(dec_chunk_sizes), segmentation, streams);
  // Assemble the individual column chunks into contiguous data streams
size_type const num_index_streams = (orc_table.num_columns() + 1);
const auto num_data_streams = streams.size() - num_index_streams;
hostdevice_2dvector<gpu::StripeStream> strm_descs(
segmentation.num_stripes(), num_data_streams, stream);
auto stripes = gather_stripes(num_index_streams, segmentation, &enc_data.streams, &strm_descs);
// Gather column statistics
std::vector<ColStatsBlob> column_stats;
if (enable_statistics_ && table.num_columns() > 0 && num_rows > 0) {
column_stats = gather_statistic_blobs(orc_table, segmentation);
}
// Allocate intermediate output stream buffer
size_t compressed_bfr_size = 0;
size_t num_compressed_blocks = 0;
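  // Size the compressed output buffer (worst case: 3-byte header per compression block) and
  // decide whether a host bounce buffer is needed: if every stream can be written directly
  // from device memory no pinned buffer is allocated, otherwise one pinned buffer sized for
  // the largest stream is reused for all host writes.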
auto stream_output = [&]() {
size_t max_stream_size = 0;
bool all_device_write = true;
for (size_t stripe_id = 0; stripe_id < segmentation.num_stripes(); stripe_id++) {
      for (size_t i = 0; i < num_data_streams; i++) {  // TODO: use a range-based for loop (at least here)
gpu::StripeStream* ss = &strm_descs[stripe_id][i];
if (!out_sink_->is_device_write_preferred(ss->stream_size)) { all_device_write = false; }
size_t stream_size = ss->stream_size;
if (compression_kind_ != NONE) {
ss->first_block = num_compressed_blocks;
ss->bfr_offset = compressed_bfr_size;
auto num_blocks = std::max<uint32_t>(
(stream_size + compression_blocksize_ - 1) / compression_blocksize_, 1);
stream_size += num_blocks * 3;
num_compressed_blocks += num_blocks;
compressed_bfr_size += stream_size;
}
max_stream_size = std::max(max_stream_size, stream_size);
}
}
if (all_device_write) {
return pinned_buffer<uint8_t>{nullptr, cudaFreeHost};
} else {
return pinned_buffer<uint8_t>{[](size_t size) {
uint8_t* ptr = nullptr;
CUDA_TRY(cudaMallocHost(&ptr, size));
return ptr;
}(max_stream_size),
cudaFreeHost};
}
}();
// Compress the data streams
rmm::device_buffer compressed_data(compressed_bfr_size, stream);
hostdevice_vector<gpu_inflate_status_s> comp_out(num_compressed_blocks, stream);
hostdevice_vector<gpu_inflate_input_s> comp_in(num_compressed_blocks, stream);
if (compression_kind_ != NONE) {
strm_descs.host_to_device(stream);
gpu::CompressOrcDataStreams(static_cast<uint8_t*>(compressed_data.data()),
num_compressed_blocks,
compression_kind_,
compression_blocksize_,
strm_descs,
enc_data.streams,
comp_in.device_ptr(),
comp_out.device_ptr(),
stream);
strm_descs.device_to_host(stream);
comp_out.device_to_host(stream, true);
}
ProtobufWriter pbw_(&buffer_);
// Write stripes
for (size_t stripe_id = 0; stripe_id < stripes.size(); ++stripe_id) {
auto const& rowgroups_range = segmentation.stripes[stripe_id];
auto& stripe = stripes[stripe_id];
stripe.offset = out_sink_->bytes_written();
// Column (skippable) index streams appear at the start of the stripe
for (size_type stream_id = 0; stream_id < num_index_streams; ++stream_id) {
write_index_stream(stripe_id,
stream_id,
orc_table.columns,
rowgroups_range,
enc_data.streams,
strm_descs,
comp_out,
&stripe,
&streams,
&pbw_);
}
    // Column data, consisting of one or more separate streams
for (auto const& strm_desc : strm_descs[stripe_id]) {
write_data_stream(strm_desc,
enc_data.streams[strm_desc.column_id][rowgroups_range.first],
static_cast<uint8_t*>(compressed_data.data()),
stream_output.get(),
&stripe,
&streams);
}
    // Write the stripe footer, consisting of stream information
StripeFooter sf;
sf.streams = streams;
sf.columns.resize(orc_table.num_columns() + 1);
sf.columns[0].kind = DIRECT;
for (size_t i = 1; i < sf.columns.size(); ++i) {
sf.columns[i].kind = orc_table.column(i - 1).orc_encoding();
sf.columns[i].dictionarySize =
(sf.columns[i].kind == DICTIONARY_V2)
? orc_table.column(i - 1).host_stripe_dict(stripe_id)->num_strings
: 0;
if (orc_table.column(i - 1).orc_kind() == TIMESTAMP) { sf.writerTimezone = "UTC"; }
}
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
pbw_.write(sf);
stripe.footerLength = buffer_.size();
if (compression_kind_ != NONE) {
uint32_t uncomp_sf_len = (stripe.footerLength - 3) * 2 + 1;
buffer_[0] = static_cast<uint8_t>(uncomp_sf_len >> 0);
buffer_[1] = static_cast<uint8_t>(uncomp_sf_len >> 8);
buffer_[2] = static_cast<uint8_t>(uncomp_sf_len >> 16);
}
out_sink_->host_write(buffer_.data(), buffer_.size());
}
if (column_stats.size() != 0) {
// File-level statistics
// NOTE: Excluded from chunked write mode to avoid the need for merging stats across calls
if (single_write_mode) {
// First entry contains total number of rows
buffer_.resize(0);
pbw_.putb(1 * 8 + PB_TYPE_VARINT);
pbw_.put_uint(num_rows);
ff.statistics.reserve(1 + orc_table.num_columns());
ff.statistics.emplace_back(std::move(buffer_));
// Add file stats, stored after stripe stats in `column_stats`
ff.statistics.insert(
ff.statistics.end(),
std::make_move_iterator(column_stats.begin()) + stripes.size() * orc_table.num_columns(),
std::make_move_iterator(column_stats.end()));
}
// Stripe-level statistics
size_t first_stripe = md.stripeStats.size();
md.stripeStats.resize(first_stripe + stripes.size());
for (size_t stripe_id = 0; stripe_id < stripes.size(); stripe_id++) {
md.stripeStats[first_stripe + stripe_id].colStats.resize(1 + orc_table.num_columns());
buffer_.resize(0);
pbw_.putb(1 * 8 + PB_TYPE_VARINT);
pbw_.put_uint(stripes[stripe_id].numberOfRows);
md.stripeStats[first_stripe + stripe_id].colStats[0] = std::move(buffer_);
for (size_t col_idx = 0; col_idx < orc_table.num_columns(); col_idx++) {
size_t idx = stripes.size() * col_idx + stripe_id;
if (idx < column_stats.size()) {
md.stripeStats[first_stripe + stripe_id].colStats[1 + col_idx] =
std::move(column_stats[idx]);
}
}
}
}
if (ff.headerLength == 0) {
// First call
ff.headerLength = std::strlen(MAGIC);
ff.rowIndexStride = row_index_stride_;
ff.types.resize(1 + orc_table.num_columns());
ff.types[0].kind = STRUCT;
for (auto const& column : orc_table.columns) {
if (!column.is_child()) {
ff.types[0].subtypes.emplace_back(column.id());
ff.types[0].fieldNames.emplace_back(column.orc_name());
}
}
for (auto const& column : orc_table.columns) {
auto& schema_type = ff.types[column.id()];
schema_type.kind = column.orc_kind();
if (column.orc_kind() == DECIMAL) {
schema_type.scale = static_cast<uint32_t>(column.scale());
schema_type.precision = column.precision();
}
// In preorder traversal the column after a list column is always the child column
if (column.orc_kind() == LIST) { schema_type.subtypes.emplace_back(column.id() + 1); }
}
} else {
// verify the user isn't passing mismatched tables
CUDF_EXPECTS(ff.types.size() == 1 + orc_table.num_columns(),
"Mismatch in table structure between multiple calls to write");
CUDF_EXPECTS(
std::all_of(orc_table.columns.cbegin(),
orc_table.columns.cend(),
[&](auto const& col) { return ff.types[col.id()].kind == col.orc_kind(); }),
"Mismatch in column types between multiple calls to write");
}
ff.stripes.insert(ff.stripes.end(),
std::make_move_iterator(stripes.begin()),
std::make_move_iterator(stripes.end()));
ff.numberOfRows += num_rows;
}
void writer::impl::close()
{
if (closed) { return; }
closed = true;
ProtobufWriter pbw_(&buffer_);
PostScript ps;
ff.contentLength = out_sink_->bytes_written();
if (user_metadata) {
for (auto it = user_metadata->user_data.begin(); it != user_metadata->user_data.end(); it++) {
ff.metadata.push_back({it->first, it->second});
}
}
// Write statistics metadata
if (md.stripeStats.size() != 0) {
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
pbw_.write(md);
add_uncompressed_block_headers(buffer_);
ps.metadataLength = buffer_.size();
out_sink_->host_write(buffer_.data(), buffer_.size());
} else {
ps.metadataLength = 0;
}
buffer_.resize((compression_kind_ != NONE) ? 3 : 0);
pbw_.write(ff);
add_uncompressed_block_headers(buffer_);
// Write postscript metadata
ps.footerLength = buffer_.size();
ps.compression = compression_kind_;
ps.compressionBlockSize = compression_blocksize_;
ps.version = {0, 12};
ps.magic = MAGIC;
const auto ps_length = static_cast<uint8_t>(pbw_.write(ps));
buffer_.push_back(ps_length);
out_sink_->host_write(buffer_.data(), buffer_.size());
out_sink_->flush();
}
// Forward to implementation
writer::writer(std::unique_ptr<data_sink> sink,
orc_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _impl(std::make_unique<impl>(std::move(sink), options, mode, stream, mr))
{
}
// Forward to implementation
writer::writer(std::unique_ptr<data_sink> sink,
chunked_orc_writer_options const& options,
SingleWriteMode mode,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
: _impl(std::make_unique<impl>(std::move(sink), options, mode, stream, mr))
{
}
// Destructor within this translation unit
writer::~writer() = default;
// Forward to implementation
void writer::write(table_view const& table) { _impl->write(table); }
// Forward to implementation
void writer::close() { _impl->close(); }
} // namespace orc
} // namespace detail
} // namespace io
} // namespace cudf
|
d4be5beaab0bfe42999de70e30720b994eef2dad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void BinaryErosion (unsigned int *dst, int imageW, int imageH, int mask_w, int mask_h)
{
const int ix = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int iy = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if(ix < imageW && iy < imageH){
int match = 0;
for (int m = ix - mask_w ; m < ix + mask_w && !match; m++){
for (int n = iy - mask_h ; n < iy + mask_h && !match; n++){
float4 fresult = tex2D(texUCHAR, m, n);
if (fresult.x == 1.f && fresult.y == 1.f && fresult.z == 1.f )
match = 1;
}
}
if(!match)
dst[imageW * iy + ix] = make_color(0.f, 0.f, 0.f , 1.f);
else
dst[imageW * iy + ix] = make_color(1.f, 1.f, 1.f , 1.f);
}
}
extern "C" float binaryErosionWrapper (unsigned int *dst, int imageW, int imageH, int threshold, int iteration, float brightness, float contrast, int mask_w, int mask_h, int adjust)
{
  // for more efficient kernel execution
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
unsigned int timer;
float runtime;
cutCreateTimer(&timer);
cutStartTimer(timer);
if(adjust)
hipLaunchKernelGGL(( Grayscale), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH, brightness, contrast);
else
hipLaunchKernelGGL(( Grayscale2), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH);
hipLaunchKernelGGL(( Binarize), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH, threshold);
for(int i=0; i<iteration; i++)
{
hipMemcpyToArray( d_tempArray, 0, 0, dst, imageW * imageH * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipBindTextureToArray(texUCHAR, d_tempArray);
hipLaunchKernelGGL(( BinaryErosion), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH, mask_w, mask_h);
}
hipUnbindTexture(texUCHAR);
hipDeviceSynchronize();
cutStopTimer(timer);
runtime = cutGetTimerValue(timer)/1000;
cutDeleteTimer(timer);
return runtime;
}
|
d4be5beaab0bfe42999de70e30720b994eef2dad.cu
|
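// For each output pixel, scan the surrounding (2*mask_w) x (2*mask_h) window of the bound
// texture: the pixel is written white if any neighbour is white and black otherwise, i.e.
// the black regions of the binarized image are eroded by the mask.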
__global__ void BinaryErosion (unsigned int *dst, int imageW, int imageH, int mask_w, int mask_h)
{
const int ix = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int iy = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if(ix < imageW && iy < imageH){
int match = 0;
for (int m = ix - mask_w ; m < ix + mask_w && !match; m++){
for (int n = iy - mask_h ; n < iy + mask_h && !match; n++){
float4 fresult = tex2D(texUCHAR, m, n);
if (fresult.x == 1.f && fresult.y == 1.f && fresult.z == 1.f )
match = 1;
}
}
if(!match)
dst[imageW * iy + ix] = make_color(0.f, 0.f, 0.f , 1.f);
else
dst[imageW * iy + ix] = make_color(1.f, 1.f, 1.f , 1.f);
}
}
extern "C" float binaryErosionWrapper (unsigned int *dst, int imageW, int imageH, int threshold, int iteration, float brightness, float contrast, int mask_w, int mask_h, int adjust)
{
  // for more efficient kernel execution
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
unsigned int timer;
float runtime;
cutCreateTimer(&timer);
cutStartTimer(timer);
if(adjust)
Grayscale<<<grid, threads>>>(dst, imageW, imageH, brightness, contrast);
else
Grayscale2<<<grid, threads>>>(dst, imageW, imageH);
Binarize<<<grid, threads>>>(dst, imageW, imageH, threshold);
for(int i=0; i<iteration; i++)
{
cudaMemcpyToArray( d_tempArray, 0, 0, dst, imageW * imageH * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaBindTextureToArray(texUCHAR, d_tempArray);
BinaryErosion<<<grid, threads>>>(dst, imageW, imageH, mask_w, mask_h);
}
cudaUnbindTexture(texUCHAR);
cudaThreadSynchronize();
cutStopTimer(timer);
runtime = cutGetTimerValue(timer)/1000;
cutDeleteTimer(timer);
return runtime;
}
|
4fee75623ab962c64befa2e106cad5961397ec79.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "uniforms.cuh"
extern "C"
{
__constant__ UniformBuffer camera;
__constant__ float uniform[48];
texture<float4, hipTextureType2D> texf;
__constant__ uint64_t stippling_mask;
}
|
4fee75623ab962c64befa2e106cad5961397ec79.cu
|
#include "uniforms.cuh"
extern "C"
{
__constant__ UniformBuffer camera;
__constant__ float uniform[48];
texture<float4, cudaTextureType2D> texf;
__constant__ uint64_t stippling_mask;
}
|
ab7df77cfca05f6480cbf1f6add255132132971a.hip
|
// !!! This is a file automatically generated by hipify!!!
%%cuda --name week4_Programming_Assignment.cu
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
void print_array(float *A, int N){
for(int i=0;i<N;i++)
printf("%.2f ",A[i]);
printf("\n");
}
//--------------------------------------------------------------------------------------------------------------
__global__ void
process_kernel1(float *input1, float *input2, float *output, int datasize)
{
int numElements = datasize / sizeof(float);
//Write code for i
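    // Flatten the 3D grid/block coordinates into one global element index:
    // i = (linearized block id) * (threads per block) + (linearized thread id in the block)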
int blockNum=blockIdx.z*(gridDim.x * gridDim.y)+blockIdx.y * gridDim.x+blockIdx.x;
int threadNum=threadIdx.z*(blockDim.x* blockDim.y)+threadIdx.y*blockDim.x+threadIdx.x;
int i= blockNum*blockDim.x*blockDim.y*blockDim.z+threadNum;
if (i < numElements){
output[i] = sinf(input1[i]) + cosf(input2[i]);
}
}
__global__ void
process_kernel2(float *input, float *output, int datasize)
{
int numElements = datasize / sizeof(float);
//Write code for i
int blockNum=blockIdx.z*(gridDim.x * gridDim.y)+blockIdx.y * gridDim.x+blockIdx.x;
int threadNum=threadIdx.z*(blockDim.x* blockDim.y)+threadIdx.y*blockDim.x+threadIdx.x;
int i= blockNum*blockDim.x*blockDim.y*blockDim.z+threadNum;
if (i < numElements)
{
output[i] = logf(input[i]);
}
}
__global__ void
process_kernel3(float *input, float *output, int datasize)
{
int numElements = datasize / sizeof(float);
//Write code for i
int blockNum=blockIdx.z*(gridDim.x * gridDim.y)+blockIdx.y * gridDim.x+blockIdx.x;
int threadNum=threadIdx.z*(blockDim.x* blockDim.y)+threadIdx.y*blockDim.x+threadIdx.x;
int i= blockNum*blockDim.x*blockDim.y*blockDim.z+threadNum;
if (i < numElements)
{
output[i] = sqrtf(input[i]);
}
}
//------------------------------------------------------------------------------------------------------------------------------------------
int main(void){
hipError_t err = hipSuccess;
int numElements = 16384;
size_t size = numElements * sizeof(float);
float *h_input1 = (float *)malloc(size);
float *h_input2 = (float *)malloc(size);
float *h_output1 = (float *)malloc(size);
float *h_output2 = (float *)malloc(size);
float *h_output3 = (float *)malloc(size);
if (h_input1 == NULL || h_input2 == NULL || h_output1 == NULL || h_output2 == NULL || h_output3 == NULL){
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
for (int i = 0; i < numElements; ++i){
scanf("%f",&h_input1[i]);
}
for (int i = 0; i < numElements; ++i){
scanf("%f",&h_input2[i]);
}
float *d_input1 = NULL;
err = hipMalloc((void **)&d_input1, size);
if (err != hipSuccess){
fprintf(stderr, "Failed to allocate device vector d_input1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_input2 = NULL;
err = hipMalloc((void **)&d_input2, size);
if (err != hipSuccess){
fprintf(stderr, "Failed to allocate device vector d_input2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output1 = NULL;
err = hipMalloc((void **)&d_output1, size);
if (err != hipSuccess){
fprintf(stderr, "Failed to allocate device vector h_output1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output2 = NULL;
err = hipMalloc((void **)&d_output2, size);
if (err != hipSuccess){
fprintf(stderr, "Failed to allocate device vector h_output2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output3 = NULL;
err = hipMalloc((void **)&d_output3, size);
if (err != hipSuccess){
fprintf(stderr, "Failed to allocate device vector h_output3 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_input1, h_input1, size, hipMemcpyHostToDevice);
if (err != hipSuccess){
fprintf(stderr, "Failed to copy vector h_input1 from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_input2, h_input2, size, hipMemcpyHostToDevice);
if (err != hipSuccess){
fprintf(stderr, "Failed to copy vector h_input2 from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Complete Code for launching process_kernel1 -------------------------------------------------------------------------------------
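    // Launch configuration: grid 4x2x2 = 16 blocks of 32x32x1 = 1024 threads each,
    // i.e. 16384 threads in total, one per element.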
hipLaunchKernelGGL(( process_kernel1), dim3(dim3(4,2,2)),dim3(dim3(32,32,1)), 0, 0, d_input1, d_input2, d_output1, size);
err = hipGetLastError();
if (err != hipSuccess){
fprintf(stderr, "Failed to launch process_kernel1 kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Complete Code for launching process_kernel2 ------------------------------------------------------------------------------------
hipLaunchKernelGGL(( process_kernel2), dim3(dim3(2,8,1)),dim3(dim3(8,8,16)), 0, 0, d_output1,d_output2, size);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch process_kernel2 kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Complete Code for launching process_kernel3 -------------------------------------------------------------------------------------
hipLaunchKernelGGL(( process_kernel3), dim3(dim3(16,1,1)),dim3(dim3(128,8,1)), 0, 0, d_output2, d_output3, size);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch process_kernel3 kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//--------------------------------------------------------------------------------------------------------------------------------------------
// printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_output1, d_output1, size, hipMemcpyDeviceToHost);
if (err != hipSuccess){
fprintf(stderr, "Failed to copy vector d_output1 from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(h_output2, d_output2, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector d_output2 from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(h_output3, d_output3, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector d_output3 from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vectors are as expected
for (int i = 0; i < numElements; ++i)
{
if (fabs(sinf(h_input1[i]) + cosf(h_input2[i]) - h_output1[i]) > 1e-5)
{
fprintf(stderr, "Result verification for h_output1 failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
for (int i = 0; i < numElements; ++i)
{
if (fabs(logf(h_output1[i]) - h_output2[i]) > 1e-5)
{
fprintf(stderr, "Result verification for h_output2 failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
for (int i = 0; i < numElements; ++i)
{
if (fabs(sqrtf(h_output2[i]) - h_output3[i]) > 1e-5)
{
fprintf(stderr, "Result verification for h_output3 failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
print_array(h_output3,numElements);
err = hipFree(d_input1);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector d_input1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_input2);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector d_input2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_output1);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector d_output1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_output2);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector d_output2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_output3);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector d_output3 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
free(h_input1);
free(h_input2);
free(h_output1);
free(h_output2);
free(h_output3);
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
return 0;
}
|
ab7df77cfca05f6480cbf1f6add255132132971a.cu
|
%%cuda --name week4_Programming_Assignment.cu
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
void print_array(float *A, int N){
for(int i=0;i<N;i++)
printf("%.2f ",A[i]);
printf("\n");
}
//--------------------------------------------------------------------------------------------------------------
__global__ void
process_kernel1(float *input1, float *input2, float *output, int datasize)
{
int numElements = datasize / sizeof(float);
//Compute the flattened global 1-D thread index i from the 3-D grid and block coordinates
int blockNum=blockIdx.z*(gridDim.x * gridDim.y)+blockIdx.y * gridDim.x+blockIdx.x;
int threadNum=threadIdx.z*(blockDim.x* blockDim.y)+threadIdx.y*blockDim.x+threadIdx.x;
int i= blockNum*blockDim.x*blockDim.y*blockDim.z+threadNum;
if (i < numElements){
output[i] = sinf(input1[i]) + cosf(input2[i]);
}
}
__global__ void
process_kernel2(float *input, float *output, int datasize)
{
int numElements = datasize / sizeof(float);
//Compute the flattened global 1-D thread index i from the 3-D grid and block coordinates
int blockNum=blockIdx.z*(gridDim.x * gridDim.y)+blockIdx.y * gridDim.x+blockIdx.x;
int threadNum=threadIdx.z*(blockDim.x* blockDim.y)+threadIdx.y*blockDim.x+threadIdx.x;
int i= blockNum*blockDim.x*blockDim.y*blockDim.z+threadNum;
if (i < numElements)
{
output[i] = logf(input[i]);
}
}
__global__ void
process_kernel3(float *input, float *output, int datasize)
{
int numElements = datasize / sizeof(float);
//Compute the flattened global 1-D thread index i from the 3-D grid and block coordinates
int blockNum=blockIdx.z*(gridDim.x * gridDim.y)+blockIdx.y * gridDim.x+blockIdx.x;
int threadNum=threadIdx.z*(blockDim.x* blockDim.y)+threadIdx.y*blockDim.x+threadIdx.x;
int i= blockNum*blockDim.x*blockDim.y*blockDim.z+threadNum;
if (i < numElements)
{
output[i] = sqrtf(input[i]);
}
}
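// Worked example for the index computation above, assuming the launch configuration
// used for process_kernel2 (grid dim3(2,8,1), block dim3(8,8,16)): a thread with
// blockIdx=(1,2,0) and threadIdx=(3,4,5) computes
//   blockNum  = 0*(2*8) + 2*2 + 1  = 5
//   threadNum = 5*(8*8) + 4*8 + 3  = 355
//   i         = 5*(8*8*16) + 355   = 5475
// so the 16 blocks * 1024 threads cover all 16384 elements exactly once.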
//------------------------------------------------------------------------------------------------------------------------------------------
int main(void){
cudaError_t err = cudaSuccess;
int numElements = 16384;
size_t size = numElements * sizeof(float);
float *h_input1 = (float *)malloc(size);
float *h_input2 = (float *)malloc(size);
float *h_output1 = (float *)malloc(size);
float *h_output2 = (float *)malloc(size);
float *h_output3 = (float *)malloc(size);
if (h_input1 == NULL || h_input2 == NULL || h_output1 == NULL || h_output2 == NULL || h_output3 == NULL){
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
for (int i = 0; i < numElements; ++i){
scanf("%f",&h_input1[i]);
}
for (int i = 0; i < numElements; ++i){
scanf("%f",&h_input2[i]);
}
float *d_input1 = NULL;
err = cudaMalloc((void **)&d_input1, size);
if (err != cudaSuccess){
fprintf(stderr, "Failed to allocate device vector d_input1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_input2 = NULL;
err = cudaMalloc((void **)&d_input2, size);
if (err != cudaSuccess){
fprintf(stderr, "Failed to allocate device vector d_input2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output1 = NULL;
err = cudaMalloc((void **)&d_output1, size);
if (err != cudaSuccess){
fprintf(stderr, "Failed to allocate device vector h_output1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output2 = NULL;
err = cudaMalloc((void **)&d_output2, size);
if (err != cudaSuccess){
fprintf(stderr, "Failed to allocate device vector h_output2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output3 = NULL;
err = cudaMalloc((void **)&d_output3, size);
if (err != cudaSuccess){
fprintf(stderr, "Failed to allocate device vector h_output3 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_input1, h_input1, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess){
fprintf(stderr, "Failed to copy vector h_input1 from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_input2, h_input2, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess){
fprintf(stderr, "Failed to copy vector h_input2 from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Complete Code for launching process_kernel1 -------------------------------------------------------------------------------------
process_kernel1<<<dim3(4,2,2),dim3(32,32,1)>>>(d_input1, d_input2, d_output1, size);
err = cudaGetLastError();
if (err != cudaSuccess){
fprintf(stderr, "Failed to launch process_kernel1 kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Complete Code for launching process_kernel2 ------------------------------------------------------------------------------------
process_kernel2<<<dim3(2,8,1),dim3(8,8,16)>>>(d_output1,d_output2, size);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch process_kernel2 kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Complete Code for launching process_kernel3 -------------------------------------------------------------------------------------
process_kernel3<<<dim3(16,1,1),dim3(128,8,1)>>>(d_output2, d_output3, size);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch process_kernel3 kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//--------------------------------------------------------------------------------------------------------------------------------------------
// printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_output1, d_output1, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess){
fprintf(stderr, "Failed to copy vector d_output1 from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(h_output2, d_output2, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector d_output2 from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(h_output3, d_output3, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector d_output3 from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vectors are as expected
for (int i = 0; i < numElements; ++i)
{
if (fabs(sinf(h_input1[i]) + cosf(h_input2[i]) - h_output1[i]) > 1e-5)
{
fprintf(stderr, "Result verification for h_output1 failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
for (int i = 0; i < numElements; ++i)
{
if (fabs(logf(h_output1[i]) - h_output2[i]) > 1e-5)
{
fprintf(stderr, "Result verification for h_output2 failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
for (int i = 0; i < numElements; ++i)
{
if (fabs(sqrtf(h_output2[i]) - h_output3[i]) > 1e-5)
{
fprintf(stderr, "Result verification for h_output3 failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
print_array(h_output3,numElements);
err = cudaFree(d_input1);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector d_input1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_input2);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector d_input2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_output1);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector d_output1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_output2);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector d_output2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_output3);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector d_output3 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
free(h_input1);
free(h_input2);
free(h_output1);
free(h_output2);
free(h_output3);
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return 0;
}
|
bd2ecccc910f4d6bb2d5e1f67e8ca8d0d95e9f12.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
3D IoU Calculation and Rotated NMS (modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <stdio.h>
#define THREADS_PER_BLOCK 16
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
const float EPS = 1e-8;
struct Point {
float x, y;
__device__ Point() {}
__device__ Point(double _x, double _y){
x = _x, y = _y;
}
__device__ void set(float _x, float _y){
x = _x; y = _y;
}
__device__ Point operator +(const Point &b)const{
return Point(x + b.x, y + b.y);
}
__device__ Point operator -(const Point &b)const{
return Point(x - b.x, y - b.y);
}
};
__device__ inline float cross(const Point &a, const Point &b){
return a.x * b.y - a.y * b.x;
}
__device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){
return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}
__device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){
int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) &&
min(q1.x,q2.x) <= max(p1.x,p2.x) &&
min(p1.y,p2.y) <= max(q1.y,q2.y) &&
min(q1.y,q2.y) <= max(p1.y,p2.y);
return ret;
}
__device__ inline int check_in_box2d(const float *box, const Point &p){
//params: (7) [x, y, z, dx, dy, dz, heading]
const float MARGIN = 1e-2;
float center_x = box[0], center_y = box[1];
float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box
float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin);
float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos;
return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN);
}
__device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){
// fast exclusion
if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
// check cross standing
float s1 = cross(q0, p1, p0);
float s2 = cross(p1, q1, p0);
float s3 = cross(p0, q1, q0);
float s4 = cross(q1, p1, q0);
if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
// calculate intersection of two lines
float s5 = cross(q1, p1, p0);
if(fabs(s5 - s1) > EPS){
ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
}
else{
float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
float D = a0 * b1 - a1 * b0;
ans.x = (b0 * c1 - b1 * c0) / D;
ans.y = (a1 * c0 - a0 * c1) / D;
}
return 1;
}
__device__ inline void rotate_around_center(const Point ¢er, const float angle_cos, const float angle_sin, Point &p){
float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x;
float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
p.set(new_x, new_y);
}
__device__ inline int point_cmp(const Point &a, const Point &b, const Point ¢er){
return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x);
}
__device__ inline float box_overlap(const float *box_a, const float *box_b){
// params box_a: [x, y, z, dx, dy, dz, heading]
// params box_b: [x, y, z, dx, dy, dz, heading]
float a_angle = box_a[6], b_angle = box_b[6];
float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2;
float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half;
float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half;
float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half;
float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half;
Point center_a(box_a[0], box_a[1]);
Point center_b(box_b[0], box_b[1]);
#ifdef DEBUG
printf("a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle,
b_x1, b_y1, b_x2, b_y2, b_angle);
printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y);
#endif
Point box_a_corners[5];
box_a_corners[0].set(a_x1, a_y1);
box_a_corners[1].set(a_x2, a_y1);
box_a_corners[2].set(a_x2, a_y2);
box_a_corners[3].set(a_x1, a_y2);
Point box_b_corners[5];
box_b_corners[0].set(b_x1, b_y1);
box_b_corners[1].set(b_x2, b_y1);
box_b_corners[2].set(b_x2, b_y2);
box_b_corners[3].set(b_x1, b_y2);
// get oriented corners
float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
for (int k = 0; k < 4; k++){
#ifdef DEBUG
printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
#endif
rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
#ifdef DEBUG
printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
#endif
}
box_a_corners[4] = box_a_corners[0];
box_b_corners[4] = box_b_corners[0];
// get intersection of lines
Point cross_points[16];
Point poly_center;
int cnt = 0, flag = 0;
poly_center.set(0, 0);
for (int i = 0; i < 4; i++){
for (int j = 0; j < 4; j++){
flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]);
if (flag){
poly_center = poly_center + cross_points[cnt];
cnt++;
#ifdef DEBUG
printf("Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), b(%.3f, %.3f)->(%.3f, %.3f) \n",
cross_points[cnt - 1].x, cross_points[cnt - 1].y,
box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x, box_a_corners[i + 1].y,
box_b_corners[j].x, box_b_corners[j].y, box_b_corners[j + 1].x, box_b_corners[j + 1].y);
#endif
}
}
}
// check corners
for (int k = 0; k < 4; k++){
if (check_in_box2d(box_a, box_b_corners[k])){
poly_center = poly_center + box_b_corners[k];
cross_points[cnt] = box_b_corners[k];
cnt++;
#ifdef DEBUG
printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y);
#endif
}
if (check_in_box2d(box_b, box_a_corners[k])){
poly_center = poly_center + box_a_corners[k];
cross_points[cnt] = box_a_corners[k];
cnt++;
#ifdef DEBUG
printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y);
#endif
}
}
poly_center.x /= cnt;
poly_center.y /= cnt;
// sort the points of polygon
Point temp;
for (int j = 0; j < cnt - 1; j++){
for (int i = 0; i < cnt - j - 1; i++){
if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){
temp = cross_points[i];
cross_points[i] = cross_points[i + 1];
cross_points[i + 1] = temp;
}
}
}
#ifdef DEBUG
printf("cnt=%d\n", cnt);
for (int i = 0; i < cnt; i++){
printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y);
}
#endif
// get the overlap areas
float area = 0;
for (int k = 0; k < cnt - 1; k++){
area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]);
}
return fabs(area) / 2.0;
}
__device__ inline float iou_bev(const float *box_a, const float *box_b){
// params box_a: [x, y, z, dx, dy, dz, heading]
// params box_b: [x, y, z, dx, dy, dz, heading]
float sa = box_a[3] * box_a[4];
float sb = box_b[3] * box_b[4];
float s_overlap = box_overlap(box_a, box_b);
return s_overlap / fmaxf(sa + sb - s_overlap, EPS);
}
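// Worked example: for two 2 x 2 boxes whose rotated footprints overlap over a
// 1 x 2 strip, sa = sb = 4 and s_overlap = 2, so iou_bev = 2 / (4 + 4 - 2) = 1/3.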
__global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){
// params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
// params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (a_idx >= num_a || b_idx >= num_b){
return;
}
const float * cur_box_a = boxes_a + a_idx * 7;
const float * cur_box_b = boxes_b + b_idx * 7;
float s_overlap = box_overlap(cur_box_a, cur_box_b);
ans_overlap[a_idx * num_b + b_idx] = s_overlap;
}
__global__ void boxes_aligned_overlap_kernel(const int num_box, const float *boxes_a, const float *boxes_b, float *ans_overlap){
// params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
// params boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading]
const int idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (idx >= num_box){
return;
}
const float * cur_box_a = boxes_a + idx * 7;
const float * cur_box_b = boxes_b + idx * 7;
float s_overlap = box_overlap(cur_box_a, cur_box_b);
ans_overlap[idx] = s_overlap;
}
__global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){
// params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
// params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (a_idx >= num_a || b_idx >= num_b){
return;
}
const float * cur_box_a = boxes_a + a_idx * 7;
const float * cur_box_b = boxes_b + b_idx * 7;
float cur_iou_bev = iou_bev(cur_box_a, cur_box_b);
ans_iou[a_idx * num_b + b_idx] = cur_iou_bev;
}
__global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh,
const float *boxes, unsigned long long *mask){
//params: boxes (N, 7) [x, y, z, dx, dy, dz, heading]
//params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 7;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh){
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
__device__ inline float iou_normal(float const * const a, float const * const b) {
//params: a: [x, y, z, dx, dy, dz, heading]
//params: b: [x, y, z, dx, dy, dz, heading]
float left = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2), right = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2);
float top = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2), bottom = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2);
float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f);
float interS = width * height;
float Sa = a[3] * a[4];
float Sb = b[3] * b[4];
return interS / fmaxf(Sa + Sb - interS, EPS);
}
__global__ void nms_normal_kernel(const int boxes_num, const float nms_overlap_thresh,
const float *boxes, unsigned long long *mask){
//params: boxes (N, 7) [x, y, z, dx, dy, dz, heading]
//params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 7;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (iou_normal(cur_box, block_boxes + i * 7) > nms_overlap_thresh){
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void boxesoverlapLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){
dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
hipLaunchKernelGGL(( boxes_overlap_kernel), dim3(blocks), dim3(threads), 0, 0, num_a, boxes_a, num_b, boxes_b, ans_overlap);
#ifdef DEBUG
hipDeviceSynchronize(); // for using printf in kernel function
#endif
}
void boxesalignedoverlapLauncher(const int num_box, const float *boxes_a, const float *boxes_b, float *ans_overlap){
dim3 blocks(DIVUP(num_box, THREADS_PER_BLOCK)); // 1-D grid: blockIdx.x indexes the aligned box pairs
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( boxes_aligned_overlap_kernel), dim3(blocks), dim3(threads), 0, 0, num_box, boxes_a, boxes_b, ans_overlap);
#ifdef DEBUG
hipDeviceSynchronize(); // for using printf in kernel function
#endif
}
void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){
dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
hipLaunchKernelGGL(( boxes_iou_bev_kernel), dim3(blocks), dim3(threads), 0, 0, num_a, boxes_a, num_b, boxes_b, ans_iou);
#ifdef DEBUG
hipDeviceSynchronize(); // for using printf in kernel function
#endif
}
void nmsLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){
dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
dim3 threads(THREADS_PER_BLOCK_NMS);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes, mask);
}
void nmsNormalLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){
dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
dim3 threads(THREADS_PER_BLOCK_NMS);
hipLaunchKernelGGL(( nms_normal_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes, mask);
}
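// Minimal host-side sketch of how the bitmask produced by nms_kernel is usually
// reduced to the list of kept boxes after it has been copied back to the host.
// The helper name, the caller-provided kept_indices buffer and the fixed bound on
// col_blocks are assumptions for illustration; they are not part of the launchers above.
inline int nms_collect_kept_indices_host(const unsigned long long *host_mask, int boxes_num, int *kept_indices){
    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
    // remv[j] accumulates, per 64-box column block, the boxes suppressed so far
    // (assumes boxes_num <= 4096 * THREADS_PER_BLOCK_NMS).
    unsigned long long remv[4096] = {0};
    int num_kept = 0;
    for (int i = 0; i < boxes_num; i++){
        const int nblock = i / THREADS_PER_BLOCK_NMS;
        const int inblock = i % THREADS_PER_BLOCK_NMS;
        if (!(remv[nblock] & (1ULL << inblock))){
            kept_indices[num_kept++] = i;                     // box i survives NMS
            for (int j = nblock; j < col_blocks; j++){
                remv[j] |= host_mask[i * col_blocks + j];     // suppress everything box i overlaps
            }
        }
    }
    return num_kept;
}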
|
bd2ecccc910f4d6bb2d5e1f67e8ca8d0d95e9f12.cu
|
/*
3D IoU Calculation and Rotated NMS (modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <stdio.h>
#define THREADS_PER_BLOCK 16
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
const float EPS = 1e-8;
struct Point {
float x, y;
__device__ Point() {}
__device__ Point(double _x, double _y){
x = _x, y = _y;
}
__device__ void set(float _x, float _y){
x = _x; y = _y;
}
__device__ Point operator +(const Point &b)const{
return Point(x + b.x, y + b.y);
}
__device__ Point operator -(const Point &b)const{
return Point(x - b.x, y - b.y);
}
};
__device__ inline float cross(const Point &a, const Point &b){
return a.x * b.y - a.y * b.x;
}
__device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){
return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}
__device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){
int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) &&
min(q1.x,q2.x) <= max(p1.x,p2.x) &&
min(p1.y,p2.y) <= max(q1.y,q2.y) &&
min(q1.y,q2.y) <= max(p1.y,p2.y);
return ret;
}
__device__ inline int check_in_box2d(const float *box, const Point &p){
//params: (7) [x, y, z, dx, dy, dz, heading]
const float MARGIN = 1e-2;
float center_x = box[0], center_y = box[1];
float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box
float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin);
float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos;
return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN);
}
__device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){
// fast exclusion
if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
// check cross standing
float s1 = cross(q0, p1, p0);
float s2 = cross(p1, q1, p0);
float s3 = cross(p0, q1, q0);
float s4 = cross(q1, p1, q0);
if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
// calculate intersection of two lines
float s5 = cross(q1, p1, p0);
if(fabs(s5 - s1) > EPS){
ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
}
else{
float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
float D = a0 * b1 - a1 * b0;
ans.x = (b0 * c1 - b1 * c0) / D;
ans.y = (a1 * c0 - a0 * c1) / D;
}
return 1;
}
__device__ inline void rotate_around_center(const Point ¢er, const float angle_cos, const float angle_sin, Point &p){
float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x;
float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
p.set(new_x, new_y);
}
__device__ inline int point_cmp(const Point &a, const Point &b, const Point ¢er){
return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x);
}
__device__ inline float box_overlap(const float *box_a, const float *box_b){
// params box_a: [x, y, z, dx, dy, dz, heading]
// params box_b: [x, y, z, dx, dy, dz, heading]
float a_angle = box_a[6], b_angle = box_b[6];
float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2;
float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half;
float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half;
float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half;
float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half;
Point center_a(box_a[0], box_a[1]);
Point center_b(box_b[0], box_b[1]);
#ifdef DEBUG
printf("a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle,
b_x1, b_y1, b_x2, b_y2, b_angle);
printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y);
#endif
Point box_a_corners[5];
box_a_corners[0].set(a_x1, a_y1);
box_a_corners[1].set(a_x2, a_y1);
box_a_corners[2].set(a_x2, a_y2);
box_a_corners[3].set(a_x1, a_y2);
Point box_b_corners[5];
box_b_corners[0].set(b_x1, b_y1);
box_b_corners[1].set(b_x2, b_y1);
box_b_corners[2].set(b_x2, b_y2);
box_b_corners[3].set(b_x1, b_y2);
// get oriented corners
float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
for (int k = 0; k < 4; k++){
#ifdef DEBUG
printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
#endif
rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
#ifdef DEBUG
printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
#endif
}
box_a_corners[4] = box_a_corners[0];
box_b_corners[4] = box_b_corners[0];
// get intersection of lines
Point cross_points[16];
Point poly_center;
int cnt = 0, flag = 0;
poly_center.set(0, 0);
for (int i = 0; i < 4; i++){
for (int j = 0; j < 4; j++){
flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]);
if (flag){
poly_center = poly_center + cross_points[cnt];
cnt++;
#ifdef DEBUG
printf("Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), b(%.3f, %.3f)->(%.3f, %.3f) \n",
cross_points[cnt - 1].x, cross_points[cnt - 1].y,
box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x, box_a_corners[i + 1].y,
box_b_corners[j].x, box_b_corners[j].y, box_b_corners[j + 1].x, box_b_corners[j + 1].y);
#endif
}
}
}
// check corners
for (int k = 0; k < 4; k++){
if (check_in_box2d(box_a, box_b_corners[k])){
poly_center = poly_center + box_b_corners[k];
cross_points[cnt] = box_b_corners[k];
cnt++;
#ifdef DEBUG
printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y);
#endif
}
if (check_in_box2d(box_b, box_a_corners[k])){
poly_center = poly_center + box_a_corners[k];
cross_points[cnt] = box_a_corners[k];
cnt++;
#ifdef DEBUG
printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y);
#endif
}
}
poly_center.x /= cnt;
poly_center.y /= cnt;
// sort the points of polygon
Point temp;
for (int j = 0; j < cnt - 1; j++){
for (int i = 0; i < cnt - j - 1; i++){
if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){
temp = cross_points[i];
cross_points[i] = cross_points[i + 1];
cross_points[i + 1] = temp;
}
}
}
#ifdef DEBUG
printf("cnt=%d\n", cnt);
for (int i = 0; i < cnt; i++){
printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y);
}
#endif
// get the overlap areas
float area = 0;
for (int k = 0; k < cnt - 1; k++){
area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]);
}
return fabs(area) / 2.0;
}
__device__ inline float iou_bev(const float *box_a, const float *box_b){
// params box_a: [x, y, z, dx, dy, dz, heading]
// params box_b: [x, y, z, dx, dy, dz, heading]
float sa = box_a[3] * box_a[4];
float sb = box_b[3] * box_b[4];
float s_overlap = box_overlap(box_a, box_b);
return s_overlap / fmaxf(sa + sb - s_overlap, EPS);
}
__global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){
// params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
// params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (a_idx >= num_a || b_idx >= num_b){
return;
}
const float * cur_box_a = boxes_a + a_idx * 7;
const float * cur_box_b = boxes_b + b_idx * 7;
float s_overlap = box_overlap(cur_box_a, cur_box_b);
ans_overlap[a_idx * num_b + b_idx] = s_overlap;
}
__global__ void boxes_aligned_overlap_kernel(const int num_box, const float *boxes_a, const float *boxes_b, float *ans_overlap){
// params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
// params boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading]
const int idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (idx >= num_box){
return;
}
const float * cur_box_a = boxes_a + idx * 7;
const float * cur_box_b = boxes_b + idx * 7;
float s_overlap = box_overlap(cur_box_a, cur_box_b);
ans_overlap[idx] = s_overlap;
}
__global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){
// params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
// params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (a_idx >= num_a || b_idx >= num_b){
return;
}
const float * cur_box_a = boxes_a + a_idx * 7;
const float * cur_box_b = boxes_b + b_idx * 7;
float cur_iou_bev = iou_bev(cur_box_a, cur_box_b);
ans_iou[a_idx * num_b + b_idx] = cur_iou_bev;
}
__global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh,
const float *boxes, unsigned long long *mask){
//params: boxes (N, 7) [x, y, z, dx, dy, dz, heading]
//params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 7;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh){
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
__device__ inline float iou_normal(float const * const a, float const * const b) {
//params: a: [x, y, z, dx, dy, dz, heading]
//params: b: [x, y, z, dx, dy, dz, heading]
float left = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2), right = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2);
float top = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2), bottom = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2);
float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f);
float interS = width * height;
float Sa = a[3] * a[4];
float Sb = b[3] * b[4];
return interS / fmaxf(Sa + Sb - interS, EPS);
}
__global__ void nms_normal_kernel(const int boxes_num, const float nms_overlap_thresh,
const float *boxes, unsigned long long *mask){
//params: boxes (N, 7) [x, y, z, dx, dy, dz, heading]
//params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 7;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (iou_normal(cur_box, block_boxes + i * 7) > nms_overlap_thresh){
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void boxesoverlapLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){
dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
boxes_overlap_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b, ans_overlap);
#ifdef DEBUG
cudaDeviceSynchronize(); // for using printf in kernel function
#endif
}
void boxesalignedoverlapLauncher(const int num_box, const float *boxes_a, const float *boxes_b, float *ans_overlap){
dim3 blocks(DIVUP(num_box, THREADS_PER_BLOCK)); // 1-D grid: blockIdx.x indexes the aligned box pairs
dim3 threads(THREADS_PER_BLOCK);
boxes_aligned_overlap_kernel<<<blocks, threads>>>(num_box, boxes_a, boxes_b, ans_overlap);
#ifdef DEBUG
cudaDeviceSynchronize(); // for using printf in kernel function
#endif
}
void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){
dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
boxes_iou_bev_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b, ans_iou);
#ifdef DEBUG
cudaDeviceSynchronize(); // for using printf in kernel function
#endif
}
void nmsLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){
dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
dim3 threads(THREADS_PER_BLOCK_NMS);
nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask);
}
void nmsNormalLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){
dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
dim3 threads(THREADS_PER_BLOCK_NMS);
nms_normal_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask);
}
|
83bc627b96e67cd15d14b84925752951f3fca7d9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <conio.h>
const int arraysize = 3;
hipError_t metodoBusquedaHost(float *valoresintervalo ,float *valoresSplits,unsigned int arraysize);
__device__ float d_valorInicial = 0;
__device__ float d_valorfinal = 20;
float h_valorInicial = 0;
float h_valorfinal = 20;
__global__ void metodoBusqueda(float *vect , float *valorSplits)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
vect[i] = valorSplits[i] * 2;
}
int main()
{
float valoresIntervalo [arraysize] = {0};
float valoresSplits [arraysize] = {0};
hipError_t cudaStatus = metodoBusquedaHost(valoresIntervalo,valoresSplits,arraysize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "metodoBusquedaHost fallo!");
_getch();
return 1;
}
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset fallo!");
_getch();
return 1;
}
for (int i = 0; i < 3; i++)
{
float number = valoresIntervalo[i];
printf("%g\n", number);
}
_getch();
return 0;
}
hipError_t metodoBusquedaHost(float *valoresintervalo,float * valoresSplits,unsigned int size)
{
float *dev_valorinicial = 0;
float *dev_valorfinal = 0;
float *dev_valoresintervalos = 0;
float *dev_valorSplits = 0;
hipError_t cudaStatus;
cudaStatus = hipMalloc((void**)&dev_valorinicial,size * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc fallo!");
return cudaStatus;
}
cudaStatus = hipMalloc((void**)&dev_valorSplits,size * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc fallo!");
return cudaStatus;
}
cudaStatus = hipMalloc((void**)&dev_valorfinal,size * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc fallo!");
return cudaStatus;
}
cudaStatus = hipMalloc((void**)&dev_valoresintervalos,size * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc fallo!");
return cudaStatus;
}
cudaStatus = hipMemcpy(dev_valoresintervalos, valoresintervalo, size * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc fallo!");
return cudaStatus;
}
int firstsplitValue = (h_valorfinal - h_valorInicial)/4;
int secondSplit = (h_valorfinal - h_valorInicial) /2;
int thirdSplit = firstsplitValue + secondSplit;
valoresSplits[0] = firstsplitValue;
valoresSplits[1] = secondSplit;
valoresSplits[2] =thirdSplit;
cudaStatus = hipMemcpy(dev_valorSplits, valoresSplits, size * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc fallo!");
return cudaStatus;
}
hipLaunchKernelGGL(( metodoBusqueda), dim3(3),dim3(1), 0, 0, dev_valoresintervalos,dev_valorSplits);
cudaStatus = hipMemcpy(valoresintervalo, dev_valoresintervalos,size * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc fallo!");
return cudaStatus;
}
return cudaStatus;
}
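// Worked example: with h_valorInicial = 0 and h_valorfinal = 20 the host computes the
// split points 5, 10 and 15; metodoBusqueda doubles each element, so the program is
// expected to print 10, 20 and 30 (one value per line).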
|
83bc627b96e67cd15d14b84925752951f3fca7d9.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
#include <iostream>
#include <conio.h>
const int arraysize = 3;
cudaError_t metodoBusquedaHost(float *valoresintervalo ,float *valoresSplits,unsigned int arraysize);
__device__ float d_valorInicial = 0;
__device__ float d_valorfinal = 20;
float h_valorInicial = 0;
float h_valorfinal = 20;
__global__ void metodoBusqueda(float *vect , float *valorSplits)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
vect[i] = valorSplits[i] * 2;
}
int main()
{
float valoresIntervalo [arraysize] = {0};
float valoresSplits [arraysize] = {0};
cudaError_t cudaStatus = metodoBusquedaHost(valoresIntervalo,valoresSplits,arraysize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "metodoBusquedaHost fallo!");
_getch();
return 1;
}
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset fallo!");
_getch();
return 1;
}
for (int i = 0; i < 3; i++)
{
float number = valoresIntervalo[i];
printf("%g\n", number);
}
_getch();
return 0;
}
cudaError_t metodoBusquedaHost(float *valoresintervalo,float * valoresSplits,unsigned int size)
{
float *dev_valorinicial = 0;
float *dev_valorfinal = 0;
float *dev_valoresintervalos = 0;
float *dev_valorSplits = 0;
cudaError_t cudaStatus;
cudaStatus = cudaMalloc((void**)&dev_valorinicial,size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc fallo!");
return cudaStatus;
}
cudaStatus = cudaMalloc((void**)&dev_valorSplits,size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc fallo!");
return cudaStatus;
}
cudaStatus = cudaMalloc((void**)&dev_valorfinal,size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc fallo!");
return cudaStatus;
}
cudaStatus = cudaMalloc((void**)&dev_valoresintervalos,size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc fallo!");
return cudaStatus;
}
cudaStatus = cudaMemcpy(dev_valoresintervalos, valoresintervalo, size * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc fallo!");
return cudaStatus;
}
int firstsplitValue = (h_valorfinal - h_valorInicial)/4;
int secondSplit = (h_valorfinal - h_valorInicial) /2;
int thirdSplit = firstsplitValue + secondSplit;
valoresSplits[0] = firstsplitValue;
valoresSplits[1] = secondSplit;
valoresSplits[2] =thirdSplit;
cudaStatus = cudaMemcpy(dev_valorSplits, valoresSplits, size * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc fallo!");
return cudaStatus;
}
metodoBusqueda<<<3,1>>>(dev_valoresintervalos,dev_valorSplits);
cudaStatus = cudaMemcpy(valoresintervalo, dev_valoresintervalos,size * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc fallo!");
return cudaStatus;
}
return cudaStatus;
}
|
8b19d4fd4f72d21fc4baafc0f671567e1434a577.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/filters.hpp"
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
template <typename Ptr2D, typename T> __global__ void remap(const Ptr2D src, const PtrStepf mapx, const PtrStepf mapy, PtrStepSz<T> dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
const float xcoo = mapx.ptr(y)[x];
const float ycoo = mapy.ptr(y)[x];
dst.ptr(y)[x] = saturate_cast<T>(src(ycoo, xcoo));
}
}
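// Each output pixel is fetched from the source at the (generally fractional)
// coordinates given by the two map planes. For example, if mapx(y0,x0) = 3.5 and
// mapy(y0,x0) = 7.25, a LinearFilter source returns the bilinear interpolation of
// src around row 7.25, column 3.5, with out-of-range taps resolved by the border
// policy wrapped around the source pointer.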
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcherStream
{
static void call(PtrStepSz<T> src, PtrStepSzf mapx, PtrStepSzf mapy, PtrStepSz<T> dst, const float* borderValue, hipStream_t stream, bool)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader< PtrStep<T>, B<work_type> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, B<work_type> > > filter_src(brdSrc);
hipLaunchKernelGGL(( remap), dim3(grid), dim3(block), 0, stream, filter_src, mapx, mapy, dst);
cudaSafeCall( hipGetLastError() );
}
};
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcherNonStream
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy, PtrStepSz<T> dst, const float* borderValue, bool)
{
(void)srcWhole;
(void)xoff;
(void)yoff;
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader< PtrStep<T>, B<work_type> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, B<work_type> > > filter_src(brdSrc);
hipLaunchKernelGGL(( remap), dim3(grid), dim3(block), 0, 0, filter_src, mapx, mapy, dst);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
};
#define OPENCV_CUDA_IMPLEMENT_REMAP_TEX(type) \
texture< type , hipTextureType2D> tex_remap_ ## type (0, hipFilterModePoint, hipAddressModeClamp); \
struct tex_remap_ ## type ## _reader \
{ \
typedef type elem_type; \
typedef int index_type; \
int xoff, yoff; \
tex_remap_ ## type ## _reader (int xoff_, int yoff_) : xoff(xoff_), yoff(yoff_) {} \
__device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \
{ \
return tex2D(tex_remap_ ## type , x + xoff, y + yoff); \
} \
}; \
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, type> \
{ \
static void call(PtrStepSz< type > src, PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy, \
PtrStepSz< type > dst, const float* borderValue, bool cc20) \
{ \
typedef typename TypeVec<float, VecTraits< type >::cn>::vec_type work_type; \
dim3 block(32, cc20 ? 8 : 4); \
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
bindTexture(&tex_remap_ ## type , srcWhole); \
tex_remap_ ## type ##_reader texSrc(xoff, yoff); \
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue)); \
BorderReader< tex_remap_ ## type ##_reader, B<work_type> > brdSrc(texSrc, brd); \
Filter< BorderReader< tex_remap_ ## type ##_reader, B<work_type> > > filter_src(brdSrc); \
hipLaunchKernelGGL(( remap), dim3(grid), dim3(block), 0, 0, filter_src, mapx, mapy, dst); \
cudaSafeCall( hipGetLastError() ); \
cudaSafeCall( hipDeviceSynchronize() ); \
} \
}; \
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, type> \
{ \
static void call(PtrStepSz< type > src, PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy, \
PtrStepSz< type > dst, const float*, bool) \
{ \
dim3 block(32, 8); \
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
bindTexture(&tex_remap_ ## type , srcWhole); \
tex_remap_ ## type ##_reader texSrc(xoff, yoff); \
if (srcWhole.cols == src.cols && srcWhole.rows == src.rows) \
{ \
Filter< tex_remap_ ## type ##_reader > filter_src(texSrc); \
hipLaunchKernelGGL(( remap), dim3(grid), dim3(block), 0, 0, filter_src, mapx, mapy, dst); \
} \
else \
{ \
BrdReplicate<type> brd(src.rows, src.cols); \
BorderReader< tex_remap_ ## type ##_reader, BrdReplicate<type> > brdSrc(texSrc, brd); \
Filter< BorderReader< tex_remap_ ## type ##_reader, BrdReplicate<type> > > filter_src(brdSrc); \
hipLaunchKernelGGL(( remap), dim3(grid), dim3(block), 0, 0, filter_src, mapx, mapy, dst); \
} \
cudaSafeCall( hipGetLastError() ); \
cudaSafeCall( hipDeviceSynchronize() ); \
} \
};
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(uchar)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(uchar2)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(uchar4)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(schar)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(char2)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(char4)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(ushort)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(ushort2)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(ushort4)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(short)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(short2)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(short4)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(int)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(int2)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(int4)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(float)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(float2)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(float4)
#undef OPENCV_CUDA_IMPLEMENT_REMAP_TEX
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcher
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy,
PtrStepSz<T> dst, const float* borderValue, hipStream_t stream, bool cc20)
{
if (stream == 0)
RemapDispatcherNonStream<Filter, B, T>::call(src, srcWhole, xoff, yoff, mapx, mapy, dst, borderValue, cc20);
else
RemapDispatcherStream<Filter, B, T>::call(src, mapx, mapy, dst, borderValue, stream, cc20);
}
};
template <typename T> void remap_gpu(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap,
PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap,
PtrStepSz<T> dst, const float* borderValue, hipStream_t stream, bool cc20);
static const caller_t callers[3][5] =
{
{
RemapDispatcher<PointFilter, BrdConstant, T>::call,
RemapDispatcher<PointFilter, BrdReplicate, T>::call,
RemapDispatcher<PointFilter, BrdReflect, T>::call,
RemapDispatcher<PointFilter, BrdWrap, T>::call,
RemapDispatcher<PointFilter, BrdReflect101, T>::call
},
{
RemapDispatcher<LinearFilter, BrdConstant, T>::call,
RemapDispatcher<LinearFilter, BrdReplicate, T>::call,
RemapDispatcher<LinearFilter, BrdReflect, T>::call,
RemapDispatcher<LinearFilter, BrdWrap, T>::call,
RemapDispatcher<LinearFilter, BrdReflect101, T>::call
},
{
RemapDispatcher<CubicFilter, BrdConstant, T>::call,
RemapDispatcher<CubicFilter, BrdReplicate, T>::call,
RemapDispatcher<CubicFilter, BrdReflect, T>::call,
RemapDispatcher<CubicFilter, BrdWrap, T>::call,
RemapDispatcher<CubicFilter, BrdReflect101, T>::call
}
};
callers[interpolation][borderMode](static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(srcWhole), xoff, yoff, xmap, ymap,
static_cast< PtrStepSz<T> >(dst), borderValue, stream, cc20);
}
template void remap_gpu<uchar >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void remap_gpu<uchar2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<uchar3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<uchar4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void remap_gpu<schar>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void remap_gpu<char2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void remap_gpu<char3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void remap_gpu<char4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<ushort >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void remap_gpu<ushort2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<ushort3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<ushort4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<short >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void remap_gpu<short2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<short3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<short4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void remap_gpu<int >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void remap_gpu<int2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void remap_gpu<int3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void remap_gpu<int4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<float >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
//template void remap_gpu<float2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<float3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<float4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
} // namespace imgproc
}}} // namespace cv { namespace cuda { namespace cudev
#endif /* CUDA_DISABLER */
|
8b19d4fd4f72d21fc4baafc0f671567e1434a577.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/filters.hpp"
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
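// Remap kernel: each output thread reads its (x, y) coordinates from the
// mapx/mapy tables and samples the source through the supplied reader/filter.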
template <typename Ptr2D, typename T> __global__ void remap(const Ptr2D src, const PtrStepf mapx, const PtrStepf mapy, PtrStepSz<T> dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
const float xcoo = mapx.ptr(y)[x];
const float ycoo = mapy.ptr(y)[x];
dst.ptr(y)[x] = saturate_cast<T>(src(ycoo, xcoo));
}
}
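// Asynchronous path: wraps the source in the requested border handler and
// interpolation filter, then launches remap on the caller's stream without
// synchronizing.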
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcherStream
{
static void call(PtrStepSz<T> src, PtrStepSzf mapx, PtrStepSzf mapy, PtrStepSz<T> dst, const float* borderValue, cudaStream_t stream, bool)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader< PtrStep<T>, B<work_type> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, B<work_type> > > filter_src(brdSrc);
remap<<<grid, block, 0, stream>>>(filter_src, mapx, mapy, dst);
cudaSafeCall( cudaGetLastError() );
}
};
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcherNonStream
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy, PtrStepSz<T> dst, const float* borderValue, bool)
{
(void)srcWhole;
(void)xoff;
(void)yoff;
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader< PtrStep<T>, B<work_type> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, B<work_type> > > filter_src(brdSrc);
remap<<<grid, block>>>(filter_src, mapx, mapy, dst);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
};
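// Texture-path specializations of RemapDispatcherNonStream, generated per
// element type by the macro below; the source is bound to a 2D texture and
// read through a small texture-reader functor.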
#define OPENCV_CUDA_IMPLEMENT_REMAP_TEX(type) \
texture< type , cudaTextureType2D> tex_remap_ ## type (0, cudaFilterModePoint, cudaAddressModeClamp); \
struct tex_remap_ ## type ## _reader \
{ \
typedef type elem_type; \
typedef int index_type; \
int xoff, yoff; \
tex_remap_ ## type ## _reader (int xoff_, int yoff_) : xoff(xoff_), yoff(yoff_) {} \
__device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \
{ \
return tex2D(tex_remap_ ## type , x + xoff, y + yoff); \
} \
}; \
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, type> \
{ \
static void call(PtrStepSz< type > src, PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy, \
PtrStepSz< type > dst, const float* borderValue, bool cc20) \
{ \
typedef typename TypeVec<float, VecTraits< type >::cn>::vec_type work_type; \
dim3 block(32, cc20 ? 8 : 4); \
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
bindTexture(&tex_remap_ ## type , srcWhole); \
tex_remap_ ## type ##_reader texSrc(xoff, yoff); \
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue)); \
BorderReader< tex_remap_ ## type ##_reader, B<work_type> > brdSrc(texSrc, brd); \
Filter< BorderReader< tex_remap_ ## type ##_reader, B<work_type> > > filter_src(brdSrc); \
remap<<<grid, block>>>(filter_src, mapx, mapy, dst); \
cudaSafeCall( cudaGetLastError() ); \
cudaSafeCall( cudaDeviceSynchronize() ); \
} \
}; \
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, type> \
{ \
static void call(PtrStepSz< type > src, PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy, \
PtrStepSz< type > dst, const float*, bool) \
{ \
dim3 block(32, 8); \
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
bindTexture(&tex_remap_ ## type , srcWhole); \
tex_remap_ ## type ##_reader texSrc(xoff, yoff); \
if (srcWhole.cols == src.cols && srcWhole.rows == src.rows) \
{ \
Filter< tex_remap_ ## type ##_reader > filter_src(texSrc); \
remap<<<grid, block>>>(filter_src, mapx, mapy, dst); \
} \
else \
{ \
BrdReplicate<type> brd(src.rows, src.cols); \
BorderReader< tex_remap_ ## type ##_reader, BrdReplicate<type> > brdSrc(texSrc, brd); \
Filter< BorderReader< tex_remap_ ## type ##_reader, BrdReplicate<type> > > filter_src(brdSrc); \
remap<<<grid, block>>>(filter_src, mapx, mapy, dst); \
} \
cudaSafeCall( cudaGetLastError() ); \
cudaSafeCall( cudaDeviceSynchronize() ); \
} \
};
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(uchar)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(uchar2)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(uchar4)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(schar)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(char2)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(char4)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(ushort)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(ushort2)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(ushort4)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(short)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(short2)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(short4)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(int)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(int2)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(int4)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(float)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(float2)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(float4)
#undef OPENCV_CUDA_IMPLEMENT_REMAP_TEX
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcher
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy,
PtrStepSz<T> dst, const float* borderValue, cudaStream_t stream, bool cc20)
{
if (stream == 0)
RemapDispatcherNonStream<Filter, B, T>::call(src, srcWhole, xoff, yoff, mapx, mapy, dst, borderValue, cc20);
else
RemapDispatcherStream<Filter, B, T>::call(src, mapx, mapy, dst, borderValue, stream, cc20);
}
};
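// Host entry point: picks the dispatcher from an [interpolation][borderMode]
// table (point/linear/cubic filters x constant/replicate/reflect/wrap/reflect101
// borders) and forwards the call.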
template <typename T> void remap_gpu(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap,
PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap,
PtrStepSz<T> dst, const float* borderValue, cudaStream_t stream, bool cc20);
static const caller_t callers[3][5] =
{
{
RemapDispatcher<PointFilter, BrdConstant, T>::call,
RemapDispatcher<PointFilter, BrdReplicate, T>::call,
RemapDispatcher<PointFilter, BrdReflect, T>::call,
RemapDispatcher<PointFilter, BrdWrap, T>::call,
RemapDispatcher<PointFilter, BrdReflect101, T>::call
},
{
RemapDispatcher<LinearFilter, BrdConstant, T>::call,
RemapDispatcher<LinearFilter, BrdReplicate, T>::call,
RemapDispatcher<LinearFilter, BrdReflect, T>::call,
RemapDispatcher<LinearFilter, BrdWrap, T>::call,
RemapDispatcher<LinearFilter, BrdReflect101, T>::call
},
{
RemapDispatcher<CubicFilter, BrdConstant, T>::call,
RemapDispatcher<CubicFilter, BrdReplicate, T>::call,
RemapDispatcher<CubicFilter, BrdReflect, T>::call,
RemapDispatcher<CubicFilter, BrdWrap, T>::call,
RemapDispatcher<CubicFilter, BrdReflect101, T>::call
}
};
callers[interpolation][borderMode](static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(srcWhole), xoff, yoff, xmap, ymap,
static_cast< PtrStepSz<T> >(dst), borderValue, stream, cc20);
}
template void remap_gpu<uchar >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<uchar2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<uchar3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<uchar4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<schar>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<char2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<char3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<char4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<ushort >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<ushort2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<ushort3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<ushort4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<short >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<short2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<short3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<short4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<int >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<int2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<int3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<int4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<float >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<float2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<float3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<float4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
} // namespace imgproc
}}} // namespace cv { namespace cuda { namespace cudev
#endif /* CUDA_DISABLER */
|
521c0e8e6e0466608810132b1e7df6d8ee690c8b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <hiprand/hiprand_kernel.h>
// compute the distance between two patches
__device__ float calcDistance(int target_row ,int target_col,int source_row, int source_col , double * target_block ,double * source_block,int target_rows,int target_cols ,int source_rows,int source_cols)
{
int r = 3;
double dif=0;
double dif0=1,dif1=1,dif2=1;
if( target_row-r<0 ) target_row = r;
if( target_col-r<0 ) target_col =r;
if( source_row-r<0 ) source_row = r;
if( source_col-r<0 ) source_col = r;
if( target_row+r>=target_rows ) target_row = target_rows-1-r;
if( target_col+r>= target_cols ) target_col = target_cols-1-r;
if( source_row+r>=source_rows ) source_row = source_rows-1-r;
if( source_col+r>= source_cols ) source_col = source_cols-1-r;
for(int i=-r ;i<=r;i++){
for(int j=-r ;j<=r;j++){
int temp = 3*((source_row+i)*source_cols+source_col+j) ;
int temp2 = 3*((target_row+i)*target_cols+target_col+j) ;
dif0 = source_block[ temp+ 0] - target_block[temp2+ 0] ;
dif1 = source_block[ temp+ 1] - target_block[temp2+ 1] ;
dif2 = source_block[ temp+ 2] - target_block[temp2+ 2] ;
dif += sqrt(dif0*dif0 +dif1*dif1 +dif2*dif2);
}
}
return dif;
}
__device__ int calcDistance(int target_row ,int target_col,int source_row1, int source_col1,int source_row2, int source_col2 ,int source_row3, int source_col3,double * target_block , double *source_block,int target_rows,int target_cols ,int source_rows,int source_cols)
{
float first2Second = calcDistance(target_row, target_col , source_row1, source_col1,target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
float first2Third = calcDistance( target_row, target_col , source_row2, source_col2,target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
float first2Fourth = calcDistance( target_row,target_col , source_row3, source_col3,target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
if (first2Second<=first2Third)
{
if (first2Second<=first2Fourth)
return 1;
else
return 3;
}
else if (first2Third<= first2Fourth)
return 2;
else
return 3;
}
__device__ int calcDistance(int target_row,int target_col ,int source_row1, int source_col1,int source_row2, int source_col2 ,double * target_block ,double * source_block,int target_rows,int target_cols ,int source_rows,int source_cols)
{
float first2Second = calcDistance(target_row, target_col , source_row1, source_col1,target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
float first2Third = calcDistance( target_row, target_col , source_row2, source_col2,target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
if (first2Second <= first2Third)
return 1;
return 2;
}
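// Propagation pass: one block per target row, one thread per column. The current
// match is compared against the matches stored for the neighbours at (y+1, x) and
// (y, x+1), shifted back by one, and the closest of the three candidates is kept.
// The neighbour reads assume interior pixels (no bounds check).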
__global__ void extern PropagationGPU(double * target_block ,double * source_block , int * relation_block , int target_rows , int target_cols ,int source_rows , int source_cols)
{
// propagation step
int y = blockIdx.x;
int x = threadIdx.x;
// gather candidate match coordinates
int c_r0 = relation_block[ 2*(y*target_cols+x) + 0 ];
int c_c0 = relation_block[ 2*(y*target_cols+x) + 1];
int c_r1 = relation_block[ 2*((y+1)*target_cols+x) + 0 ]-1;
int c_c1 = relation_block[ 2*((y+1)*target_cols+x) + 1 ];
int c_r2 = relation_block[ 2*(y*target_cols+x+1) + 0];
int c_c2 = relation_block[ 2*(y*target_cols+x+1) + 1]-1;
int patchNumber = calcDistance(y , x , c_r0 , c_c0 , c_r1, c_c1 , c_r2 , c_c2 , target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
switch(patchNumber)
{
case 2:
relation_block[ 2*(y*target_cols+x) + 0 ]= c_r1;
relation_block[ 2*(y*target_cols+x) + 1 ]= c_c1;
break;
case 3:
relation_block[ 2*(y*target_cols+x) + 0 ] = c_r2;
relation_block[ 2*(y*target_cols+x) + 1 ] = c_c2;
break;
}
}
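// Second pass: despite its name it does not sample randomly; it reuses the matches
// recorded at (y-2, x) and (y, x-2), shifted by +2 in the corresponding direction,
// and again keeps the closest of the three candidates.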
__global__ void extern RandomSearchGPU(double * target_block ,double * source_block ,int * relation_block,int target_rows,int target_cols ,int source_rows,int source_cols){
// propagation step
int y = blockIdx.x;
int x = threadIdx.x;
// gather candidate match coordinates
int c_r0 = relation_block[ 2*(y*target_cols+x) + 0 ];
int c_c0 = relation_block[ 2*(y*target_cols+x) + 1];
int c_r1 = relation_block[ 2*((y-2)*target_cols+x) + 0 ]+2;
int c_c1 = relation_block[ 2*((y-2)*target_cols+x) + 1 ];
int c_r2 = relation_block[ 2*(y*target_cols+x-2) + 0];
int c_c2 = relation_block[ 2*(y*target_cols+x-2) + 1]+2;
int patchNumber = calcDistance(y , x , c_r0 , c_c0 , c_r1, c_c1 , c_r2 , c_c2 , target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
switch(patchNumber)
{
case 2:
relation_block[ 2*(y*target_cols+x) + 0 ]= c_r1;
relation_block[ 2*(y*target_cols+x) + 1 ]= c_c1;
break;
case 3:
relation_block[ 2*(y*target_cols+x) + 0 ] = c_r2;
relation_block[ 2*(y*target_cols+x) + 1 ] = c_c2;
break;
}
}
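// Brute-force ("baoli") search: exhaustively tries source positions (i, j) with
// i, j in [0, 12), tracking the best distance per target pixel; currently unused
// (its launch in bridge below is commented out).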
__global__ void extern baoli(double * target_block ,double * source_block ,int * relation_block,int target_rows,int target_cols ,int source_rows,int source_cols,double *distance){
int y = threadIdx.y;
int x = threadIdx.x;
for(int i = 0 ; i<12;i++){
for(int j = 0 ;j< 12 ; j++){
double c = calcDistance(y , x , i , j , target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
if( c < distance[ y*target_cols+x ]){
relation_block[ 2*(y*target_cols+x) + 0 ]= i;
relation_block[ 2*(y*target_cols+x) + 1 ]= j;
distance[ y*target_cols+x ] = c;
}
}
}
}
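// Host driver: launches one block per target row with one thread per column and
// alternates the propagation and search kernels for 130 iterations, synchronizing
// the device after every launch.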
void extern bridge(double * target_block ,double * source_block ,int * relation_block,int target_rows,int target_cols ,int source_rows,int source_cols, double * distance){
// limited by the maximum threads per block
/**/ for(int i = 0;i<130 ;i++){
hipLaunchKernelGGL(( PropagationGPU), dim3(target_rows) ,dim3(target_cols), 0, 0, target_block, source_block, relation_block , target_rows , target_cols , source_rows , source_cols);
hipDeviceSynchronize();
hipLaunchKernelGGL(( RandomSearchGPU), dim3(target_rows ),dim3(target_cols), 0, 0, target_block, source_block, relation_block,target_rows,target_cols,source_rows, source_cols);
hipDeviceSynchronize();
}
//baoli<<<target_rows ,target_cols>>>(target_block, source_block, relation_block , target_rows , target_cols , source_rows , source_cols ,distance);
}
|
521c0e8e6e0466608810132b1e7df6d8ee690c8b.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <curand_kernel.h>
// compute the distance between two patches
__device__ float calcDistance(int target_row ,int target_col,int source_row, int source_col , double * target_block ,double * source_block,int target_rows,int target_cols ,int source_rows,int source_cols)
{
int r = 3;
double dif=0;
double dif0=1,dif1=1,dif2=1;
if( target_row-r<0 ) target_row = r;
if( target_col-r<0 ) target_col =r;
if( source_row-r<0 ) source_row = r;
if( source_col-r<0 ) source_col = r;
if( target_row+r>=target_rows ) target_row = target_rows-1-r;
if( target_col+r>= target_cols ) target_col = target_cols-1-r;
if( source_row+r>=source_rows ) source_row = source_rows-1-r;
if( source_col+r>= source_cols ) source_col = source_cols-1-r;
for(int i=-r ;i<=r;i++){
for(int j=-r ;j<=r;j++){
int temp = 3*((source_row+i)*source_cols+source_col+j) ;
int temp2 = 3*((target_row+i)*target_cols+target_col+j) ;
dif0 = source_block[ temp+ 0] - target_block[temp2+ 0] ;
dif1 = source_block[ temp+ 1] - target_block[temp2+ 1] ;
dif2 = source_block[ temp+ 2] - target_block[temp2+ 2] ;
dif += sqrt(dif0*dif0 +dif1*dif1 +dif2*dif2);
}
}
return dif;
}
__device__ int calcDistance(int target_row ,int target_col,int source_row1, int source_col1,int source_row2, int source_col2 ,int source_row3, int source_col3,double * target_block , double *source_block,int target_rows,int target_cols ,int source_rows,int source_cols)
{
float first2Second = calcDistance(target_row, target_col , source_row1, source_col1,target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
float first2Third = calcDistance( target_row, target_col , source_row2, source_col2,target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
float first2Fourth = calcDistance( target_row,target_col , source_row3, source_col3,target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
if (first2Second<=first2Third)
{
if (first2Second<=first2Fourth)
return 1;
else
return 3;
}
else if (first2Third<= first2Fourth)
return 2;
else
return 3;
}
__device__ int calcDistance(int target_row,int target_col ,int source_row1, int source_col1,int source_row2, int source_col2 ,double * target_block ,double * source_block,int target_rows,int target_cols ,int source_rows,int source_cols)
{
float first2Second = calcDistance(target_row, target_col , source_row1, source_col1,target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
float first2Third = calcDistance( target_row, target_col , source_row2, source_col2,target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
if (first2Second <= first2Third)
return 1;
return 2;
}
__global__ void extern PropagationGPU(double * target_block ,double * source_block , int * relation_block , int target_rows , int target_cols ,int source_rows , int source_cols)
{
// propagation step
int y = blockIdx.x;
int x = threadIdx.x;
// gather candidate match coordinates
int c_r0 = relation_block[ 2*(y*target_cols+x) + 0 ];
int c_c0 = relation_block[ 2*(y*target_cols+x) + 1];
int c_r1 = relation_block[ 2*((y+1)*target_cols+x) + 0 ]-1;
int c_c1 = relation_block[ 2*((y+1)*target_cols+x) + 1 ];
int c_r2 = relation_block[ 2*(y*target_cols+x+1) + 0];
int c_c2 = relation_block[ 2*(y*target_cols+x+1) + 1]-1;
int patchNumber = calcDistance(y , x , c_r0 , c_c0 , c_r1, c_c1 , c_r2 , c_c2 , target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
switch(patchNumber)
{
case 2:
relation_block[ 2*(y*target_cols+x) + 0 ]= c_r1;
relation_block[ 2*(y*target_cols+x) + 1 ]= c_c1;
break;
case 3:
relation_block[ 2*(y*target_cols+x) + 0 ] = c_r2;
relation_block[ 2*(y*target_cols+x) + 1 ] = c_c2;
break;
}
}
__global__ void extern RandomSearchGPU(double * target_block ,double * source_block ,int * relation_block,int target_rows,int target_cols ,int source_rows,int source_cols){
// propagation step
int y = blockIdx.x;
int x = threadIdx.x;
// gather candidate match coordinates
int c_r0 = relation_block[ 2*(y*target_cols+x) + 0 ];
int c_c0 = relation_block[ 2*(y*target_cols+x) + 1];
int c_r1 = relation_block[ 2*((y-2)*target_cols+x) + 0 ]+2;
int c_c1 = relation_block[ 2*((y-2)*target_cols+x) + 1 ];
int c_r2 = relation_block[ 2*(y*target_cols+x-2) + 0];
int c_c2 = relation_block[ 2*(y*target_cols+x-2) + 1]+2;
int patchNumber = calcDistance(y , x , c_r0 , c_c0 , c_r1, c_c1 , c_r2 , c_c2 , target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
switch(patchNumber)
{
case 2:
relation_block[ 2*(y*target_cols+x) + 0 ]= c_r1;
relation_block[ 2*(y*target_cols+x) + 1 ]= c_c1;
break;
case 3:
relation_block[ 2*(y*target_cols+x) + 0 ] = c_r2;
relation_block[ 2*(y*target_cols+x) + 1 ] = c_c2;
break;
}
}
__global__ void extern baoli(double * target_block ,double * source_block ,int * relation_block,int target_rows,int target_cols ,int source_rows,int source_cols,double *distance){
int y = threadIdx.y;
int x = threadIdx.x;
for(int i = 0 ; i<12;i++){
for(int j = 0 ;j< 12 ; j++){
double c = calcDistance(y , x , i , j , target_block ,source_block,target_rows,target_cols,source_rows, source_cols);
if( c < distance[ y*target_cols+x ]){
relation_block[ 2*(y*target_cols+x) + 0 ]= i;
relation_block[ 2*(y*target_cols+x) + 1 ]= j;
distance[ y*target_cols+x ] = c;
}
}
}
}
void extern bridge(double * target_block ,double * source_block ,int * relation_block,int target_rows,int target_cols ,int source_rows,int source_cols, double * distance){
// limited by the maximum threads per block
/**/ for(int i = 0;i<130 ;i++){
PropagationGPU<<<target_rows ,target_cols>>>(target_block, source_block, relation_block , target_rows , target_cols , source_rows , source_cols);
cudaThreadSynchronize();
RandomSearchGPU<<<target_rows ,target_cols>>>(target_block, source_block, relation_block,target_rows,target_cols,source_rows, source_cols);
cudaThreadSynchronize();
}
//baoli<<<target_rows ,target_cols>>>(target_block, source_block, relation_block , target_rows , target_cols , source_rows , source_cols ,distance);
}
|
9f0e17a8c4453cc7a7f6fd3320a7bbbe22ea545b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void __floatToLong(float *A, long long *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (long long)(A[i]);
}
}
|
9f0e17a8c4453cc7a7f6fd3320a7bbbe22ea545b.cu
|
#include "includes.h"
__global__ void __floatToLong(float *A, long long *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (long long)(A[i]);
}
}
|
2a32cb8386f78a947953df6aa8a7ba494c1c698a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_field_summary_kernel [12][2];
static int dims_field_summary_kernel_h [12][2] = {0};
//user function
__device__
void field_summary_kernel_gpu(const ACC<double> &volume,
const ACC<double> &density0,
const ACC<double> &energy0,
const ACC<double> &pressure,
const ACC<double> &xvel0,
const ACC<double> &yvel0,
const ACC<double> &zvel0,
double *vol,
double *mass,
double *ie,
double *ke,
double *press) {
double vsqrd, cell_vol, cell_mass;
vsqrd = 0.0;
vsqrd+=0.125*( xvel0(0,0,0) * xvel0(0,0,0) +
yvel0(0,0,0) * yvel0(0,0,0) +
zvel0(0,0,0) * zvel0(0,0,0));
vsqrd+=0.125*( xvel0(1,0,0) * xvel0(1,0,0) +
yvel0(1,0,0) * yvel0(1,0,0) +
zvel0(1,0,0) * zvel0(1,0,0));
vsqrd+=0.125*( xvel0(0,1,0) * xvel0(0,1,0) +
yvel0(0,1,0) * yvel0(0,1,0) +
zvel0(0,1,0) * zvel0(0,1,0));
vsqrd+=0.125*( xvel0(1,1,0) * xvel0(1,1,0) +
yvel0(1,1,0) * yvel0(1,1,0) +
zvel0(1,1,0) * zvel0(1,1,0));
vsqrd+=0.125*( xvel0(0,0,1) * xvel0(0,0,1) +
yvel0(0,0,1) * yvel0(0,0,1) +
zvel0(0,0,1) * zvel0(0,0,1));
vsqrd+=0.125*( xvel0(1,0,1) * xvel0(1,0,1) +
yvel0(1,0,1) * yvel0(1,0,1) +
zvel0(1,0,1) * zvel0(1,0,1));
vsqrd+=0.125*( xvel0(0,1,1) * xvel0(0,1,1) +
yvel0(0,1,1) * yvel0(0,1,1) +
zvel0(0,1,1) * zvel0(0,1,1));
vsqrd+=0.125*( xvel0(1,1,1) * xvel0(1,1,1) +
yvel0(1,1,1) * yvel0(1,1,1) +
zvel0(1,1,1) * zvel0(1,1,1));
cell_vol = volume(0,0,0);
cell_mass = cell_vol * density0(0,0,0);
*vol = *vol + cell_vol;
*mass = *mass + cell_mass;
*ie = *ie + cell_mass * energy0(0,0,0);
*ke = *ke + cell_mass * 0.5 * vsqrd;
*press = *press + cell_vol * pressure(0,0,0);
}
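// Device wrapper: every thread accumulates its cell's contributions into local
// scalars, then the five partial sums are reduced per block into the global
// reduction buffers via ops_reduction_cuda<OPS_INC>.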
__global__ void ops_field_summary_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
double* __restrict arg8,
double* __restrict arg9,
double* __restrict arg10,
double* __restrict arg11,
int size0,
int size1,
int size2 ){
double arg7_l[1];
double arg8_l[1];
double arg9_l[1];
double arg10_l[1];
double arg11_l[1];
for (int d=0; d<1; d++) arg7_l[d] = ZERO_double;
for (int d=0; d<1; d++) arg8_l[d] = ZERO_double;
for (int d=0; d<1; d++) arg9_l[d] = ZERO_double;
for (int d=0; d<1; d++) arg10_l[d] = ZERO_double;
for (int d=0; d<1; d++) arg11_l[d] = ZERO_double;
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_field_summary_kernel[0][0] + idx_z * 1*1 * dims_field_summary_kernel[0][0] * dims_field_summary_kernel[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_field_summary_kernel[1][0] + idx_z * 1*1 * dims_field_summary_kernel[1][0] * dims_field_summary_kernel[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_field_summary_kernel[2][0] + idx_z * 1*1 * dims_field_summary_kernel[2][0] * dims_field_summary_kernel[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_field_summary_kernel[3][0] + idx_z * 1*1 * dims_field_summary_kernel[3][0] * dims_field_summary_kernel[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_field_summary_kernel[4][0] + idx_z * 1*1 * dims_field_summary_kernel[4][0] * dims_field_summary_kernel[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_field_summary_kernel[5][0] + idx_z * 1*1 * dims_field_summary_kernel[5][0] * dims_field_summary_kernel[5][1];
arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_field_summary_kernel[6][0] + idx_z * 1*1 * dims_field_summary_kernel[6][0] * dims_field_summary_kernel[6][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
const ACC<double> argp0(dims_field_summary_kernel[0][0], dims_field_summary_kernel[0][1], arg0);
const ACC<double> argp1(dims_field_summary_kernel[1][0], dims_field_summary_kernel[1][1], arg1);
const ACC<double> argp2(dims_field_summary_kernel[2][0], dims_field_summary_kernel[2][1], arg2);
const ACC<double> argp3(dims_field_summary_kernel[3][0], dims_field_summary_kernel[3][1], arg3);
const ACC<double> argp4(dims_field_summary_kernel[4][0], dims_field_summary_kernel[4][1], arg4);
const ACC<double> argp5(dims_field_summary_kernel[5][0], dims_field_summary_kernel[5][1], arg5);
const ACC<double> argp6(dims_field_summary_kernel[6][0], dims_field_summary_kernel[6][1], arg6);
field_summary_kernel_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6, arg7_l, arg8_l,
arg9_l, arg10_l, arg11_l);
}
for (int d=0; d<1; d++)
ops_reduction_cuda<OPS_INC>(&arg7[d+(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y)*1],arg7_l[d]);
for (int d=0; d<1; d++)
ops_reduction_cuda<OPS_INC>(&arg8[d+(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y)*1],arg8_l[d]);
for (int d=0; d<1; d++)
ops_reduction_cuda<OPS_INC>(&arg9[d+(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y)*1],arg9_l[d]);
for (int d=0; d<1; d++)
ops_reduction_cuda<OPS_INC>(&arg10[d+(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y)*1],arg10_l[d]);
for (int d=0; d<1; d++)
ops_reduction_cuda<OPS_INC>(&arg11[d+(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y)*1],arg11_l[d]);
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_field_summary_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
ops_arg arg9, ops_arg arg10, ops_arg arg11) {
#else
void ops_par_loop_field_summary_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
ops_arg arg11 = desc->args[11];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[12] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,12,range,95)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(95,"field_summary_kernel");
OPS_kernels[95].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 12,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != dims_field_summary_kernel_h[0][0] || ydim0 != dims_field_summary_kernel_h[0][1] || xdim1 != dims_field_summary_kernel_h[1][0] || ydim1 != dims_field_summary_kernel_h[1][1] || xdim2 != dims_field_summary_kernel_h[2][0] || ydim2 != dims_field_summary_kernel_h[2][1] || xdim3 != dims_field_summary_kernel_h[3][0] || ydim3 != dims_field_summary_kernel_h[3][1] || xdim4 != dims_field_summary_kernel_h[4][0] || ydim4 != dims_field_summary_kernel_h[4][1] || xdim5 != dims_field_summary_kernel_h[5][0] || ydim5 != dims_field_summary_kernel_h[5][1] || xdim6 != dims_field_summary_kernel_h[6][0] || ydim6 != dims_field_summary_kernel_h[6][1]) {
dims_field_summary_kernel_h[0][0] = xdim0;
dims_field_summary_kernel_h[0][1] = ydim0;
dims_field_summary_kernel_h[1][0] = xdim1;
dims_field_summary_kernel_h[1][1] = ydim1;
dims_field_summary_kernel_h[2][0] = xdim2;
dims_field_summary_kernel_h[2][1] = ydim2;
dims_field_summary_kernel_h[3][0] = xdim3;
dims_field_summary_kernel_h[3][1] = ydim3;
dims_field_summary_kernel_h[4][0] = xdim4;
dims_field_summary_kernel_h[4][1] = ydim4;
dims_field_summary_kernel_h[5][0] = xdim5;
dims_field_summary_kernel_h[5][1] = ydim5;
dims_field_summary_kernel_h[6][0] = xdim6;
dims_field_summary_kernel_h[6][1] = ydim6;
cutilSafeCall(hipMemcpyToSymbol( dims_field_summary_kernel, dims_field_summary_kernel_h, sizeof(dims_field_summary_kernel)));
}
#if defined(OPS_LAZY) && !defined(OPS_MPI)
ops_block block = desc->block;
#endif
#ifdef OPS_MPI
double *arg7h = (double *)(((ops_reduction)args[7].data)->data + ((ops_reduction)args[7].data)->size * block->index);
#else
double *arg7h = (double *)(((ops_reduction)args[7].data)->data);
#endif
#ifdef OPS_MPI
double *arg8h = (double *)(((ops_reduction)args[8].data)->data + ((ops_reduction)args[8].data)->size * block->index);
#else
double *arg8h = (double *)(((ops_reduction)args[8].data)->data);
#endif
#ifdef OPS_MPI
double *arg9h = (double *)(((ops_reduction)args[9].data)->data + ((ops_reduction)args[9].data)->size * block->index);
#else
double *arg9h = (double *)(((ops_reduction)args[9].data)->data);
#endif
#ifdef OPS_MPI
double *arg10h = (double *)(((ops_reduction)args[10].data)->data + ((ops_reduction)args[10].data)->size * block->index);
#else
double *arg10h = (double *)(((ops_reduction)args[10].data)->data);
#endif
#ifdef OPS_MPI
double *arg11h = (double *)(((ops_reduction)args[11].data)->data + ((ops_reduction)args[11].data)->size * block->index);
#else
double *arg11h = (double *)(((ops_reduction)args[11].data)->data);
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int nblocks = ((x_size-1)/OPS_block_size_x+ 1)*((y_size-1)/OPS_block_size_y + 1)*((z_size-1)/OPS_block_size_z +1);
int maxblocks = nblocks;
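// Stage one reduction slot per thread block for each of the five reduction
// arguments: size the buffers, zero the host copies, and move them to the device.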
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double)*1);
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double)*1);
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double)*1);
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double)*1);
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double)*1);
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg7.data = OPS_reduct_h + reduct_bytes;
arg7.data_d = OPS_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg7.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
arg8.data = OPS_reduct_h + reduct_bytes;
arg8.data_d = OPS_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg8.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
arg9.data = OPS_reduct_h + reduct_bytes;
arg9.data_d = OPS_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg9.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
arg10.data = OPS_reduct_h + reduct_bytes;
arg10.data_d = OPS_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg10.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
arg11.data = OPS_reduct_h + reduct_bytes;
arg11.data_d = OPS_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg11.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[12];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 12);
ops_halo_exchanges(args,12,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[95].mpi_time += t2-t1;
}
int nshared = 0;
int nthread = OPS_block_size_x*OPS_block_size_y*OPS_block_size_z;
nshared = MAX(nshared,sizeof(double)*1);
nshared = MAX(nshared,sizeof(double)*1);
nshared = MAX(nshared,sizeof(double)*1);
nshared = MAX(nshared,sizeof(double)*1);
nshared = MAX(nshared,sizeof(double)*1);
nshared = MAX(nshared*nthread,reduct_size*nthread);
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_field_summary_kernel), dim3(grid), dim3(tblock), nshared , 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)arg7.data_d,
(double *)arg8.data_d, (double *)arg9.data_d,
(double *)arg10.data_d, (double *)arg11.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
mvReductArraysToHost(reduct_bytes);
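// Accumulate the per-block partial sums into the host-side reduction results.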
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg7h[d] = arg7h[d] + ((double *)arg7.data)[d+b*1];
}
}
arg7.data = (char *)arg7h;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg8h[d] = arg8h[d] + ((double *)arg8.data)[d+b*1];
}
}
arg8.data = (char *)arg8h;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg9h[d] = arg9h[d] + ((double *)arg9.data)[d+b*1];
}
}
arg9.data = (char *)arg9h;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg10h[d] = arg10h[d] + ((double *)arg10.data)[d+b*1];
}
}
arg10.data = (char *)arg10h;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg11h[d] = arg11h[d] + ((double *)arg11.data)[d+b*1];
}
}
arg11.data = (char *)arg11h;
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[95].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 12);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[95].mpi_time += t2-t1;
OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_field_summary_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 95;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 95;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 12;
desc->args = (ops_arg*)malloc(12*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->args[8] = arg8;
desc->args[9] = arg9;
desc->args[10] = arg10;
desc->args[11] = arg11;
desc->function = ops_par_loop_field_summary_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(95,"field_summary_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
2a32cb8386f78a947953df6aa8a7ba494c1c698a.cu
|
//
// auto-generated by ops.py
//
__constant__ int dims_field_summary_kernel [12][2];
static int dims_field_summary_kernel_h [12][2] = {0};
//user function
__device__
void field_summary_kernel_gpu(const ACC<double> &volume,
const ACC<double> &density0,
const ACC<double> &energy0,
const ACC<double> &pressure,
const ACC<double> &xvel0,
const ACC<double> &yvel0,
const ACC<double> &zvel0,
double *vol,
double *mass,
double *ie,
double *ke,
double *press) {
double vsqrd, cell_vol, cell_mass;
vsqrd = 0.0;
vsqrd+=0.125*( xvel0(0,0,0) * xvel0(0,0,0) +
yvel0(0,0,0) * yvel0(0,0,0) +
zvel0(0,0,0) * zvel0(0,0,0));
vsqrd+=0.125*( xvel0(1,0,0) * xvel0(1,0,0) +
yvel0(1,0,0) * yvel0(1,0,0) +
zvel0(1,0,0) * zvel0(1,0,0));
vsqrd+=0.125*( xvel0(0,1,0) * xvel0(0,1,0) +
yvel0(0,1,0) * yvel0(0,1,0) +
zvel0(0,1,0) * zvel0(0,1,0));
vsqrd+=0.125*( xvel0(1,1,0) * xvel0(1,1,0) +
yvel0(1,1,0) * yvel0(1,1,0) +
zvel0(1,1,0) * zvel0(1,1,0));
vsqrd+=0.125*( xvel0(0,0,1) * xvel0(0,0,1) +
yvel0(0,0,1) * yvel0(0,0,1) +
zvel0(0,0,1) * zvel0(0,0,1));
vsqrd+=0.125*( xvel0(1,0,1) * xvel0(1,0,1) +
yvel0(1,0,1) * yvel0(1,0,1) +
zvel0(1,0,1) * zvel0(1,0,1));
vsqrd+=0.125*( xvel0(0,1,1) * xvel0(0,1,1) +
yvel0(0,1,1) * yvel0(0,1,1) +
zvel0(0,1,1) * zvel0(0,1,1));
vsqrd+=0.125*( xvel0(1,1,1) * xvel0(1,1,1) +
yvel0(1,1,1) * yvel0(1,1,1) +
zvel0(1,1,1) * zvel0(1,1,1));
cell_vol = volume(0,0,0);
cell_mass = cell_vol * density0(0,0,0);
*vol = *vol + cell_vol;
*mass = *mass + cell_mass;
*ie = *ie + cell_mass * energy0(0,0,0);
*ke = *ke + cell_mass * 0.5 * vsqrd;
*press = *press + cell_vol * pressure(0,0,0);
}
__global__ void ops_field_summary_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
double* __restrict arg7,
double* __restrict arg8,
double* __restrict arg9,
double* __restrict arg10,
double* __restrict arg11,
int size0,
int size1,
int size2 ){
double arg7_l[1];
double arg8_l[1];
double arg9_l[1];
double arg10_l[1];
double arg11_l[1];
for (int d=0; d<1; d++) arg7_l[d] = ZERO_double;
for (int d=0; d<1; d++) arg8_l[d] = ZERO_double;
for (int d=0; d<1; d++) arg9_l[d] = ZERO_double;
for (int d=0; d<1; d++) arg10_l[d] = ZERO_double;
for (int d=0; d<1; d++) arg11_l[d] = ZERO_double;
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_field_summary_kernel[0][0] + idx_z * 1*1 * dims_field_summary_kernel[0][0] * dims_field_summary_kernel[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_field_summary_kernel[1][0] + idx_z * 1*1 * dims_field_summary_kernel[1][0] * dims_field_summary_kernel[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_field_summary_kernel[2][0] + idx_z * 1*1 * dims_field_summary_kernel[2][0] * dims_field_summary_kernel[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_field_summary_kernel[3][0] + idx_z * 1*1 * dims_field_summary_kernel[3][0] * dims_field_summary_kernel[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_field_summary_kernel[4][0] + idx_z * 1*1 * dims_field_summary_kernel[4][0] * dims_field_summary_kernel[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_field_summary_kernel[5][0] + idx_z * 1*1 * dims_field_summary_kernel[5][0] * dims_field_summary_kernel[5][1];
arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_field_summary_kernel[6][0] + idx_z * 1*1 * dims_field_summary_kernel[6][0] * dims_field_summary_kernel[6][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
const ACC<double> argp0(dims_field_summary_kernel[0][0], dims_field_summary_kernel[0][1], arg0);
const ACC<double> argp1(dims_field_summary_kernel[1][0], dims_field_summary_kernel[1][1], arg1);
const ACC<double> argp2(dims_field_summary_kernel[2][0], dims_field_summary_kernel[2][1], arg2);
const ACC<double> argp3(dims_field_summary_kernel[3][0], dims_field_summary_kernel[3][1], arg3);
const ACC<double> argp4(dims_field_summary_kernel[4][0], dims_field_summary_kernel[4][1], arg4);
const ACC<double> argp5(dims_field_summary_kernel[5][0], dims_field_summary_kernel[5][1], arg5);
const ACC<double> argp6(dims_field_summary_kernel[6][0], dims_field_summary_kernel[6][1], arg6);
field_summary_kernel_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6, arg7_l, arg8_l,
arg9_l, arg10_l, arg11_l);
}
for (int d=0; d<1; d++)
ops_reduction_cuda<OPS_INC>(&arg7[d+(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y)*1],arg7_l[d]);
for (int d=0; d<1; d++)
ops_reduction_cuda<OPS_INC>(&arg8[d+(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y)*1],arg8_l[d]);
for (int d=0; d<1; d++)
ops_reduction_cuda<OPS_INC>(&arg9[d+(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y)*1],arg9_l[d]);
for (int d=0; d<1; d++)
ops_reduction_cuda<OPS_INC>(&arg10[d+(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y)*1],arg10_l[d]);
for (int d=0; d<1; d++)
ops_reduction_cuda<OPS_INC>(&arg11[d+(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y)*1],arg11_l[d]);
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_field_summary_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8,
ops_arg arg9, ops_arg arg10, ops_arg arg11) {
#else
void ops_par_loop_field_summary_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
ops_arg arg8 = desc->args[8];
ops_arg arg9 = desc->args[9];
ops_arg arg10 = desc->args[10];
ops_arg arg11 = desc->args[11];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[12] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,12,range,95)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(95,"field_summary_kernel");
OPS_kernels[95].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 12,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != dims_field_summary_kernel_h[0][0] || ydim0 != dims_field_summary_kernel_h[0][1] || xdim1 != dims_field_summary_kernel_h[1][0] || ydim1 != dims_field_summary_kernel_h[1][1] || xdim2 != dims_field_summary_kernel_h[2][0] || ydim2 != dims_field_summary_kernel_h[2][1] || xdim3 != dims_field_summary_kernel_h[3][0] || ydim3 != dims_field_summary_kernel_h[3][1] || xdim4 != dims_field_summary_kernel_h[4][0] || ydim4 != dims_field_summary_kernel_h[4][1] || xdim5 != dims_field_summary_kernel_h[5][0] || ydim5 != dims_field_summary_kernel_h[5][1] || xdim6 != dims_field_summary_kernel_h[6][0] || ydim6 != dims_field_summary_kernel_h[6][1]) {
dims_field_summary_kernel_h[0][0] = xdim0;
dims_field_summary_kernel_h[0][1] = ydim0;
dims_field_summary_kernel_h[1][0] = xdim1;
dims_field_summary_kernel_h[1][1] = ydim1;
dims_field_summary_kernel_h[2][0] = xdim2;
dims_field_summary_kernel_h[2][1] = ydim2;
dims_field_summary_kernel_h[3][0] = xdim3;
dims_field_summary_kernel_h[3][1] = ydim3;
dims_field_summary_kernel_h[4][0] = xdim4;
dims_field_summary_kernel_h[4][1] = ydim4;
dims_field_summary_kernel_h[5][0] = xdim5;
dims_field_summary_kernel_h[5][1] = ydim5;
dims_field_summary_kernel_h[6][0] = xdim6;
dims_field_summary_kernel_h[6][1] = ydim6;
cutilSafeCall(cudaMemcpyToSymbol( dims_field_summary_kernel, dims_field_summary_kernel_h, sizeof(dims_field_summary_kernel)));
}
#if defined(OPS_LAZY) && !defined(OPS_MPI)
ops_block block = desc->block;
#endif
#ifdef OPS_MPI
double *arg7h = (double *)(((ops_reduction)args[7].data)->data + ((ops_reduction)args[7].data)->size * block->index);
#else
double *arg7h = (double *)(((ops_reduction)args[7].data)->data);
#endif
#ifdef OPS_MPI
double *arg8h = (double *)(((ops_reduction)args[8].data)->data + ((ops_reduction)args[8].data)->size * block->index);
#else
double *arg8h = (double *)(((ops_reduction)args[8].data)->data);
#endif
#ifdef OPS_MPI
double *arg9h = (double *)(((ops_reduction)args[9].data)->data + ((ops_reduction)args[9].data)->size * block->index);
#else
double *arg9h = (double *)(((ops_reduction)args[9].data)->data);
#endif
#ifdef OPS_MPI
double *arg10h = (double *)(((ops_reduction)args[10].data)->data + ((ops_reduction)args[10].data)->size * block->index);
#else
double *arg10h = (double *)(((ops_reduction)args[10].data)->data);
#endif
#ifdef OPS_MPI
double *arg11h = (double *)(((ops_reduction)args[11].data)->data + ((ops_reduction)args[11].data)->size * block->index);
#else
double *arg11h = (double *)(((ops_reduction)args[11].data)->data);
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int nblocks = ((x_size-1)/OPS_block_size_x+ 1)*((y_size-1)/OPS_block_size_y + 1)*((z_size-1)/OPS_block_size_z +1);
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double)*1);
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double)*1);
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double)*1);
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double)*1);
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double)*1);
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg7.data = OPS_reduct_h + reduct_bytes;
arg7.data_d = OPS_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg7.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
arg8.data = OPS_reduct_h + reduct_bytes;
arg8.data_d = OPS_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg8.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
arg9.data = OPS_reduct_h + reduct_bytes;
arg9.data_d = OPS_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg9.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
arg10.data = OPS_reduct_h + reduct_bytes;
arg10.data_d = OPS_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg10.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
arg11.data = OPS_reduct_h + reduct_bytes;
arg11.data_d = OPS_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg11.data)[d+b*1] = ZERO_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[12];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 12);
ops_halo_exchanges(args,12,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[95].mpi_time += t2-t1;
}
int nshared = 0;
int nthread = OPS_block_size_x*OPS_block_size_y*OPS_block_size_z;
nshared = MAX(nshared,sizeof(double)*1);
nshared = MAX(nshared,sizeof(double)*1);
nshared = MAX(nshared,sizeof(double)*1);
nshared = MAX(nshared,sizeof(double)*1);
nshared = MAX(nshared,sizeof(double)*1);
nshared = MAX(nshared*nthread,reduct_size*nthread);
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_field_summary_kernel<<<grid, tblock, nshared >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (double *)arg7.data_d,
(double *)arg8.data_d, (double *)arg9.data_d,
(double *)arg10.data_d, (double *)arg11.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg7h[d] = arg7h[d] + ((double *)arg7.data)[d+b*1];
}
}
arg7.data = (char *)arg7h;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg8h[d] = arg8h[d] + ((double *)arg8.data)[d+b*1];
}
}
arg8.data = (char *)arg8h;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg9h[d] = arg9h[d] + ((double *)arg9.data)[d+b*1];
}
}
arg9.data = (char *)arg9h;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg10h[d] = arg10h[d] + ((double *)arg10.data)[d+b*1];
}
}
arg10.data = (char *)arg10h;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg11h[d] = arg11h[d] + ((double *)arg11.data)[d+b*1];
}
}
arg11.data = (char *)arg11h;
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[95].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 12);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[95].mpi_time += t2-t1;
OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[95].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_field_summary_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 95;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 95;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 12;
desc->args = (ops_arg*)malloc(12*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
desc->args[8] = arg8;
desc->args[9] = arg9;
desc->args[10] = arg10;
desc->args[11] = arg11;
desc->function = ops_par_loop_field_summary_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(95,"field_summary_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
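// ---------------------------------------------------------------------------
// Illustrative sketch only (not produced by ops.py): the host stub above
// reduces vol/mass/ie/ke/press in two stages -- every CUDA block emits one
// partial sum via ops_reduction_cuda<OPS_INC>, and the host adds the
// per-block partials after mvReductArraysToHost(). A minimal standalone
// version of that pattern, with hypothetical names (block_partial_sum,
// two_stage_sum), could look like this:
#include <cuda_runtime.h>

__global__ void block_partial_sum(const double *in, double *block_out, int n) {
  extern __shared__ double sdata[];
  int tid = threadIdx.x;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  sdata[tid] = (i < n) ? in[i] : 0.0;
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {   // power-of-two block size assumed
    if (tid < s) sdata[tid] += sdata[tid + s];
    __syncthreads();
  }
  if (tid == 0) block_out[blockIdx.x] = sdata[0];  // one partial value per block
}

static double two_stage_sum(const double *d_in, int n) {
  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;
  double *d_partial;
  cudaMalloc(&d_partial, blocks * sizeof(double));
  block_partial_sum<<<blocks, threads, threads * sizeof(double)>>>(d_in, d_partial, n);
  double *h_partial = new double[blocks];
  cudaMemcpy(h_partial, d_partial, blocks * sizeof(double), cudaMemcpyDeviceToHost);
  double total = 0.0;
  for (int b = 0; b < blocks; b++) total += h_partial[b];  // host finishes the reduction
  delete[] h_partial;
  cudaFree(d_partial);
  return total;
}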
|
9218c3a4f7292c9f7e19672777ed64feff3f7750.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<time.h>   // clock_gettime / struct timespec
#include<math.h>   // fabsf used in checkresult
#define BLOCK_SIZE 16
#define GRID_SIZE 160
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
void checkresult(float *ref, float *in, float *out, float *mul, int width){
for(int i = 0 ; i < GRID_SIZE; i++){
for(int j = 0; j < GRID_SIZE; j++){
float sum = 0.0f;
int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
sum += in[start + ii * width + jj] * mul[jj];
}
}
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
if(jj % 2 == 0 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 1)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
}
}
}
}
for(int i = 0; i < SIZE; i++){
if(fabsf(ref[i]-out[i]) > 1.e-6f){
printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
return;
}
}
printf("results checking passed!\n");
}
/* Optimization 2: Memory Optimization by changing computation of array index values */
__global__ void norm(float *in, float *out, float *mul, int width){
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if(tx >= width || ty >= SIZE/width) return;
int start = blockIdx.y * blockDim.y * width + blockIdx.x * blockDim.x;
int index = start + threadIdx.y * width + threadIdx.x;
float sum = 0.0f;
for(int i = 0; i < BLOCK_SIZE; i++){
for(int j = 0; j < BLOCK_SIZE; j++){
sum += in[start + i * width + j] * mul[j];
}
}
if(ty % 2 == 0 && tx % 2 == 0)
out[index] = 2.0 * in[index]/sum;
else if(ty % 2 == 1 && tx % 2 == 0)
out[index] = in[index]/sum;
else if(ty % 2 == 1 && tx % 2 == 1)
out[index] = (-1.0) * in[index]/sum;
else
out[index] = 0.0f;
}
int main(){
float *hA_in = (float *)malloc(SIZE * sizeof(float));
float *hA_out = (float *)malloc(SIZE * sizeof(float));
float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *dA_in, *dA_out, *dB_in;
srand(2016);
for(int i = 0; i < SIZE; i++){
hA_in[i] = (float)rand()/(float)RAND_MAX;
}
for(int i = 0; i < BLOCK_SIZE; i++){
hB_in[i] = (float)rand()/(float)RAND_MAX;
}
hipMalloc((void **)&dA_in, SIZE * sizeof(float));
hipMalloc((void **)&dA_out, SIZE * sizeof(float));
hipMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
hipMemcpy(dA_in, hA_in, SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), hipMemcpyHostToDevice);
struct timespec start, end;
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &start);
hipLaunchKernelGGL(( norm), dim3(grid), dim3(block), 0, 0, dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &end);
printf("kernel time %fs\n", end.tv_sec - start.tv_sec + (end.tv_nsec - start.tv_nsec)/1.e9);
hipMemcpy(hA_out, dA_out, SIZE * sizeof(float), hipMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
}
|
9218c3a4f7292c9f7e19672777ed64feff3f7750.cu
|
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<time.h>   // clock_gettime / struct timespec
#include<math.h>   // fabsf used in checkresult
#define BLOCK_SIZE 16
#define GRID_SIZE 160
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
void checkresult(float *ref, float *in, float *out, float *mul, int width){
for(int i = 0 ; i < GRID_SIZE; i++){
for(int j = 0; j < GRID_SIZE; j++){
float sum = 0.0f;
int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
sum += in[start + ii * width + jj] * mul[jj];
}
}
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
if(jj % 2 == 0 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 1)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
}
}
}
}
for(int i = 0; i < SIZE; i++){
if(fabsf(ref[i]-out[i]) > 1.e-6f){
printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
return;
}
}
printf("results checking passed!\n");
}
/* Optimization 2: Memory Optimization by changing computation of array index values */
__global__ void norm(float *in, float *out, float *mul, int width){
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if(tx >= width || ty >= SIZE/width) return;
int start = blockIdx.y * blockDim.y * width + blockIdx.x * blockDim.x;
int index = start + threadIdx.y * width + threadIdx.x;
float sum = 0.0f;
for(int i = 0; i < BLOCK_SIZE; i++){
for(int j = 0; j < BLOCK_SIZE; j++){
sum += in[start + i * width + j] * mul[j];
}
}
if(ty % 2 == 0 && tx % 2 == 0)
out[index] = 2.0 * in[index]/sum;
else if(ty % 2 == 1 && tx % 2 == 0)
out[index] = in[index]/sum;
else if(ty % 2 == 1 && tx % 2 == 1)
out[index] = (-1.0) * in[index]/sum;
else
out[index] = 0.0f;
}
int main(){
float *hA_in = (float *)malloc(SIZE * sizeof(float));
float *hA_out = (float *)malloc(SIZE * sizeof(float));
float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *dA_in, *dA_out, *dB_in;
srand(2016);
for(int i = 0; i < SIZE; i++){
hA_in[i] = (float)rand()/(float)RAND_MAX;
}
for(int i = 0; i < BLOCK_SIZE; i++){
hB_in[i] = (float)rand()/(float)RAND_MAX;
}
cudaMalloc((void **)&dA_in, SIZE * sizeof(float));
cudaMalloc((void **)&dA_out, SIZE * sizeof(float));
cudaMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
cudaMemcpy(dA_in, hA_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), cudaMemcpyHostToDevice);
struct timespec start, end;
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &start);
norm<<<grid, block>>>(dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &end);
printf("kernel time %fs\n", end.tv_sec - start.tv_sec + (end.tv_nsec - start.tv_nsec)/1.e9);
cudaMemcpy(hA_out, dA_out, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
}
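// ---------------------------------------------------------------------------
// Optional sketch (not part of the original benchmark): the same launch can
// also be timed with CUDA events instead of clock_gettime() around explicit
// cudaDeviceSynchronize() calls. The helper name time_norm_ms is
// hypothetical; it reuses the norm kernel defined above and returns the
// elapsed time in milliseconds.
static float time_norm_ms(float *dA_in, float *dA_out, float *dB_in, int width,
                          dim3 grid, dim3 block) {
  cudaEvent_t beg, end;
  cudaEventCreate(&beg);
  cudaEventCreate(&end);
  cudaEventRecord(beg);                  // enqueue start marker on the default stream
  norm<<<grid, block>>>(dA_in, dA_out, dB_in, width);
  cudaEventRecord(end);                  // enqueue stop marker after the kernel
  cudaEventSynchronize(end);             // wait until the kernel and stop marker complete
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, beg, end);
  cudaEventDestroy(beg);
  cudaEventDestroy(end);
  return ms;
}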
|
be9156c69ee162834a3d9a61735ba94b3be711d9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pairwise_hist.cuh"
#include "split_properties_helpers.cuh"
#include "compute_pair_hist_loop.cuh"
#include <hip/hip_cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel {
//TODO(noxoomo): tune it
template<bool IsFullPass>
struct THalfBytePairwiseHistUnrollTrait {
static constexpr int InnerUnroll() {
#if __CUDA_ARCH__ <= 350
return 2;
#elif __CUDA_ARCH__ < 700
return 2;
#else
return 8;//IsFullPass ? 4 : 8;
#endif
}
static constexpr int OuterUnroll() {
#if __CUDA_ARCH__ <= 350
return 4;
#elif __CUDA_ARCH__ < 700
return 2;
#else
return 1;
#endif
}
};
template<int BLOCK_SIZE, class TCmpBins = TCmpBinsWithoutOneHot>
struct TPairHistHalfByte {
TCmpBins CmpBinsFunc;
float* Slice;
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 1024 * (threadIdx.x / 32);
//we store 4 histograms per block
// x8 feature and x4 histograms, though histStart = blockIdx * 16
return warpOffset + (threadIdx.x & 16);
}
__forceinline__ __device__ TPairHistHalfByte(float* buff, TCmpBins cmpBinsFunc)
: CmpBinsFunc(cmpBinsFunc) {
Slice = buff;
for (int i = threadIdx.x; i < BLOCK_SIZE * 32; i += BLOCK_SIZE) {
Slice[i] = 0;
}
Slice += SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPair(const ui32 ci1,
const ui32 ci2,
const float w) {
thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
const int shift = threadIdx.x & 14;
const ui32 bins1 = RotateRight(flag ? ci2 : ci1, 2 * shift);
const ui32 bins2 = RotateRight(flag ? ci1 : ci2, 2 * shift);
#pragma unroll
for (int i = 0; i < 8; i++) {
const int f = ((shift + 2 * i) & 14);
const int bin1 = (bins1 >> (28 - 4 * i)) & 15;
const int bin2 = (bins2 >> (28 - 4 * i)) & 15;
const int tmp = (CmpBinsFunc.Compare(i, bin1, bin2, flag) ? 0 : 512) + f;
const int offset1 = 32 * bin1 + tmp + flag;
const int offset2 = 32 * bin2 + tmp + !flag;
groupTile.sync();
Slice[offset1] += w;
groupTile.sync();
Slice[offset2] += w;
}
}
#if __CUDA_ARCH__ < 700
template <int N>
__forceinline__ __device__ void AddPairs(const ui32* ci1,
const ui32* ci2,
const float* w) {
#pragma unroll
for (int k = 0; k < N; ++k) {
AddPair(ci1[k], ci2[k], w[k]);
}
}
#else
template <int N>
__forceinline__ __device__ void AddPairs(const ui32* ci1,
const ui32* ci2,
const float* w) {
thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
const int shift = threadIdx.x & 14;
ui32 bins1[N];
ui32 bins2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins1[k] = RotateRight(flag ? ci2[k] : ci1[k], 2 * shift);
bins2[k] = RotateRight(flag ? ci1[k] : ci2[k], 2 * shift);
}
#pragma unroll
for (int i = 0; i < 8; i++) {
const int f = ((shift + 2 * i) & 14);
int bin1[N];
int bin2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bin1[k] = (bins1[k] >> (28 - 4 * i)) & 15;
bin2[k] = (bins2[k] >> (28 - 4 * i)) & 15;
}
int offset1[N];
int offset2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
const int tmp = (CmpBinsFunc.Compare(i, bin1[k], bin2[k], flag) ? 0 : 512) + f;
offset1[k] = 32 * bin1[k] + tmp + flag;
offset2[k] = 32 * bin2[k] + tmp + !flag;
}
groupTile.sync();
#pragma unroll
for (int k = 0; k < N; ++k) {
Slice[offset1[k]] += w[k];
}
groupTile.sync();
#pragma unroll
for (int k = 0; k < N; ++k) {
Slice[offset2[k]] += w[k];
}
}
}
#endif
__forceinline__ __device__ void Reduce() {
Slice -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Slice[i];
}
Slice[warpHistSize + start] = sum;
}
}
__syncthreads();
const int maxFoldCount = 16;
const int fold = (threadIdx.x >> 1) & 15;
const int f = threadIdx.x / 32;
if (threadIdx.x < 256) {
float weightLeq = 0;
float weightGe = 0;
const bool isSecondBin = (threadIdx.x & 1);
if (fold < maxFoldCount) {
const volatile float* __restrict__ src = Slice
+ 1024 //warpHistSize
+ 32 * fold
+ 2 * f
+ isSecondBin;
weightLeq = src[0] + src[16];
weightGe = src[512] + src[528];
Slice[4 * (maxFoldCount * f + fold) + isSecondBin] = weightLeq;
Slice[4 * (maxFoldCount * f + fold) + 2 + isSecondBin] = weightGe;
}
}
__syncthreads();
}
};
template<int BlockSize, int N, int OuterUnroll>
__forceinline__ __device__ void ComputeSplitPropertiesHalfBytePass(const TCFeature* feature, int fCount,
const ui32* __restrict cindex,
const uint2* __restrict pairs,
const float* __restrict weight,
const TDataPartition* partition,
int blockIdx, int blockCount,
float* __restrict histogram,
float* __restrict smem) {
const int minDocsPerBlock = BlockSize * N * 8;
const int activeBlockCount = min((partition->Size + minDocsPerBlock - 1) / minDocsPerBlock, blockCount);
if (blockIdx >= activeBlockCount) {
return;
}
#define RUN_COMPUTE_HIST() \
ComputePairHistogram < BlockSize, N, OuterUnroll, THist >(partition->Offset, partition->Size,\
cindex,\
pairs, weight, \
blockIdx, activeBlockCount, \
hist);
if (HasOneHotFeatures(feature, fCount, reinterpret_cast<int*>(smem))) {
using TCmpBins = TCmpBinsWithOneHot<8>;
TCmpBins cmpBins(feature, fCount);
using THist = TPairHistHalfByte<BlockSize, TCmpBins>;
THist hist(smem, cmpBins);
RUN_COMPUTE_HIST();
} else {
using THist = TPairHistHalfByte<BlockSize>;
THist hist(smem, TCmpBinsWithoutOneHot());
RUN_COMPUTE_HIST();
}
#undef RUN_COMPUTE_HIST
if (threadIdx.x < 256) {
const int histId = threadIdx.x & 3;
const int fold = (threadIdx.x >> 2) & 15;
const int firstFid = (threadIdx.x >> 6) & 3;
for (int fid = firstFid; fid < fCount; fid += 4) {
const ui32 bfStart = feature[fid].FirstFoldIndex;
if (fold < feature[fid].Folds) {
const int readOffset = 4 * (16 * fid + fold) + histId;
const float val = smem[readOffset];
if (abs(val) > 1e-20f) {
atomicAdd(histogram + 4 * bfStart + 4 * fold + histId, val);
}
}
}
}
}
template<int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ >= 700
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesHalfBytePairs(const TCFeature* feature, int fCount,
const ui32* cindex,
const uint2* pairs,
const float* weight,
const TDataPartition* partition,
int histLineSize,
float* histogram) {
const int blocksPerPart = gridDim.x / ((fCount + 7) / 8);
const int localBlockIdx = blockIdx.x % blocksPerPart;
//histogram line size - size of one part hist.
const int featureOffset = (blockIdx.x / blocksPerPart) * 8;
feature += featureOffset;
cindex += feature->Offset;
fCount = min(fCount - featureOffset, 8);
if (IsFullPass) {
partition += blockIdx.y;
histogram += blockIdx.y * histLineSize * 4ULL;
} else {
const int depth = (int)log2((float)gridDim.y);
int partId = GetPairwisePartIdToCalculate(partition);
partition += partId;
histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * histLineSize * 4ULL;
}
if (partition->Size == 0) {
return;
}
__shared__ float localHist[32 * BlockSize];
const int innerUnroll = THalfBytePairwiseHistUnrollTrait<IsFullPass>::InnerUnroll();
const int outerUnroll = THalfBytePairwiseHistUnrollTrait<IsFullPass>::OuterUnroll();
ComputeSplitPropertiesHalfBytePass<BlockSize, innerUnroll, outerUnroll>(feature, fCount, cindex, pairs,
weight, partition,
localBlockIdx, blocksPerPart,
histogram, &localHist[0]);
}
void ComputePairwiseHistogramHalfByte(const TCFeature* features, const TCFeature*,
const ui32 featureCount,
const ui32 halfByteFeatureCount,
const ui32* compressedIndex,
const uint2* pairs, ui32 pairCount,
const float* weight,
const TDataPartition* partition,
ui32 partCount,
ui32 histLineSize,
bool fullPass,
float* histogram,
int parallelStreams,
TCudaStream stream) {
assert(featureCount == halfByteFeatureCount);
if (featureCount > 0) {
const int blockSize = 384;
dim3 numBlocks;
numBlocks.x = (featureCount + 7) / 8;
numBlocks.y = fullPass ? partCount : partCount / 4;
numBlocks.z = fullPass ? 1 : 3;
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;
const int blockPerFeatureMultiplier = CeilDivide<int>(TArchProps::SMCount() * blocksPerSm * 4, (parallelStreams * numBlocks.x * numBlocks.y * numBlocks.z));
numBlocks.x *= blockPerFeatureMultiplier;
#define NB_HIST(IS_FULL) \
ComputeSplitPropertiesHalfBytePairs < blockSize, IS_FULL > << <numBlocks, blockSize, 0, stream>>>(\
features, featureCount, compressedIndex, pairs,\
weight, partition, histLineSize, histogram);
if (fullPass) {
NB_HIST(true)
} else {
NB_HIST(false)
}
#undef NB_HIST
}
}
}
|
be9156c69ee162834a3d9a61735ba94b3be711d9.cu
|
#include "pairwise_hist.cuh"
#include "split_properties_helpers.cuh"
#include "compute_pair_hist_loop.cuh"
#include <cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel {
//TODO(noxoomo): tune it
template<bool IsFullPass>
struct THalfBytePairwiseHistUnrollTrait {
static constexpr int InnerUnroll() {
#if __CUDA_ARCH__ <= 350
return 2;
#elif __CUDA_ARCH__ < 700
return 2;
#else
return 8;//IsFullPass ? 4 : 8;
#endif
}
static constexpr int OuterUnroll() {
#if __CUDA_ARCH__ <= 350
return 4;
#elif __CUDA_ARCH__ < 700
return 2;
#else
return 1;
#endif
}
};
template<int BLOCK_SIZE, class TCmpBins = TCmpBinsWithoutOneHot>
struct TPairHistHalfByte {
TCmpBins CmpBinsFunc;
float* Slice;
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 1024 * (threadIdx.x / 32);
//we store 4 histograms per block
// x8 feature and x4 histograms, though histStart = blockIdx * 16
return warpOffset + (threadIdx.x & 16);
}
__forceinline__ __device__ TPairHistHalfByte(float* buff, TCmpBins cmpBinsFunc)
: CmpBinsFunc(cmpBinsFunc) {
Slice = buff;
for (int i = threadIdx.x; i < BLOCK_SIZE * 32; i += BLOCK_SIZE) {
Slice[i] = 0;
}
Slice += SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPair(const ui32 ci1,
const ui32 ci2,
const float w) {
thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
const int shift = threadIdx.x & 14;
const ui32 bins1 = RotateRight(flag ? ci2 : ci1, 2 * shift);
const ui32 bins2 = RotateRight(flag ? ci1 : ci2, 2 * shift);
#pragma unroll
for (int i = 0; i < 8; i++) {
const int f = ((shift + 2 * i) & 14);
const int bin1 = (bins1 >> (28 - 4 * i)) & 15;
const int bin2 = (bins2 >> (28 - 4 * i)) & 15;
const int tmp = (CmpBinsFunc.Compare(i, bin1, bin2, flag) ? 0 : 512) + f;
const int offset1 = 32 * bin1 + tmp + flag;
const int offset2 = 32 * bin2 + tmp + !flag;
groupTile.sync();
Slice[offset1] += w;
groupTile.sync();
Slice[offset2] += w;
}
}
#if __CUDA_ARCH__ < 700
template <int N>
__forceinline__ __device__ void AddPairs(const ui32* ci1,
const ui32* ci2,
const float* w) {
#pragma unroll
for (int k = 0; k < N; ++k) {
AddPair(ci1[k], ci2[k], w[k]);
}
}
#else
template <int N>
__forceinline__ __device__ void AddPairs(const ui32* ci1,
const ui32* ci2,
const float* w) {
thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
const int shift = threadIdx.x & 14;
ui32 bins1[N];
ui32 bins2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins1[k] = RotateRight(flag ? ci2[k] : ci1[k], 2 * shift);
bins2[k] = RotateRight(flag ? ci1[k] : ci2[k], 2 * shift);
}
#pragma unroll
for (int i = 0; i < 8; i++) {
const int f = ((shift + 2 * i) & 14);
int bin1[N];
int bin2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bin1[k] = (bins1[k] >> (28 - 4 * i)) & 15;
bin2[k] = (bins2[k] >> (28 - 4 * i)) & 15;
}
int offset1[N];
int offset2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
const int tmp = (CmpBinsFunc.Compare(i, bin1[k], bin2[k], flag) ? 0 : 512) + f;
offset1[k] = 32 * bin1[k] + tmp + flag;
offset2[k] = 32 * bin2[k] + tmp + !flag;
}
groupTile.sync();
#pragma unroll
for (int k = 0; k < N; ++k) {
Slice[offset1[k]] += w[k];
}
groupTile.sync();
#pragma unroll
for (int k = 0; k < N; ++k) {
Slice[offset2[k]] += w[k];
}
}
}
#endif
__forceinline__ __device__ void Reduce() {
Slice -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Slice[i];
}
Slice[warpHistSize + start] = sum;
}
}
__syncthreads();
const int maxFoldCount = 16;
const int fold = (threadIdx.x >> 1) & 15;
const int f = threadIdx.x / 32;
if (threadIdx.x < 256) {
float weightLeq = 0;
float weightGe = 0;
const bool isSecondBin = (threadIdx.x & 1);
if (fold < maxFoldCount) {
const volatile float* __restrict__ src = Slice
+ 1024 //warpHistSize
+ 32 * fold
+ 2 * f
+ isSecondBin;
weightLeq = src[0] + src[16];
weightGe = src[512] + src[528];
Slice[4 * (maxFoldCount * f + fold) + isSecondBin] = weightLeq;
Slice[4 * (maxFoldCount * f + fold) + 2 + isSecondBin] = weightGe;
}
}
__syncthreads();
}
};
template<int BlockSize, int N, int OuterUnroll>
__forceinline__ __device__ void ComputeSplitPropertiesHalfBytePass(const TCFeature* feature, int fCount,
const ui32* __restrict cindex,
const uint2* __restrict pairs,
const float* __restrict weight,
const TDataPartition* partition,
int blockIdx, int blockCount,
float* __restrict histogram,
float* __restrict smem) {
const int minDocsPerBlock = BlockSize * N * 8;
const int activeBlockCount = min((partition->Size + minDocsPerBlock - 1) / minDocsPerBlock, blockCount);
if (blockIdx >= activeBlockCount) {
return;
}
#define RUN_COMPUTE_HIST() \
ComputePairHistogram < BlockSize, N, OuterUnroll, THist >(partition->Offset, partition->Size,\
cindex,\
pairs, weight, \
blockIdx, activeBlockCount, \
hist);
if (HasOneHotFeatures(feature, fCount, reinterpret_cast<int*>(smem))) {
using TCmpBins = TCmpBinsWithOneHot<8>;
TCmpBins cmpBins(feature, fCount);
using THist = TPairHistHalfByte<BlockSize, TCmpBins>;
THist hist(smem, cmpBins);
RUN_COMPUTE_HIST();
} else {
using THist = TPairHistHalfByte<BlockSize>;
THist hist(smem, TCmpBinsWithoutOneHot());
RUN_COMPUTE_HIST();
}
#undef RUN_COMPUTE_HIST
if (threadIdx.x < 256) {
const int histId = threadIdx.x & 3;
const int fold = (threadIdx.x >> 2) & 15;
const int firstFid = (threadIdx.x >> 6) & 3;
for (int fid = firstFid; fid < fCount; fid += 4) {
const ui32 bfStart = feature[fid].FirstFoldIndex;
if (fold < feature[fid].Folds) {
const int readOffset = 4 * (16 * fid + fold) + histId;
const float val = smem[readOffset];
if (abs(val) > 1e-20f) {
atomicAdd(histogram + 4 * bfStart + 4 * fold + histId, val);
}
}
}
}
}
template<int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ >= 700
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesHalfBytePairs(const TCFeature* feature, int fCount,
const ui32* cindex,
const uint2* pairs,
const float* weight,
const TDataPartition* partition,
int histLineSize,
float* histogram) {
const int blocksPerPart = gridDim.x / ((fCount + 7) / 8);
const int localBlockIdx = blockIdx.x % blocksPerPart;
//histogram line size - size of one part hist.
const int featureOffset = (blockIdx.x / blocksPerPart) * 8;
feature += featureOffset;
cindex += feature->Offset;
fCount = min(fCount - featureOffset, 8);
if (IsFullPass) {
partition += blockIdx.y;
histogram += blockIdx.y * histLineSize * 4ULL;
} else {
const int depth = (int)log2((float)gridDim.y);
int partId = GetPairwisePartIdToCalculate(partition);
partition += partId;
histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * histLineSize * 4ULL;
}
if (partition->Size == 0) {
return;
}
__shared__ float localHist[32 * BlockSize];
const int innerUnroll = THalfBytePairwiseHistUnrollTrait<IsFullPass>::InnerUnroll();
const int outerUnroll = THalfBytePairwiseHistUnrollTrait<IsFullPass>::OuterUnroll();
ComputeSplitPropertiesHalfBytePass<BlockSize, innerUnroll, outerUnroll>(feature, fCount, cindex, pairs,
weight, partition,
localBlockIdx, blocksPerPart,
histogram, &localHist[0]);
}
void ComputePairwiseHistogramHalfByte(const TCFeature* features, const TCFeature*,
const ui32 featureCount,
const ui32 halfByteFeatureCount,
const ui32* compressedIndex,
const uint2* pairs, ui32 pairCount,
const float* weight,
const TDataPartition* partition,
ui32 partCount,
ui32 histLineSize,
bool fullPass,
float* histogram,
int parallelStreams,
TCudaStream stream) {
assert(featureCount == halfByteFeatureCount);
if (featureCount > 0) {
const int blockSize = 384;
dim3 numBlocks;
numBlocks.x = (featureCount + 7) / 8;
numBlocks.y = fullPass ? partCount : partCount / 4;
numBlocks.z = fullPass ? 1 : 3;
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;
const int blockPerFeatureMultiplier = CeilDivide<int>(TArchProps::SMCount() * blocksPerSm * 4, (parallelStreams * numBlocks.x * numBlocks.y * numBlocks.z));
numBlocks.x *= blockPerFeatureMultiplier;
#define NB_HIST(IS_FULL) \
ComputeSplitPropertiesHalfBytePairs < blockSize, IS_FULL > << <numBlocks, blockSize, 0, stream>>>(\
features, featureCount, compressedIndex, pairs,\
weight, partition, histLineSize, histogram);
if (fullPass) {
NB_HIST(true)
} else {
NB_HIST(false)
}
#undef NB_HIST
}
}
}
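// ---------------------------------------------------------------------------
// Minimal illustration (not used by the histogram code itself) of the
// tile-level synchronisation that TPairHistHalfByte::AddPair/AddPairs rely
// on: a 16-thread tile agrees on a point between two differently indexed
// shared-memory updates via thread_block_tile::sync(). It relies on the
// `using namespace cooperative_groups;` directive above; the kernel name and
// buffer layout are hypothetical, and blockDim.x is assumed to be <= 512.
__global__ void TileSyncDemo(float* out) {
    __shared__ float buf[512];
    thread_block_tile<16> tile = tiled_partition<16>(this_thread_block());
    const int tileBase = (threadIdx.x / 16) * 16;   // each tile owns 16 slots
    const int lane = tile.thread_rank();
    buf[tileBase + lane] = 1.0f;                    // first write by every lane
    tile.sync();                                    // whole tile finished writing...
    buf[tileBase + (15 - lane)] += 1.0f;            // ...before the permuted second write
    tile.sync();
    out[blockIdx.x * blockDim.x + threadIdx.x] = buf[tileBase + lane];  // always 2.0f
}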
|
ff343fb4932f8f67abe6a09d76165c290878463e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Compute the center of each group.
*/
extern "C" __global__ void computeGroupCenters(const real4* __restrict__ posq, const int* __restrict__ groupParticles,
const real* __restrict__ groupWeights, const int* __restrict__ groupOffsets, real4* __restrict__ centerPositions) {
__shared__ volatile real3 temp[64];
for (int group = blockIdx.x; group < NUM_GROUPS; group += gridDim.x) {
// The threads in this block work together to compute the center one group.
int firstIndex = groupOffsets[group];
int lastIndex = groupOffsets[group+1];
real3 center = make_real3(0, 0, 0);
for (int index = threadIdx.x; index < lastIndex-firstIndex; index += blockDim.x) {
int atom = groupParticles[firstIndex+index];
real weight = groupWeights[firstIndex+index];
real4 pos = posq[atom];
center.x += weight*pos.x;
center.y += weight*pos.y;
center.z += weight*pos.z;
}
// Sum the values.
int thread = threadIdx.x;
temp[thread].x = center.x;
temp[thread].y = center.y;
temp[thread].z = center.z;
__syncthreads();
if (thread < 32) {
temp[thread].x += temp[thread+32].x;
temp[thread].y += temp[thread+32].y;
temp[thread].z += temp[thread+32].z;
if (thread < 16) {
temp[thread].x += temp[thread+16].x;
temp[thread].y += temp[thread+16].y;
temp[thread].z += temp[thread+16].z;
}
if (thread < 8) {
temp[thread].x += temp[thread+8].x;
temp[thread].y += temp[thread+8].y;
temp[thread].z += temp[thread+8].z;
}
if (thread < 4) {
temp[thread].x += temp[thread+4].x;
temp[thread].y += temp[thread+4].y;
temp[thread].z += temp[thread+4].z;
}
if (thread < 2) {
temp[thread].x += temp[thread+2].x;
temp[thread].y += temp[thread+2].y;
temp[thread].z += temp[thread+2].z;
}
}
if (thread == 0)
centerPositions[group] = make_real4(temp[0].x+temp[1].x, temp[0].y+temp[1].y, temp[0].z+temp[1].z, 0);
}
}
/**
* Convert a real4 to a real3 by removing its last element.
*/
inline __device__ real3 trim(real4 v) {
return make_real3(v.x, v.y, v.z);
}
/**
* Compute the difference between two vectors, setting the fourth component to the squared magnitude.
*/
inline __device__ real4 delta(real4 vec1, real4 vec2, bool periodic, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ) {
real4 result = make_real4(vec1.x-vec2.x, vec1.y-vec2.y, vec1.z-vec2.z, 0);
if (periodic)
APPLY_PERIODIC_TO_DELTA(result);
result.w = result.x*result.x + result.y*result.y + result.z*result.z;
return result;
}
/**
* Compute the angle between two vectors. The w component of each vector should contain the squared magnitude.
*/
__device__ real computeAngle(real4 vec1, real4 vec2) {
real dotProduct = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
real cosine = dotProduct*RSQRT(vec1.w*vec2.w);
real angle;
if (cosine > 0.99f || cosine < -0.99f) {
// We're close to the singularity in acos(), so take the cross product and use asin() instead.
real3 crossProduct = cross(vec1, vec2);
real scale = vec1.w*vec2.w;
angle = ASIN(SQRT(dot(crossProduct, crossProduct)/scale));
if (cosine < 0.0f)
angle = M_PI-angle;
}
else
angle = ACOS(cosine);
return angle;
}
/**
* Compute the cross product of two vectors, setting the fourth component to the squared magnitude.
*/
inline __device__ real4 computeCross(real4 vec1, real4 vec2) {
real3 cp = cross(vec1, vec2);
return make_real4(cp.x, cp.y, cp.z, cp.x*cp.x+cp.y*cp.y+cp.z*cp.z);
}
/**
* Compute the forces on groups based on the bonds.
*/
extern "C" __global__ void computeGroupForces(unsigned long long* __restrict__ groupForce, mixed* __restrict__ energyBuffer, const real4* __restrict__ centerPositions,
const int* __restrict__ bondGroups, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ
EXTRA_ARGS) {
mixed energy = 0;
INIT_PARAM_DERIVS
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < NUM_BONDS; index += blockDim.x*gridDim.x) {
COMPUTE_FORCE
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
SAVE_PARAM_DERIVS
}
/**
* Apply the forces from the group centers to the individual atoms.
*/
extern "C" __global__ void applyForcesToAtoms(const int* __restrict__ groupParticles, const real* __restrict__ groupWeights, const int* __restrict__ groupOffsets,
const long long* __restrict__ groupForce, unsigned long long* __restrict__ atomForce) {
for (int group = blockIdx.x; group < NUM_GROUPS; group += gridDim.x) {
long long fx = groupForce[group];
long long fy = groupForce[group+NUM_GROUPS];
long long fz = groupForce[group+NUM_GROUPS*2];
int firstIndex = groupOffsets[group];
int lastIndex = groupOffsets[group+1];
for (int index = threadIdx.x; index < lastIndex-firstIndex; index += blockDim.x) {
int atom = groupParticles[firstIndex+index];
real weight = groupWeights[firstIndex+index];
atomicAdd(&atomForce[atom], static_cast<unsigned long long>((long long) (fx*weight)));
atomicAdd(&atomForce[atom+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (fy*weight)));
atomicAdd(&atomForce[atom+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (fz*weight)));
}
}
}
|
ff343fb4932f8f67abe6a09d76165c290878463e.cu
|
/**
* Compute the center of each group.
*/
extern "C" __global__ void computeGroupCenters(const real4* __restrict__ posq, const int* __restrict__ groupParticles,
const real* __restrict__ groupWeights, const int* __restrict__ groupOffsets, real4* __restrict__ centerPositions) {
__shared__ volatile real3 temp[64];
for (int group = blockIdx.x; group < NUM_GROUPS; group += gridDim.x) {
// The threads in this block work together to compute the center one group.
int firstIndex = groupOffsets[group];
int lastIndex = groupOffsets[group+1];
real3 center = make_real3(0, 0, 0);
for (int index = threadIdx.x; index < lastIndex-firstIndex; index += blockDim.x) {
int atom = groupParticles[firstIndex+index];
real weight = groupWeights[firstIndex+index];
real4 pos = posq[atom];
center.x += weight*pos.x;
center.y += weight*pos.y;
center.z += weight*pos.z;
}
// Sum the values.
int thread = threadIdx.x;
temp[thread].x = center.x;
temp[thread].y = center.y;
temp[thread].z = center.z;
__syncthreads();
if (thread < 32) {
temp[thread].x += temp[thread+32].x;
temp[thread].y += temp[thread+32].y;
temp[thread].z += temp[thread+32].z;
if (thread < 16) {
temp[thread].x += temp[thread+16].x;
temp[thread].y += temp[thread+16].y;
temp[thread].z += temp[thread+16].z;
}
if (thread < 8) {
temp[thread].x += temp[thread+8].x;
temp[thread].y += temp[thread+8].y;
temp[thread].z += temp[thread+8].z;
}
if (thread < 4) {
temp[thread].x += temp[thread+4].x;
temp[thread].y += temp[thread+4].y;
temp[thread].z += temp[thread+4].z;
}
if (thread < 2) {
temp[thread].x += temp[thread+2].x;
temp[thread].y += temp[thread+2].y;
temp[thread].z += temp[thread+2].z;
}
}
if (thread == 0)
centerPositions[group] = make_real4(temp[0].x+temp[1].x, temp[0].y+temp[1].y, temp[0].z+temp[1].z, 0);
}
}
/**
* Convert a real4 to a real3 by removing its last element.
*/
inline __device__ real3 trim(real4 v) {
return make_real3(v.x, v.y, v.z);
}
/**
* Compute the difference between two vectors, setting the fourth component to the squared magnitude.
*/
inline __device__ real4 delta(real4 vec1, real4 vec2, bool periodic, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ) {
real4 result = make_real4(vec1.x-vec2.x, vec1.y-vec2.y, vec1.z-vec2.z, 0);
if (periodic)
APPLY_PERIODIC_TO_DELTA(result);
result.w = result.x*result.x + result.y*result.y + result.z*result.z;
return result;
}
/**
* Compute the angle between two vectors. The w component of each vector should contain the squared magnitude.
*/
__device__ real computeAngle(real4 vec1, real4 vec2) {
real dotProduct = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
real cosine = dotProduct*RSQRT(vec1.w*vec2.w);
real angle;
if (cosine > 0.99f || cosine < -0.99f) {
// We're close to the singularity in acos(), so take the cross product and use asin() instead.
real3 crossProduct = cross(vec1, vec2);
real scale = vec1.w*vec2.w;
angle = ASIN(SQRT(dot(crossProduct, crossProduct)/scale));
if (cosine < 0.0f)
angle = M_PI-angle;
}
else
angle = ACOS(cosine);
return angle;
}
/**
* Compute the cross product of two vectors, setting the fourth component to the squared magnitude.
*/
inline __device__ real4 computeCross(real4 vec1, real4 vec2) {
real3 cp = cross(vec1, vec2);
return make_real4(cp.x, cp.y, cp.z, cp.x*cp.x+cp.y*cp.y+cp.z*cp.z);
}
/**
* Compute the forces on groups based on the bonds.
*/
extern "C" __global__ void computeGroupForces(unsigned long long* __restrict__ groupForce, mixed* __restrict__ energyBuffer, const real4* __restrict__ centerPositions,
const int* __restrict__ bondGroups, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ
EXTRA_ARGS) {
mixed energy = 0;
INIT_PARAM_DERIVS
for (int index = blockIdx.x*blockDim.x+threadIdx.x; index < NUM_BONDS; index += blockDim.x*gridDim.x) {
COMPUTE_FORCE
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
SAVE_PARAM_DERIVS
}
/**
* Apply the forces from the group centers to the individual atoms.
*/
extern "C" __global__ void applyForcesToAtoms(const int* __restrict__ groupParticles, const real* __restrict__ groupWeights, const int* __restrict__ groupOffsets,
const long long* __restrict__ groupForce, unsigned long long* __restrict__ atomForce) {
for (int group = blockIdx.x; group < NUM_GROUPS; group += gridDim.x) {
long long fx = groupForce[group];
long long fy = groupForce[group+NUM_GROUPS];
long long fz = groupForce[group+NUM_GROUPS*2];
int firstIndex = groupOffsets[group];
int lastIndex = groupOffsets[group+1];
for (int index = threadIdx.x; index < lastIndex-firstIndex; index += blockDim.x) {
int atom = groupParticles[firstIndex+index];
real weight = groupWeights[firstIndex+index];
atomicAdd(&atomForce[atom], static_cast<unsigned long long>((long long) (fx*weight)));
atomicAdd(&atomForce[atom+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (fy*weight)));
atomicAdd(&atomForce[atom+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (fz*weight)));
}
}
}
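// ---------------------------------------------------------------------------
// Standalone sketch (not part of the OpenMM kernel source above): the block
// sum in computeGroupCenters is a hand-unrolled, warp-synchronous tree over a
// 64-entry shared array. The same idea written as a plain loop with explicit
// __syncthreads() looks like this; the names blockSum64 and demoReduce are
// hypothetical, the types are plain float rather than the real/real3 macros,
// and blockDim.x is assumed to be exactly 64.
__device__ float blockSum64(float value) {
    __shared__ float scratch[64];
    int t = threadIdx.x;
    scratch[t] = value;
    __syncthreads();
    for (int offset = 32; offset > 0; offset >>= 1) {
        if (t < offset)
            scratch[t] += scratch[t + offset];
        __syncthreads();                 // explicit barrier instead of the unrolled version
    }
    return scratch[0];                   // every thread reads the block total
}

extern "C" __global__ void demoReduce(const float* __restrict__ in, float* __restrict__ out) {
    float total = blockSum64(in[blockIdx.x * 64 + threadIdx.x]);
    if (threadIdx.x == 0)
        out[blockIdx.x] = total;
}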
|
7b7e9869aeffa6bab6d34399330fab5ade79e0f2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <cmath>
#include <ctime>
#include "common/book.h"
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
using namespace std;
typedef float FLOAT;
// #define N 20
/** Declare a 2D texture reference (texture memory) */
texture<FLOAT,2> tex2DRef;
__global__ void kernel(FLOAT *dev_b,const int height,const int width)
{
    int x=threadIdx.x;
    int y=threadIdx.y;
    if(x>=width || y>=height) return;      // guard both texture axes
    int idx=x+y*width;                     // row-major index into the N-element output
    dev_b[idx]=tex2D(tex2DRef,x,y)+2;
}
int main()
{
const int N=20;
int width=5;
int height=N/width;
int nBytes=N*sizeof(FLOAT);
FLOAT (*host_a)[width];
// FLOAT (*d_f)[width],
FLOAT *dev_a;
HANDLE_ERROR(hipHostMalloc((void **)&host_a,nBytes));
for(int i=0;i<height;++i)
{
for(int j=0;j<width;++j)
{
host_a[i][j]=1;
}
}
// Bind the texture reference to the pitched global-memory allocation
size_t pitch;
HANDLE_ERROR( hipMallocPitch((void**)&dev_a, &pitch, width*sizeof(FLOAT), height) );
HANDLE_ERROR( hipMemcpy2D(dev_a, // device destination
pitch, // device pitch (calculated above)
host_a, // src on host
width*sizeof(float), // pitch on src (no padding so just width of row)
width*sizeof(float), // width of data in bytes
height, // height of data
hipMemcpyHostToDevice) );
// hipChannelFormatDesc desc = hipCreateChannelDesc<FLOAT>();
HANDLE_ERROR(hipBindTexture2D(NULL,tex2DRef,dev_a,tex2DRef.channelDesc,
width,height,pitch));
FLOAT *dev_b=NULL,*host_b=NULL;
HANDLE_ERROR(hipHostMalloc((void **)&host_b,nBytes));
HANDLE_ERROR(hipMalloc((void **)&dev_b,nBytes));
dim3 threads(32,32);
hipLaunchKernelGGL(( kernel), dim3(1),dim3(threads), 0, 0, dev_b,N/width,width);
HANDLE_ERROR(hipMemcpy(host_b,dev_b,nBytes,hipMemcpyDeviceToHost));
// hipDeviceSynchronize(); // wait for the GPU to finish (one of several ways to synchronize)
// print
for(int i=0;i<N;++i) cout<<host_b[i]<<" ";
HANDLE_ERROR(hipUnbindTexture(tex2DRef)); // unbind the texture
HANDLE_ERROR(hipFree(dev_a));
HANDLE_ERROR(hipFree(dev_b));
HANDLE_ERROR(hipHostFree(host_a));
HANDLE_ERROR(hipHostFree(host_b));
return 0;
}
|
7b7e9869aeffa6bab6d34399330fab5ade79e0f2.cu
|
#include <iostream>
#include <cuda.h>
#include <cmath>
#include <ctime>
#include "common/book.h"
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
using namespace std;
typedef float FLOAT;
// #define N 20
/** Declare a 2D texture reference (texture memory) */
texture<FLOAT,2> tex2DRef;
__global__ void kernel(FLOAT *dev_b,const int height,const int width)
{
    int x=threadIdx.x;
    int y=threadIdx.y;
    if(x>=width || y>=height) return;      // guard both texture axes
    int idx=x+y*width;                     // row-major index into the N-element output
    dev_b[idx]=tex2D(tex2DRef,x,y)+2;
}
int main()
{
const int N=20;
int width=5;
int height=N/width;
int nBytes=N*sizeof(FLOAT);
FLOAT (*host_a)[width];
// FLOAT (*d_f)[width],
FLOAT *dev_a;
HANDLE_ERROR(cudaMallocHost((void **)&host_a,nBytes));
for(int i=0;i<height;++i)
{
for(int j=0;j<width;++j)
{
host_a[i][j]=1;
}
}
// Bind the texture reference to the pitched global-memory allocation
size_t pitch;
HANDLE_ERROR( cudaMallocPitch((void**)&dev_a, &pitch, width*sizeof(FLOAT), height) );
HANDLE_ERROR( cudaMemcpy2D(dev_a, // device destination
pitch, // device pitch (calculated above)
host_a, // src on host
width*sizeof(float), // pitch on src (no padding so just width of row)
width*sizeof(float), // width of data in bytes
height, // height of data
cudaMemcpyHostToDevice) );
// cudaChannelFormatDesc desc = cudaCreateChannelDesc<FLOAT>();
HANDLE_ERROR(cudaBindTexture2D(NULL,tex2DRef,dev_a,tex2DRef.channelDesc,
width,height,pitch));
FLOAT *dev_b=NULL,*host_b=NULL;
HANDLE_ERROR(cudaMallocHost((void **)&host_b,nBytes));
HANDLE_ERROR(cudaMalloc((void **)&dev_b,nBytes));
dim3 threads(32,32);
kernel<<<1,threads>>>(dev_b,N/width,width);
HANDLE_ERROR(cudaMemcpy(host_b,dev_b,nBytes,cudaMemcpyDeviceToHost));
// cudaDeviceSynchronize(); // wait for the GPU to finish (one of several ways to synchronize)
// print
for(int i=0;i<N;++i) cout<<host_b[i]<<" ";
HANDLE_ERROR(cudaUnbindTexture(tex2DRef)); // unbind the texture
HANDLE_ERROR(cudaFree(dev_a));
HANDLE_ERROR(cudaFree(dev_b));
HANDLE_ERROR(cudaFreeHost(host_a));
HANDLE_ERROR(cudaFreeHost(host_b));
return 0;
}
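// ---------------------------------------------------------------------------
// Alternative sketch (not part of the original example): texture *references*
// such as tex2DRef are deprecated and were removed in CUDA 12+. The same
// pitched 2D fetch can be expressed with a texture object. The names
// kernelObj and makePitch2DTexture are hypothetical; the object would be
// created after cudaMallocPitch/cudaMemcpy2D and released with
// cudaDestroyTextureObject() when no longer needed.
__global__ void kernelObj(FLOAT *dev_b, cudaTextureObject_t tex, const int height, const int width)
{
    int x = threadIdx.x;
    int y = threadIdx.y;
    if (x >= width || y >= height) return;
    dev_b[y * width + x] = tex2D<FLOAT>(tex, x, y) + 2;   // row-major store
}

static cudaTextureObject_t makePitch2DTexture(FLOAT *dev_a, size_t pitch, int width, int height)
{
    cudaResourceDesc resDesc = {};                  // zero-initialise the descriptor
    resDesc.resType = cudaResourceTypePitch2D;
    resDesc.res.pitch2D.devPtr = dev_a;
    resDesc.res.pitch2D.desc = cudaCreateChannelDesc<FLOAT>();
    resDesc.res.pitch2D.width = width;
    resDesc.res.pitch2D.height = height;
    resDesc.res.pitch2D.pitchInBytes = pitch;
    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.filterMode = cudaFilterModePoint;       // nearest-texel fetch, like the reference above
    texDesc.readMode = cudaReadModeElementType;
    texDesc.normalizedCoords = 0;
    cudaTextureObject_t texObj = 0;
    HANDLE_ERROR(cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL));
    return texObj;
}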
|
d0c73abf83a3e52c1997a1a1626540aa29d1a3a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "THHApply.cuh"
#include "THHHalf.h"
#include "THHNumerics.cuh"
inline int curGPU() {
int curDev;
THCudaCheck(hipGetDevice(&curDev));
return curDev;
}
// Copy operator for the pointwise apply kernel
template <typename TypeDst, typename TypeSrc>
struct CopyOp {
__device__ __forceinline__ void operator()(TypeDst* dst, TypeSrc* src) {
#if __CUDA_ARCH__ >= 350
*dst = ScalarConvert<TypeSrc, TypeDst>::to(__ldg(src));
#else
*dst = ScalarConvert<TypeSrc, TypeDst>::to(*src);
#endif
}
};
// Copy for the same type to the same type
template <typename TensorTypeDst, typename TensorTypeSrc>
void
THC_copyTensor(THCState* state, TensorTypeDst* dst, TensorTypeSrc* src) {
ptrdiff_t totalElements = TensorUtils<TensorTypeDst>::getNumElements(state, dst);
THArgCheck(totalElements ==
TensorUtils<TensorTypeSrc>::getNumElements(state, src),
2, "sizes do not match");
if (TensorUtils<TensorTypeDst>::getDims(state, dst) == 0) {
// Zero-dim tensor; copy nothing
return;
}
// We can memcpy the memory if:
// -both tensors are contiguous; or,
// -there is only one element to copy; or,
// -FIXME: if both tensors have matching size and stride arrays, and no
// holes within (in other words, there is some permutation that can be applied
// to the size/strides such that the resulting tensor is
// contiguous).
// -AND: both tensors have the same type.
bool sameType = isSameType<TensorTypeSrc, TensorTypeDst>();
bool srcContig = TensorUtils<TensorTypeSrc>::isContiguous(state, src);
bool dstContig = TensorUtils<TensorTypeDst>::isContiguous(state, dst);
bool memcpyEligible =
((srcContig && dstContig) || (totalElements == 1)) && sameType;
int srcDev = TensorUtils<TensorTypeSrc>::getDevice(state, src);
int dstDev = TensorUtils<TensorTypeDst>::getDevice(state, dst);
int oldDev = curGPU();
// We always perform the copy on the source device, using the
// current stream on the source device.
// If the copy is on the default stream, then we fully synchronize
// both src and dst's default streams for completion of the
// copy. We have to explicitly do this for non-contig copies.
// This mimics the behavior of cross-device hipMemcpyAsync on
// the default stream.
// If the copy is not on the default stream, then it is up to the
// user to add needed synchronization on the dst device, since the
// stream on the dst device that wishes to synchronize may not be
// the same index as the one on the src device.
hipStream_t copyStream = THCState_getCurrentStreamOnDevice(state, srcDev);
if (srcDev != dstDev && copyStream == NULL) {
// This is a cross-device copy on the default stream. We perform a
// two-way barrier between both devices' default streams before
// the copy. This ensures that any write-after-write and
// write-after-read dependencies on the destination side are
// handled, so that no one is operating on the dst memory when
// we perform the copy.
// src waits on dst barrier (src already waits on src)
hipEvent_t dstReady;
THCudaCheck(hipSetDevice(dstDev));
THCudaCheck(hipEventCreateWithFlags(&dstReady, hipEventDisableTiming));
THCudaCheck(hipEventRecord(dstReady, NULL));
THCudaCheck(hipSetDevice(srcDev));
THCudaCheck(hipStreamWaitEvent(NULL, dstReady, 0));
THCudaCheck(hipEventDestroy(dstReady));
} else if (srcDev != oldDev) {
THCudaCheck(hipSetDevice(srcDev));
}
// We are now on srcDev
if (memcpyEligible) {
// Perform the copy
THCudaCheck(hipMemcpyAsync(
TensorUtils<TensorTypeDst>::getData(state, dst),
TensorUtils<TensorTypeSrc>::getData(state, src),
totalElements *
sizeof(typename TensorUtils<TensorTypeDst>::DataType),
hipMemcpyDeviceToDevice,
copyStream));
} else {
// Non-contiguous copy or a type-conversion copy
// We avoid creating temporary memory copies if possible.
// If both src and dst are on the same device, or if they are on
// different devices and p2p access is enabled, perform the copy
// by a pointwise copy kernel.
// Otherwise, we'll have to make contiguous (which will in fact
// invoke copy() again), and then perform the copy.
// FIXME: might want to consider only running the pointwise kernel
// if both src and dst innermost dimensions are contiguous. If
// they are not, then taking the hit of the memory allocation/free
// might be worth it to avoid non-coalesced reads or writes.
// A device always has access to itself, so this also handles the
// case srcDev == dstDev
if (THCState_getPeerToPeerAccess(state, srcDev, dstDev)) {
bool succ =
THC_pointwiseApply2(
state, dst, src,
CopyOp<typename TensorUtils<TensorTypeDst>::DataType,
typename TensorUtils<TensorTypeSrc>::DataType>());
THArgCheck(succ, 2, CUTORCH_DIM_WARNING);
} else {
// GPUs can't access each other directly, but the tensors
// involved are non-contiguous and/or are different types.
// Make sure the src is contiguous and in the same type as dst
THCudaCheck(hipSetDevice(srcDev));
TensorTypeDst* srcContig = NULL;
if (sameType) {
srcContig =
(TensorTypeDst*) // this is actually the same type as src
TensorUtils<TensorTypeSrc>::newContiguous(state, src);
} else {
// Types are different
// Copy into the new format, contiguous, on the source device
srcContig = TensorUtils<TensorTypeDst>::newTensor(state);
TensorUtils<TensorTypeDst>::resizeAs(state, srcContig, dst);
bool succ =
THC_pointwiseApply2(
state, srcContig, src,
CopyOp<typename TensorUtils<TensorTypeDst>::DataType,
typename TensorUtils<TensorTypeSrc>::DataType>());
THArgCheck(succ, 2, CUTORCH_DIM_WARNING);
}
// Make sure the dst is contiguous
THCudaCheck(hipSetDevice(dstDev));
TensorTypeDst* dstContig =
TensorUtils<TensorTypeDst>::newContiguous(state, dst);
// Now, we are ready for a cross-device memcpy of contiguous
// data, of the same layout and type
THCudaCheck(hipSetDevice(srcDev));
THCudaCheck(hipMemcpyAsync(
TensorUtils<TensorTypeDst>::getData(state, dstContig),
TensorUtils<TensorTypeDst>::getData(state, srcContig),
totalElements *
sizeof(typename TensorUtils<TensorTypeDst>::DataType),
hipMemcpyDeviceToDevice,
copyStream));
// We are done with the src
TensorUtils<TensorTypeDst>::free(state, srcContig);
if (dst != dstContig) {
TensorUtils<TensorTypeDst>::freeCopyTo(state, dstContig, dst);
} else {
TensorUtils<TensorTypeDst>::free(state, dstContig);
}
// We're still on srcDev at this point
}
}
if (srcDev != dstDev && copyStream == NULL) {
// dst waits on src barrier (dst already waits on dst). We cannot
// operate on dst's copy until the copy is complete.
// Still on srcDev, record default stream event
hipEvent_t srcReady;
THCudaCheck(hipEventCreateWithFlags(&srcReady, hipEventDisableTiming));
THCudaCheck(hipEventRecord(srcReady, NULL));
THCudaCheck(hipSetDevice(dstDev));
THCudaCheck(hipStreamWaitEvent(NULL, srcReady, 0));
THCudaCheck(hipEventDestroy(srcReady));
// We are now on dstDev (right above). Restore prior device from dst
if (dstDev != oldDev) {
THCudaCheck(hipSetDevice(oldDev));
}
} else {
// We are still on srcDev. Restore prior device from src
if (srcDev != oldDev) {
THCudaCheck(hipSetDevice(oldDev));
}
}
THCudaCheck(hipGetLastError());
}
#include "generic/THCTensorCopy.cu"
#include "THHGenerateAllTypes.h"
|
d0c73abf83a3e52c1997a1a1626540aa29d1a3a7.cu
|
#include "THCApply.cuh"
#include "THCHalf.h"
#include "THCNumerics.cuh"
inline int curGPU() {
int curDev;
THCudaCheck(cudaGetDevice(&curDev));
return curDev;
}
// Copy operator for the pointwise apply kernel
template <typename TypeDst, typename TypeSrc>
struct CopyOp {
__device__ __forceinline__ void operator()(TypeDst* dst, TypeSrc* src) {
#if __CUDA_ARCH__ >= 350
*dst = ScalarConvert<TypeSrc, TypeDst>::to(__ldg(src));
#else
*dst = ScalarConvert<TypeSrc, TypeDst>::to(*src);
#endif
}
};
// Copy for the same type to the same type
template <typename TensorTypeDst, typename TensorTypeSrc>
void
THC_copyTensor(THCState* state, TensorTypeDst* dst, TensorTypeSrc* src) {
ptrdiff_t totalElements = TensorUtils<TensorTypeDst>::getNumElements(state, dst);
THArgCheck(totalElements ==
TensorUtils<TensorTypeSrc>::getNumElements(state, src),
2, "sizes do not match");
if (TensorUtils<TensorTypeDst>::getDims(state, dst) == 0) {
// Zero-dim tensor; copy nothing
return;
}
// We can memcpy the memory if:
// -both tensors are contiguous; or,
// -there is only one element to copy; or,
// -FIXME: if both tensors have matching size and stride arrays, and no
// holes within (in other words, there is some permutation that can be applied
// to the size/strides such that the resulting tensor is
// contiguous).
// -AND: both tensors have the same type.
bool sameType = isSameType<TensorTypeSrc, TensorTypeDst>();
bool srcContig = TensorUtils<TensorTypeSrc>::isContiguous(state, src);
bool dstContig = TensorUtils<TensorTypeDst>::isContiguous(state, dst);
bool memcpyEligible =
((srcContig && dstContig) || (totalElements == 1)) && sameType;
int srcDev = TensorUtils<TensorTypeSrc>::getDevice(state, src);
int dstDev = TensorUtils<TensorTypeDst>::getDevice(state, dst);
int oldDev = curGPU();
// We always perform the copy on the source device, using the
// current stream on the source device.
// If the copy is on the default stream, then we fully synchronize
// both src and dst's default streams for completion of the
// copy. We have to explicitly do this for non-contig copies.
// This mimics the behavior of cross-device cudaMemcpyAsync on
// the default stream.
// If the copy is not on the default stream, then it is up to the
// user to add needed synchronization on the dst device, since the
// stream on the dst device that wishes to synchronize may not be
// the same index as the one on the src device.
cudaStream_t copyStream = THCState_getCurrentStreamOnDevice(state, srcDev);
if (srcDev != dstDev && copyStream == NULL) {
// This is a cross-device copy on the default stream. We perform a
// two-way barrier between both devices' default streams before
// the copy. This ensures that any write-after-write and
// write-after-read dependencies on the destination side are
// handled, so that no one is operating on the dst memory when
// we perform the copy.
// src waits on dst barrier (src already waits on src)
cudaEvent_t dstReady;
THCudaCheck(cudaSetDevice(dstDev));
THCudaCheck(cudaEventCreateWithFlags(&dstReady, cudaEventDisableTiming));
THCudaCheck(cudaEventRecord(dstReady, NULL));
THCudaCheck(cudaSetDevice(srcDev));
THCudaCheck(cudaStreamWaitEvent(NULL, dstReady, 0));
THCudaCheck(cudaEventDestroy(dstReady));
} else if (srcDev != oldDev) {
THCudaCheck(cudaSetDevice(srcDev));
}
// We are now on srcDev
if (memcpyEligible) {
// Perform the copy
THCudaCheck(cudaMemcpyAsync(
TensorUtils<TensorTypeDst>::getData(state, dst),
TensorUtils<TensorTypeSrc>::getData(state, src),
totalElements *
sizeof(typename TensorUtils<TensorTypeDst>::DataType),
cudaMemcpyDeviceToDevice,
copyStream));
} else {
// Non-contiguous copy or a type-conversion copy
// We avoid creating temporary memory copies if possible.
// If both src and dst are on the same device, or if they are on
// different devices and p2p access is enabled, perform the copy
// by a pointwise copy kernel.
// Otherwise, we'll have to make contiguous (which will in fact
// invoke copy() again), and then perform the copy.
// FIXME: might want to consider only running the pointwise kernel
// if both src and dst innermost dimensions are contiguous. If
// they are not, then taking the hit of the memory allocation/free
// might be worth it to avoid non-coalesced reads or writes.
// A device always has access to itself, so this also handles the
// case srcDev == dstDev
if (THCState_getPeerToPeerAccess(state, srcDev, dstDev)) {
bool succ =
THC_pointwiseApply2(
state, dst, src,
CopyOp<typename TensorUtils<TensorTypeDst>::DataType,
typename TensorUtils<TensorTypeSrc>::DataType>());
THArgCheck(succ, 2, CUTORCH_DIM_WARNING);
} else {
// GPUs can't access each other directly, but the tensors
// involved are non-contiguous and/or are different types.
// Make sure the src is contiguous and in the same type as dst
THCudaCheck(cudaSetDevice(srcDev));
TensorTypeDst* srcContig = NULL;
if (sameType) {
srcContig =
(TensorTypeDst*) // this is actually the same type as src
TensorUtils<TensorTypeSrc>::newContiguous(state, src);
} else {
// Types are different
// Copy into the new format, contiguous, on the source device
srcContig = TensorUtils<TensorTypeDst>::newTensor(state);
TensorUtils<TensorTypeDst>::resizeAs(state, srcContig, dst);
bool succ =
THC_pointwiseApply2(
state, srcContig, src,
CopyOp<typename TensorUtils<TensorTypeDst>::DataType,
typename TensorUtils<TensorTypeSrc>::DataType>());
THArgCheck(succ, 2, CUTORCH_DIM_WARNING);
}
// Make sure the dst is contiguous
THCudaCheck(cudaSetDevice(dstDev));
TensorTypeDst* dstContig =
TensorUtils<TensorTypeDst>::newContiguous(state, dst);
// Now, we are ready for a cross-device memcpy of contiguous
// data, of the same layout and type
THCudaCheck(cudaSetDevice(srcDev));
THCudaCheck(cudaMemcpyAsync(
TensorUtils<TensorTypeDst>::getData(state, dstContig),
TensorUtils<TensorTypeDst>::getData(state, srcContig),
totalElements *
sizeof(typename TensorUtils<TensorTypeDst>::DataType),
cudaMemcpyDeviceToDevice,
copyStream));
// We are done with the src
TensorUtils<TensorTypeDst>::free(state, srcContig);
if (dst != dstContig) {
TensorUtils<TensorTypeDst>::freeCopyTo(state, dstContig, dst);
} else {
TensorUtils<TensorTypeDst>::free(state, dstContig);
}
// We're still on srcDev at this point
}
}
if (srcDev != dstDev && copyStream == NULL) {
// dst waits on src barrier (dst already waits on dst). We cannot
// operate on dst's copy until the copy is complete.
// Still on srcDev, record default stream event
cudaEvent_t srcReady;
THCudaCheck(cudaEventCreateWithFlags(&srcReady, cudaEventDisableTiming));
THCudaCheck(cudaEventRecord(srcReady, NULL));
THCudaCheck(cudaSetDevice(dstDev));
THCudaCheck(cudaStreamWaitEvent(NULL, srcReady, 0));
THCudaCheck(cudaEventDestroy(srcReady));
// We are now on dstDev (right above). Restore prior device from dst
if (dstDev != oldDev) {
THCudaCheck(cudaSetDevice(oldDev));
}
} else {
// We are still on srcDev. Restore prior device from src
if (srcDev != oldDev) {
THCudaCheck(cudaSetDevice(oldDev));
}
}
THCudaCheck(cudaGetLastError());
}
#include "generic/THCTensorCopy.cu"
#include "THCGenerateAllTypes.h"
|
f4b9c6aa5df79e40dddf611edd691399336553c8.hip
|
// !!! This is a file automatically generated by hipify!!!
/**TODO: Add copyright*/
#if COMPILE_WITH_CUDA
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <EvoNet/ml/ModelKernalGpu.h>
using namespace EvoNet;
using namespace std;
void test_constructorGpuDevice()
{
ModelKernalGpu<float>* ptr = nullptr;
ModelKernalGpu<float>* nullPointer = nullptr;
ptr = new ModelKernalGpu<float>();
assert(ptr != nullPointer);
}
void test_destructorGpuDevice()
{
ModelKernalGpu<float>* ptr = nullptr;
ptr = new ModelKernalGpu<float>();
delete ptr;
}
void test_nodeActivationGpuDevice()
{
ModelKernalGpu<float> kernal;
const int device_id = 0;
std::shared_ptr<ActivationTensorOp<float, Eigen::GpuDevice>> activation_function = std::make_shared<ReLUTensorOp<float, Eigen::GpuDevice>>(ReLUTensorOp<float, Eigen::GpuDevice>());
const int batch_size = 4;
const int memory_size = 2;
const int layer_size = 2;
const int source_time_step = 0;
const int node_time_step = 0;
float* h_node_input;
float* d_node_input;
float* h_node_output;
float* d_node_output;
float* h_node_dt;
float* d_node_dt;
assert(hipSetDevice(device_id) == hipSuccess); // is this needed?
// allocate memory
std::size_t bytes = batch_size * memory_size * layer_size * sizeof(float);
assert(hipHostMalloc((void**)(&h_node_input), bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_node_input), bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_node_output), bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_node_output), bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_node_dt), bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_node_dt), bytes) == hipSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> node_input(h_node_input, batch_size, memory_size, layer_size);
node_input.setValues({ {{-1, 1}, {0, 0}},
{{-2, 2}, {0, 0}},
{{-3, 3}, {0, 0}},
{{-4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 3>> node_output(h_node_output, batch_size, memory_size, layer_size);
node_output.setConstant(0);
Eigen::TensorMap<Eigen::Tensor<float, 3>> node_dt(h_node_dt, batch_size, memory_size, layer_size);
node_dt.setConstant(1);
// Set up the device
hipStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeNodeActivation(
h_node_input,
d_node_input,
h_node_output,
d_node_output,
h_node_dt,
d_node_dt,
activation_function,
batch_size,
memory_size,
layer_size,
node_time_step,
device,
true,
true);
// Synchronize the stream
hipError_t err = hipStreamQuery(stream);
assert(hipStreamSynchronize(stream) == hipSuccess);
assert(hipStreamDestroy(stream) == hipSuccess);
Eigen::Tensor<float, 3> expected_output(batch_size, memory_size, layer_size);
expected_output.setValues({ {{0, 1}, {0, 0}},
{{0, 2}, {0, 0}},
{{0, 3}, {0, 0}},
{{0, 4}, {0, 0}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int node_iter = 0; node_iter < layer_size; ++node_iter) {
//std::cout << "[Output] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << node_output(batch_iter, memory_iter, node_iter) << std::endl;
assert(node_output(batch_iter, memory_iter, node_iter) == expected_output(batch_iter, memory_iter, node_iter));
}
}
}
// release resources
assert(hipHostFree(h_node_input) == hipSuccess);
assert(hipFree(d_node_input) == hipSuccess);
assert(hipHostFree(h_node_output) == hipSuccess);
assert(hipFree(d_node_output) == hipSuccess);
assert(hipHostFree(h_node_dt) == hipSuccess);
assert(hipFree(d_node_dt) == hipSuccess);
}
void test_nodeDerivativeGpuDevice()
{
ModelKernalGpu<float> kernal;
const int device_id = 0;
std::shared_ptr<ActivationTensorOp<float, Eigen::GpuDevice>> activation_grad_function = std::make_shared<ReLUGradTensorOp<float, Eigen::GpuDevice>>(ReLUGradTensorOp<float, Eigen::GpuDevice>());
const int batch_size = 4;
const int memory_size = 2;
const int layer_size = 2;
const int source_time_step = 0;
const int node_time_step = 0;
float* h_node_output;
float* d_node_output;
float* h_node_derivative;
float* d_node_derivative;
assert(hipSetDevice(device_id) == hipSuccess); // is this needed?
// allocate memory
std::size_t bytes = batch_size * memory_size * layer_size * sizeof(float);
assert(hipHostMalloc((void**)(&h_node_output), bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_node_output), bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_node_derivative), bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_node_derivative), bytes) == hipSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> node_output(h_node_output, batch_size, memory_size, layer_size);
node_output.setValues({ {{-1, 1}, {0, 0}},
{{-2, 2}, {0, 0}},
{{-3, 3}, {0, 0}},
{{-4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 3>> node_derivative(h_node_derivative, batch_size, memory_size, layer_size);
node_derivative.setConstant(0);
// Set up the device
hipStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeNodeDerivative(
h_node_output,
d_node_output,
h_node_derivative,
d_node_derivative,
activation_grad_function,
batch_size,
memory_size,
layer_size,
node_time_step,
device,
true,
true);
// Synchronize the stream
hipError_t err = hipStreamQuery(stream);
assert(hipStreamSynchronize(stream) == hipSuccess);
assert(hipStreamDestroy(stream) == hipSuccess);
Eigen::Tensor<float, 3> expected_derivative(batch_size, memory_size, layer_size);
expected_derivative.setValues({ {{0, 1}, {0, 0}},
{{0, 1}, {0, 0}},
{{0, 1}, {0, 0}},
{{0, 1}, {0, 0}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int node_iter = 0; node_iter < layer_size; ++node_iter) {
//std::cout << "[Derivative] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << node_derivative(batch_iter, memory_iter, node_iter) << std::endl;
assert(node_derivative(batch_iter, memory_iter, node_iter) == expected_derivative(batch_iter, memory_iter, node_iter));
}
}
}
// release resources
assert(hipHostFree(h_node_output) == hipSuccess);
assert(hipFree(d_node_output) == hipSuccess);
assert(hipHostFree(h_node_derivative) == hipSuccess);
assert(hipFree(d_node_derivative) == hipSuccess);
}
void test_forwardPropogationGpuDevice()
{
ModelKernalGpu<float> kernal;
const int device_id = 0;
std::shared_ptr<IntegrationTensorOp<float, Eigen::GpuDevice>> integration_function = std::make_shared<SumTensorOp<float, Eigen::GpuDevice>>(SumTensorOp<float, Eigen::GpuDevice>());
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_steps = 0;
const int sink_time_step = 0;
float* h_source_outputs;
float* d_source_outputs;
float* h_weights;
float* d_weights;
float* h_sink_input;
float* d_sink_input;
assert(hipSetDevice(device_id) == hipSuccess); // is this needed?
// allocate memory
std::size_t source_bytes = batch_size * memory_size * source_layer_size * sizeof(float);
std::size_t sink_bytes = batch_size * memory_size * sink_layer_size * sizeof(float);
std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(float);
assert(hipHostMalloc((void**)(&h_source_outputs), source_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_source_outputs), source_bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_weights), weight_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_weights), weight_bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_sink_input), sink_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_sink_input), sink_bytes) == hipSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> source_output(h_source_outputs, batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 2>> weight(h_weights, source_layer_size, sink_layer_size);
weight.setConstant(1);
Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_input(h_sink_input, batch_size, memory_size, sink_layer_size);
sink_input.setConstant(0);
// Set up the device
hipStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeForwardPropogation(
h_source_outputs,
d_source_outputs,
h_weights,
d_weights,
h_sink_input,
d_sink_input,
integration_function,
batch_size,
memory_size,
source_layer_size,
sink_layer_size,
source_time_steps,
sink_time_step,
device,
true,
true);
// Synchronize the stream
hipError_t err = hipStreamQuery(stream);
assert(hipStreamSynchronize(stream) == hipSuccess);
assert(hipStreamDestroy(stream) == hipSuccess);
Eigen::Tensor<float, 3> expected_input(batch_size, memory_size, sink_layer_size);
expected_input.setValues({ {{2}, {0}},
{{4}, {0}},
{{6}, {0}},
{{8}, {0}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int node_iter = 0; node_iter < sink_layer_size; ++node_iter) {
//std::cout << "[Input] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << sink_input(batch_iter, memory_iter, node_iter) << std::endl;
assert(sink_input(batch_iter, memory_iter, node_iter) == expected_input(batch_iter, memory_iter, node_iter));
}
}
}
// release resources
assert(hipHostFree(h_source_outputs) == hipSuccess);
assert(hipFree(d_source_outputs) == hipSuccess);
assert(hipHostFree(h_weights) == hipSuccess);
assert(hipFree(d_weights) == hipSuccess);
assert(hipHostFree(h_sink_input) == hipSuccess);
assert(hipFree(d_sink_input) == hipSuccess);
}
void test_backwardPropogationGpuDevice()
{
const int device_id = 0;
ModelKernalGpu<float> kernal;
std::shared_ptr<IntegrationErrorTensorOp<float, Eigen::GpuDevice>> integration_function = std::make_shared<SumErrorTensorOp<float, Eigen::GpuDevice>>(SumErrorTensorOp<float, Eigen::GpuDevice>());
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 0;
float* h_source_errors;
float* d_source_errors;
float* h_source_inputs;
float* d_source_inputs;
float* h_weights;
float* d_weights;
float* h_sink_error;
float* d_sink_error;
float* h_sink_output;
float* d_sink_output;
float* h_sink_derivative;
float* d_sink_derivative;
assert(hipSetDevice(device_id) == hipSuccess); // is this needed?
// allocate memory
std::size_t source_bytes = batch_size * memory_size * source_layer_size * sizeof(float);
std::size_t sink_bytes = batch_size * memory_size * sink_layer_size * sizeof(float);
std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(float);
assert(hipHostMalloc((void**)(&h_source_errors), source_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_source_errors), source_bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_source_inputs), source_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_source_inputs), source_bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_weights), weight_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_weights), weight_bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_sink_error), sink_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_sink_error), sink_bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_sink_derivative), sink_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_sink_derivative), sink_bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_sink_output), sink_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_sink_output), sink_bytes) == hipSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> source_error(h_source_errors, batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 3>> source_input(h_source_inputs, batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 2>> weight(h_weights, source_layer_size, sink_layer_size);
weight.setConstant(1);
Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_derivative(h_sink_derivative, batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_error(h_sink_error, batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_output(h_sink_output, batch_size, memory_size, sink_layer_size);
sink_output.setConstant(1);
// Set up the device
hipStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeBackwardPropogation(
h_source_errors,
d_source_errors,
h_source_inputs,
d_source_inputs,
h_sink_output,
d_sink_output,
h_weights,
d_weights,
h_sink_error,
d_sink_error,
h_sink_derivative,
d_sink_derivative,
source_layer_size,
integration_function,
batch_size,
memory_size,
source_layer_size,
sink_layer_size,
source_time_step,
sink_time_step,
device,
true,
true);
// Synchronize the stream
hipError_t err = hipStreamQuery(stream);
assert(hipStreamSynchronize(stream) == hipSuccess);
assert(hipStreamDestroy(stream) == hipSuccess);
Eigen::Tensor<float, 3> expected_error(batch_size, memory_size, sink_layer_size);
expected_error.setValues({ {{4}, {0}},
{{8}, {0}},
{{12}, {0}},
{{16}, {0}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int node_iter = 0; node_iter < sink_layer_size; ++node_iter) {
//std::cout << "[Sink Error] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << sink_error(batch_iter, memory_iter, node_iter) << std::endl;
assert(sink_error(batch_iter, memory_iter, node_iter) == expected_error(batch_iter, memory_iter, node_iter));
}
}
}
assert(hipHostFree(h_source_errors) == hipSuccess);
assert(hipFree(d_source_errors) == hipSuccess);
assert(hipHostFree(h_source_inputs) == hipSuccess);
assert(hipFree(d_source_inputs) == hipSuccess);
assert(hipHostFree(h_weights) == hipSuccess);
assert(hipFree(d_weights) == hipSuccess);
assert(hipHostFree(h_sink_error) == hipSuccess);
assert(hipFree(d_sink_error) == hipSuccess);
assert(hipHostFree(h_sink_derivative) == hipSuccess);
assert(hipFree(d_sink_derivative) == hipSuccess);
assert(hipHostFree(h_sink_output) == hipSuccess);
assert(hipFree(d_sink_output) == hipSuccess);
}
void test_modelErrorGpuDevice()
{
const int device_id = 0;
ModelKernalGpu<float> kernal;
std::shared_ptr<LossFunctionTensorOp<float, Eigen::GpuDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::GpuDevice>>(MSELossTensorOp<float, Eigen::GpuDevice>());
std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::GpuDevice>> loss_grad_function = std::make_shared<MSELossGradTensorOp<float, Eigen::GpuDevice>>(MSELossGradTensorOp<float, Eigen::GpuDevice>());
const int batch_size = 4;
const int memory_size = 2;
const int layer_size = 2;
const int time_step = 0;
float* h_predicted;
float* d_predicted;
float* h_node_errors;
float* d_node_errors;
float* h_model_error;
float* d_model_error;
assert(hipSetDevice(device_id) == hipSuccess); // is this needed?
// allocate memory
std::size_t bytes = batch_size * memory_size * layer_size * sizeof(float);
std::size_t model_bytes = batch_size * memory_size * sizeof(float);
assert(hipHostMalloc((void**)(&h_predicted), bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_predicted), bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_node_errors), bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_node_errors), bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_model_error), model_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_model_error), model_bytes) == hipSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> predicted(h_predicted, batch_size, memory_size, layer_size);
predicted.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 2>> model_error(h_model_error, batch_size, memory_size);
model_error.setConstant(0);
Eigen::TensorMap<Eigen::Tensor<float, 3>> node_error(h_node_errors, batch_size, memory_size, layer_size);
node_error.setConstant(0);
Eigen::Tensor<float, 2> expected(batch_size, layer_size);
expected.setConstant(1);
// Set up the device
hipStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeModelErrors(
expected,
h_predicted,
d_predicted,
h_model_error,
d_model_error,
h_node_errors,
d_node_errors,
loss_function,
loss_grad_function,
batch_size,
memory_size,
layer_size,
time_step,
device,
true,
true);
// Synchronize the stream
hipError_t err = hipStreamQuery(stream);
assert(hipStreamSynchronize(stream) == hipSuccess);
assert(hipStreamDestroy(stream) == hipSuccess);
Eigen::Tensor<float, 2> expected_model_error(batch_size, memory_size);
expected_model_error.setValues({ {0, 0}, {0.5, 0}, {2.0, 0}, {4.5, 0} });
Eigen::Tensor<float, 3> expected_node_error(batch_size, memory_size, layer_size);
expected_node_error.setValues({
{ {0, 0 }, { 0, 0 } },
{ {-0.5, -0.5 }, { 0, 0 } },
{ {-1, -1 }, { 0, 0 } },
{ {-1.5, -1.5 }, { 0, 0 } } });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
//std::cout << "[Model Error] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << " = " << model_error(batch_iter, memory_iter) << std::endl;
assert(model_error(batch_iter, memory_iter) == expected_model_error(batch_iter, memory_iter));
for (int node_iter = 0; node_iter < layer_size; ++node_iter) {
//std::cout << "[Node Error] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << node_error(batch_iter, memory_iter, node_iter) << std::endl;
assert(node_error(batch_iter, memory_iter, node_iter) == expected_node_error(batch_iter, memory_iter, node_iter));
}
}
}
assert(hipHostFree(h_predicted) == hipSuccess);
assert(hipFree(d_predicted) == hipSuccess);
assert(hipHostFree(h_node_errors) == hipSuccess);
assert(hipFree(d_node_errors) == hipSuccess);
assert(hipHostFree(h_model_error) == hipSuccess);
assert(hipFree(d_model_error) == hipSuccess);
}
void test_modelMetricGpuDevice()
{
const int device_id = 0;
ModelKernalGpu<float> kernal;
std::shared_ptr<MetricFunctionTensorOp<float, Eigen::GpuDevice>> metric_function = std::make_shared<MAETensorOp<float, Eigen::GpuDevice>>(MAETensorOp<float, Eigen::GpuDevice>());
const int batch_size = 4;
const int memory_size = 2;
const int layer_size = 2;
const int n_metrics = 1;
const int time_step = 0;
const int metric_index = 0;
float* h_predicted;
float* d_predicted;
float* h_model_metric;
float* d_model_metric;
assert(hipSetDevice(device_id) == hipSuccess); // is this needed?
// allocate memory
std::size_t bytes = batch_size * memory_size * layer_size * sizeof(float);
std::size_t model_bytes = n_metrics * memory_size * sizeof(float);
assert(hipHostMalloc((void**)(&h_predicted), bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_predicted), bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_model_metric), model_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_model_metric), model_bytes) == hipSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> predicted(h_predicted, batch_size, memory_size, layer_size);
predicted.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 2>> model_metric(h_model_metric, n_metrics, memory_size);
model_metric.setConstant(0);
Eigen::Tensor<float, 2> expected(batch_size, layer_size);
expected.setConstant(1);
// Set up the device
hipStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeModelMetric(
expected,
h_predicted,
d_predicted,
h_model_metric,
d_model_metric,
metric_function,
batch_size,
memory_size,
layer_size,
n_metrics,
time_step,
metric_index,
device,
true,
true);
// Synchronize the stream
hipError_t err = hipStreamQuery(stream);
assert(hipStreamSynchronize(stream) == hipSuccess);
assert(hipStreamDestroy(stream) == hipSuccess);
Eigen::Tensor<float, 2> expected_model_metric(n_metrics, memory_size);
expected_model_metric.setValues({ {1.5, 0} });
for (int metric_iter = 0; metric_iter < n_metrics; ++metric_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
//std::cout << "[Model Metric] Metric iter: " << metric_iter << ", Memory Iter: " << memory_iter << " = " << model_metric(metric_iter, memory_iter) << std::endl;
assert(model_metric(metric_iter, memory_iter) == expected_model_metric(metric_iter, memory_iter));
}
}
assert(hipHostFree(h_predicted) == hipSuccess);
assert(hipFree(d_predicted) == hipSuccess);
assert(hipHostFree(h_model_metric) == hipSuccess);
assert(hipFree(d_model_metric) == hipSuccess);
}
void test_weightErrorGpuDevice()
{
const int device_id = 0;
ModelKernalGpu<float> kernal;
std::shared_ptr<IntegrationWeightGradTensorOp<float, Eigen::GpuDevice>> integration_function = std::make_shared<SumWeightGradTensorOp<float, Eigen::GpuDevice>>(SumWeightGradTensorOp<float, Eigen::GpuDevice>());
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
float* h_sink_errors;
float* d_sink_errors;
float* h_source_outputs;
float* d_source_outputs;
float* h_source_inputs;
float* d_source_inputs;
float* h_weight;
float* d_weight;
float* h_weight_error;
float* d_weight_error;
assert(hipSetDevice(device_id) == hipSuccess); // is this needed?
// allocate memory
std::size_t source_bytes = batch_size * memory_size * source_layer_size * sizeof(float);
std::size_t sink_bytes = batch_size * memory_size * sink_layer_size * sizeof(float);
std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(float);
assert(hipHostMalloc((void**)(&h_sink_errors), sink_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_sink_errors), sink_bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_source_outputs), source_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_source_outputs), source_bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_source_inputs), source_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_source_inputs), source_bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_weight), weight_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_weight), weight_bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_weight_error), weight_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_weight_error), weight_bytes) == hipSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_error(h_sink_errors, batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::TensorMap<Eigen::Tensor<float, 3>> source_output(h_source_outputs, batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 3>> source_input(h_source_inputs, batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 2>> weight(h_weight, source_layer_size, sink_layer_size);
weight.setConstant(1);
Eigen::TensorMap<Eigen::Tensor<float, 2>> weight_error(h_weight_error, source_layer_size, sink_layer_size);
weight_error.setConstant(0);
// Set up the device
hipStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeWeightErrors(
h_sink_errors,
d_sink_errors,
h_source_outputs,
d_source_outputs,
h_source_inputs,
d_source_inputs,
source_layer_size,
integration_function,
h_weight,
d_weight,
h_weight_error,
d_weight_error,
batch_size,
memory_size,
source_layer_size,
sink_layer_size,
device,
true,
true);
// Synchronize the stream
hipError_t err = hipStreamQuery(stream);
assert(hipStreamSynchronize(stream) == hipSuccess);
assert(hipStreamDestroy(stream) == hipSuccess);
Eigen::Tensor<float, 2> expected_weight_error(source_layer_size, sink_layer_size);
expected_weight_error.setValues({ {-4.75}, {-4.75} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(weight_error(source_iter, sink_iter) == expected_weight_error(source_iter, sink_iter));
}
}
assert(hipHostFree(h_sink_errors) == hipSuccess);
assert(hipFree(d_sink_errors) == hipSuccess);
assert(hipHostFree(h_source_outputs) == hipSuccess);
assert(hipFree(d_source_outputs) == hipSuccess);
assert(hipHostFree(h_source_inputs) == hipSuccess);
assert(hipFree(d_source_inputs) == hipSuccess);
assert(hipHostFree(h_weight) == hipSuccess);
assert(hipFree(d_weight) == hipSuccess);
assert(hipHostFree(h_weight_error) == hipSuccess);
assert(hipFree(d_weight_error) == hipSuccess);
}
void test_sharedWeightErrorGpuDevice()
{
const int device_id = 0;
ModelKernalGpu<float> kernal;
const int source_layer_size = 2;
const int sink_layer_size = 2;
const int n_shared_weights = 1;
float* h_shared_weights;
float* d_shared_weights;
float* h_weight_error;
float* d_weight_error;
assert(hipSetDevice(device_id) == hipSuccess); // is this needed?
// allocate memory
std::size_t shared_weights_bytes = source_layer_size * sink_layer_size * n_shared_weights * sizeof(float);
std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(float);
assert(hipHostMalloc((void**)(&h_shared_weights), shared_weights_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_shared_weights), shared_weights_bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_weight_error), weight_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_weight_error), weight_bytes) == hipSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> shared_weights(h_shared_weights, source_layer_size, sink_layer_size, n_shared_weights);
shared_weights.setValues({
{{1}, {1}},
{{0}, {0}}
});
Eigen::TensorMap<Eigen::Tensor<float, 2>> weight_error(h_weight_error, source_layer_size, sink_layer_size);
weight_error.setValues({ {1, 2}, {3, 4} });
// Set up the device
hipStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeSharedWeightErrors(
h_weight_error,
d_weight_error,
h_shared_weights,
d_shared_weights,
source_layer_size,
sink_layer_size,
n_shared_weights,
device,
true,
true);
// Synchronize the stream
hipError_t err = hipStreamQuery(stream);
assert(hipStreamSynchronize(stream) == hipSuccess);
assert(hipStreamDestroy(stream) == hipSuccess);
Eigen::Tensor<float, 2> expected_weight_error(source_layer_size, sink_layer_size);
expected_weight_error.setValues({ {3, 3}, {3, 4} });
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(weight_error(source_iter, sink_iter) == expected_weight_error(source_iter, sink_iter));
}
}
assert(hipHostFree(h_shared_weights) == hipSuccess);
assert(hipFree(d_shared_weights) == hipSuccess);
assert(hipHostFree(h_weight_error) == hipSuccess);
assert(hipFree(d_weight_error) == hipSuccess);
}
void test_weightUpdateGpuDevice(){
const int device_id = 0;
ModelKernalGpu<float> kernal;
std::shared_ptr<SolverTensorOp<float, Eigen::GpuDevice>> solver_function = std::make_shared<SGDTensorOp<float, Eigen::GpuDevice>>(SGDTensorOp<float, Eigen::GpuDevice>());
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int iter = 0;
float* h_solver_params;
float* d_solver_params;
float* h_weight;
float* d_weight;
float* h_weight_error;
float* d_weight_error;
assert(hipSetDevice(device_id) == hipSuccess); // is this needed?
// allocate memory
std::size_t solver_bytes = source_layer_size * sink_layer_size * 3 * sizeof(float);
std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(float);
assert(hipHostMalloc((void**)(&h_solver_params), solver_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_solver_params), solver_bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_weight), weight_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_weight), weight_bytes) == hipSuccess);
assert(hipHostMalloc((void**)(&h_weight_error), weight_bytes, hipHostMallocDefault) == hipSuccess);
assert(hipMalloc((void**)(&d_weight_error), weight_bytes) == hipSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> solver_params(h_solver_params, source_layer_size, sink_layer_size, 3);
solver_params.setValues({ {{0.01, 0.99, 0.0}},
{{0.01, 0.99, 0.0}} });
Eigen::TensorMap<Eigen::Tensor<float, 2>> weight(h_weight, source_layer_size, sink_layer_size);
weight.setConstant(1);
Eigen::TensorMap<Eigen::Tensor<float, 2>> weight_error(h_weight_error, source_layer_size, sink_layer_size);
weight_error.setValues({ {-0.2}, {-20} });
// Set up the device
hipStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeWeightUpdate(
h_weight,
d_weight,
h_solver_params,
d_solver_params,
h_weight_error,
d_weight_error,
solver_function,
source_layer_size,
sink_layer_size,
iter,
device,
true,
true);
// Synchronize the stream
hipError_t err = hipStreamQuery(stream);
assert(hipStreamSynchronize(stream) == hipSuccess);
assert(hipStreamDestroy(stream) == hipSuccess);
Eigen::Tensor<float, 2> expected_weights(source_layer_size, sink_layer_size);
expected_weights.setValues({ {1.002}, {1.2} });
Eigen::Tensor<float, 3> expected_params(source_layer_size, sink_layer_size, 3);
expected_params.setValues({ {{0.01, 0.99, -0.002}},
{{0.01, 0.99, -0.2}} });
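// These expected values are consistent with a plain SGD step w_new = w - lr * error
// with lr = 0.01: 1 - 0.01 * (-0.2) = 1.002 and 1 - 0.01 * (-20) = 1.2. The third
// solver parameter appears to store the signed step lr * error (-0.002 and -0.2);
// this reading is inferred from the numbers above, not from the solver's documentation.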
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight(source_iter, sink_iter) << std::endl;
assert(assert_close(weight(source_iter, sink_iter),expected_weights(source_iter, sink_iter)));
for (int param_iter = 0; param_iter < 2; ++param_iter) { // [NOTE: should be `param_iter < 3`]
//std::cout << "[Params] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << ", Param Iter: " << param_iter << " = " << solver_params(source_iter, sink_iter, param_iter) << std::endl;
assert(assert_close(solver_params(source_iter, sink_iter, param_iter),expected_params(source_iter, sink_iter, param_iter))); // Not sure why the last param does not pass...
}
}
}
assert(hipHostFree(h_solver_params) == hipSuccess);
assert(hipFree(d_solver_params) == hipSuccess);
assert(hipHostFree(h_weight) == hipSuccess);
assert(hipFree(d_weight) == hipSuccess);
assert(hipHostFree(h_weight_error) == hipSuccess);
assert(hipFree(d_weight_error) == hipSuccess);
}
int main(int argc, char** argv)
{
test_constructorGpuDevice();
test_destructorGpuDevice();
test_nodeActivationGpuDevice();
test_nodeDerivativeGpuDevice();
test_forwardPropogationGpuDevice();
test_backwardPropogationGpuDevice();
test_modelErrorGpuDevice();
test_modelMetricGpuDevice();
test_weightErrorGpuDevice();
test_sharedWeightErrorGpuDevice();
test_weightUpdateGpuDevice();
return 0;
}
#endif
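Every test above repeats the same harness: pinned host buffers, device buffers, Eigen TensorMap views over both, and an Eigen::GpuDevice wrapping a non-blocking stream. The sketch below condenses that pattern into one function; reluRoundTrip, n, and the cwiseMax expression are illustrative and not part of ModelKernalGpu.
#define EIGEN_USE_GPU
#include <cstddef>
#include <cuda_runtime.h>
#include <unsupported/Eigen/CXX11/Tensor>
void reluRoundTrip(int n)
{
    std::size_t bytes = n * sizeof(float);
    float *h_x = nullptr, *d_x = nullptr;
    cudaHostAlloc((void**)&h_x, bytes, cudaHostAllocDefault);  // pinned, so async copies can overlap
    cudaMalloc((void**)&d_x, bytes);
    Eigen::TensorMap<Eigen::Tensor<float, 1>> h_map(h_x, n);
    h_map.setRandom();
    cudaStream_t stream;
    cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
    Eigen::GpuStreamDevice stream_device(&stream, 0);          // same wrapper the tests use
    Eigen::GpuDevice device(&stream_device);
    device.memcpyHostToDevice(d_x, h_x, bytes);                // async on the wrapped stream
    Eigen::TensorMap<Eigen::Tensor<float, 1>> d_map(d_x, n);
    d_map.device(device) = d_map.cwiseMax(0.0f);               // ReLU evaluated on the GPU
    device.memcpyDeviceToHost(h_x, d_x, bytes);
    cudaStreamSynchronize(stream);                             // host data is valid from here
    cudaStreamDestroy(stream);
    cudaFreeHost(h_x);
    cudaFree(d_x);
}
Pinned allocations matter here because GpuDevice issues asynchronous copies on the wrapped stream, and those only run asynchronously when the host buffer is page-locked.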
|
f4b9c6aa5df79e40dddf611edd691399336553c8.cu
|
/**TODO: Add copyright*/
#if COMPILE_WITH_CUDA
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#include <cuda.h>
#include <cuda_runtime.h>
#include <EvoNet/ml/ModelKernalGpu.h>
using namespace EvoNet;
using namespace std;
void test_constructorGpuDevice()
{
ModelKernalGpu<float>* ptr = nullptr;
ModelKernalGpu<float>* nullPointer = nullptr;
ptr = new ModelKernalGpu<float>();
assert(ptr != nullPointer);
}
void test_destructorGpuDevice()
{
ModelKernalGpu<float>* ptr = nullptr;
ptr = new ModelKernalGpu<float>();
delete ptr;
}
void test_nodeActivationGpuDevice()
{
ModelKernalGpu<float> kernal;
const int device_id = 0;
std::shared_ptr<ActivationTensorOp<float, Eigen::GpuDevice>> activation_function = std::make_shared<ReLUTensorOp<float, Eigen::GpuDevice>>(ReLUTensorOp<float, Eigen::GpuDevice>());
const int batch_size = 4;
const int memory_size = 2;
const int layer_size = 2;
const int source_time_step = 0;
const int node_time_step = 0;
float* h_node_input;
float* d_node_input;
float* h_node_output;
float* d_node_output;
float* h_node_dt;
float* d_node_dt;
assert(cudaSetDevice(device_id) == cudaSuccess); // is this needed?
// allocate memory
std::size_t bytes = batch_size * memory_size * layer_size * sizeof(float);
assert(cudaHostAlloc((void**)(&h_node_input), bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_node_input), bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_node_output), bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_node_output), bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_node_dt), bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_node_dt), bytes) == cudaSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> node_input(h_node_input, batch_size, memory_size, layer_size);
node_input.setValues({ {{-1, 1}, {0, 0}},
{{-2, 2}, {0, 0}},
{{-3, 3}, {0, 0}},
{{-4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 3>> node_output(h_node_output, batch_size, memory_size, layer_size);
node_output.setConstant(0);
Eigen::TensorMap<Eigen::Tensor<float, 3>> node_dt(h_node_dt, batch_size, memory_size, layer_size);
node_dt.setConstant(1);
// Set up the device
cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeNodeActivation(
h_node_input,
d_node_input,
h_node_output,
d_node_output,
h_node_dt,
d_node_dt,
activation_function,
batch_size,
memory_size,
layer_size,
node_time_step,
device,
true,
true);
// Synchronize the stream
cudaError_t err = cudaStreamQuery(stream);
assert(cudaStreamSynchronize(stream) == cudaSuccess);
assert(cudaStreamDestroy(stream) == cudaSuccess);
Eigen::Tensor<float, 3> expected_output(batch_size, memory_size, layer_size);
expected_output.setValues({ {{0, 1}, {0, 0}},
{{0, 2}, {0, 0}},
{{0, 3}, {0, 0}},
{{0, 4}, {0, 0}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int node_iter = 0; node_iter < layer_size; ++node_iter) {
//std::cout << "[Output] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << node_output(batch_iter, memory_iter, node_iter) << std::endl;
assert(node_output(batch_iter, memory_iter, node_iter) == expected_output(batch_iter, memory_iter, node_iter));
}
}
}
// release resources
assert(cudaFreeHost(h_node_input) == cudaSuccess);
assert(cudaFree(d_node_input) == cudaSuccess);
assert(cudaFreeHost(h_node_output) == cudaSuccess);
assert(cudaFree(d_node_output) == cudaSuccess);
assert(cudaFreeHost(h_node_dt) == cudaSuccess);
assert(cudaFree(d_node_dt) == cudaSuccess);
}
void test_nodeDerivativeGpuDevice()
{
ModelKernalGpu<float> kernal;
const int device_id = 0;
std::shared_ptr<ActivationTensorOp<float, Eigen::GpuDevice>> activation_grad_function = std::make_shared<ReLUGradTensorOp<float, Eigen::GpuDevice>>(ReLUGradTensorOp<float, Eigen::GpuDevice>());
const int batch_size = 4;
const int memory_size = 2;
const int layer_size = 2;
const int source_time_step = 0;
const int node_time_step = 0;
float* h_node_output;
float* d_node_output;
float* h_node_derivative;
float* d_node_derivative;
assert(cudaSetDevice(device_id) == cudaSuccess); // is this needed?
// allocate memory
std::size_t bytes = batch_size * memory_size * layer_size * sizeof(float);
assert(cudaHostAlloc((void**)(&h_node_output), bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_node_output), bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_node_derivative), bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_node_derivative), bytes) == cudaSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> node_output(h_node_output, batch_size, memory_size, layer_size);
node_output.setValues({ {{-1, 1}, {0, 0}},
{{-2, 2}, {0, 0}},
{{-3, 3}, {0, 0}},
{{-4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 3>> node_derivative(h_node_derivative, batch_size, memory_size, layer_size);
node_derivative.setConstant(0);
// Set up the device
cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeNodeDerivative(
h_node_output,
d_node_output,
h_node_derivative,
d_node_derivative,
activation_grad_function,
batch_size,
memory_size,
layer_size,
node_time_step,
device,
true,
true);
// Synchronize the stream
cudaError_t err = cudaStreamQuery(stream);
assert(cudaStreamSynchronize(stream) == cudaSuccess);
assert(cudaStreamDestroy(stream) == cudaSuccess);
Eigen::Tensor<float, 3> expected_derivative(batch_size, memory_size, layer_size);
expected_derivative.setValues({ {{0, 1}, {0, 0}},
{{0, 1}, {0, 0}},
{{0, 1}, {0, 0}},
{{0, 1}, {0, 0}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int node_iter = 0; node_iter < layer_size; ++node_iter) {
//std::cout << "[Derivative] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << node_derivative(batch_iter, memory_iter, node_iter) << std::endl;
assert(node_derivative(batch_iter, memory_iter, node_iter) == expected_derivative(batch_iter, memory_iter, node_iter));
}
}
}
// release resources
assert(cudaFreeHost(h_node_output) == cudaSuccess);
assert(cudaFree(d_node_output) == cudaSuccess);
assert(cudaFreeHost(h_node_derivative) == cudaSuccess);
assert(cudaFree(d_node_derivative) == cudaSuccess);
}
void test_forwardPropogationGpuDevice()
{
ModelKernalGpu<float> kernal;
const int device_id = 0;
std::shared_ptr<IntegrationTensorOp<float, Eigen::GpuDevice>> integration_function = std::make_shared<SumTensorOp<float, Eigen::GpuDevice>>(SumTensorOp<float, Eigen::GpuDevice>());
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_steps = 0;
const int sink_time_step = 0;
float* h_source_outputs;
float* d_source_outputs;
float* h_weights;
float* d_weights;
float* h_sink_input;
float* d_sink_input;
assert(cudaSetDevice(device_id) == cudaSuccess); // is this needed?
// allocate memory
std::size_t source_bytes = batch_size * memory_size * source_layer_size * sizeof(float);
std::size_t sink_bytes = batch_size * memory_size * sink_layer_size * sizeof(float);
std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(float);
assert(cudaHostAlloc((void**)(&h_source_outputs), source_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_source_outputs), source_bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_weights), weight_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_weights), weight_bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_sink_input), sink_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_sink_input), sink_bytes) == cudaSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> source_output(h_source_outputs, batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 2>> weight(h_weights, source_layer_size, sink_layer_size);
weight.setConstant(1);
Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_input(h_sink_input, batch_size, memory_size, sink_layer_size);
sink_input.setConstant(0);
// Set up the device
cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeForwardPropogation(
h_source_outputs,
d_source_outputs,
h_weights,
d_weights,
h_sink_input,
d_sink_input,
integration_function,
batch_size,
memory_size,
source_layer_size,
sink_layer_size,
source_time_steps,
sink_time_step,
device,
true,
true);
// Synchronize the stream
cudaError_t err = cudaStreamQuery(stream);
assert(cudaStreamSynchronize(stream) == cudaSuccess);
assert(cudaStreamDestroy(stream) == cudaSuccess);
Eigen::Tensor<float, 3> expected_input(batch_size, memory_size, sink_layer_size);
expected_input.setValues({ {{2}, {0}},
{{4}, {0}},
{{6}, {0}},
{{8}, {0}} });
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int node_iter = 0; node_iter < sink_layer_size; ++node_iter) {
//std::cout << "[Input] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << sink_input(batch_iter, memory_iter, node_iter) << std::endl;
assert(sink_input(batch_iter, memory_iter, node_iter) == expected_input(batch_iter, memory_iter, node_iter));
}
}
}
// release resources
assert(cudaFreeHost(h_source_outputs) == cudaSuccess);
assert(cudaFree(d_source_outputs) == cudaSuccess);
assert(cudaFreeHost(h_weights) == cudaSuccess);
assert(cudaFree(d_weights) == cudaSuccess);
assert(cudaFreeHost(h_sink_input) == cudaSuccess);
assert(cudaFree(d_sink_input) == cudaSuccess);
}
void test_backwardPropogationGpuDevice()
{
const int device_id = 0;
ModelKernalGpu<float> kernal;
std::shared_ptr<IntegrationErrorTensorOp<float, Eigen::GpuDevice>> integration_function = std::make_shared<SumErrorTensorOp<float, Eigen::GpuDevice>>(SumErrorTensorOp<float, Eigen::GpuDevice>());
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int source_time_step = 0;
const int sink_time_step = 0;
float* h_source_errors;
float* d_source_errors;
float* h_source_inputs;
float* d_source_inputs;
float* h_weights;
float* d_weights;
float* h_sink_error;
float* d_sink_error;
float* h_sink_output;
float* d_sink_output;
float* h_sink_derivative;
float* d_sink_derivative;
assert(cudaSetDevice(device_id) == cudaSuccess); // is this needed?
// allocate memory
std::size_t source_bytes = batch_size * memory_size * source_layer_size * sizeof(float);
std::size_t sink_bytes = batch_size * memory_size * sink_layer_size * sizeof(float);
std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(float);
assert(cudaHostAlloc((void**)(&h_source_errors), source_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_source_errors), source_bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_source_inputs), source_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_source_inputs), source_bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_weights), weight_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_weights), weight_bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_sink_error), sink_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_sink_error), sink_bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_sink_derivative), sink_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_sink_derivative), sink_bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_sink_output), sink_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_sink_output), sink_bytes) == cudaSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> source_error(h_source_errors, batch_size, memory_size, source_layer_size);
source_error.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 3>> source_input(h_source_inputs, batch_size, memory_size, source_layer_size);
source_input.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 2>> weight(h_weights, source_layer_size, sink_layer_size);
weight.setConstant(1);
Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_derivative(h_sink_derivative, batch_size, memory_size, sink_layer_size);
sink_derivative.setConstant(2);
Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_error(h_sink_error, batch_size, memory_size, sink_layer_size);
sink_error.setConstant(0);
Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_output(h_sink_output, batch_size, memory_size, sink_layer_size);
sink_output.setConstant(1);
// Set up the device
cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeBackwardPropogation(
h_source_errors,
d_source_errors,
h_source_inputs,
d_source_inputs,
h_sink_output,
d_sink_output,
h_weights,
d_weights,
h_sink_error,
d_sink_error,
h_sink_derivative,
d_sink_derivative,
source_layer_size,
integration_function,
batch_size,
memory_size,
source_layer_size,
sink_layer_size,
source_time_step,
sink_time_step,
device,
true,
true);
// Synchronize the stream
cudaError_t err = cudaStreamQuery(stream);
assert(cudaStreamSynchronize(stream) == cudaSuccess);
assert(cudaStreamDestroy(stream) == cudaSuccess);
Eigen::Tensor<float, 3> expected_error(batch_size, memory_size, sink_layer_size);
expected_error.setValues({ {{4}, {0}},
{{8}, {0}},
{{12}, {0}},
{{16}, {0}} });
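// Expected values: the back-propagated sink error is the weighted sum of the two source
// errors multiplied by the sink derivative, e.g. (1+1)*2 = 4 for the first batch entry.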
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
for (int node_iter = 0; node_iter < sink_layer_size; ++node_iter) {
//std::cout << "[Sink Error] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << sink_error(batch_iter, memory_iter, node_iter) << std::endl;
assert(sink_error(batch_iter, memory_iter, node_iter) == expected_error(batch_iter, memory_iter, node_iter));
}
}
}
assert(cudaFreeHost(h_source_errors) == cudaSuccess);
assert(cudaFree(d_source_errors) == cudaSuccess);
assert(cudaFreeHost(h_source_inputs) == cudaSuccess);
assert(cudaFree(d_source_inputs) == cudaSuccess);
assert(cudaFreeHost(h_weights) == cudaSuccess);
assert(cudaFree(d_weights) == cudaSuccess);
assert(cudaFreeHost(h_sink_error) == cudaSuccess);
assert(cudaFree(d_sink_error) == cudaSuccess);
assert(cudaFreeHost(h_sink_derivative) == cudaSuccess);
assert(cudaFree(d_sink_derivative) == cudaSuccess);
assert(cudaFreeHost(h_sink_output) == cudaSuccess);
assert(cudaFree(d_sink_output) == cudaSuccess);
}
void test_modelErrorGpuDevice()
{
const int device_id = 0;
ModelKernalGpu<float> kernal;
std::shared_ptr<LossFunctionTensorOp<float, Eigen::GpuDevice>> loss_function = std::make_shared<MSELossTensorOp<float, Eigen::GpuDevice>>(MSELossTensorOp<float, Eigen::GpuDevice>());
std::shared_ptr<LossFunctionGradTensorOp<float, Eigen::GpuDevice>> loss_grad_function = std::make_shared<MSELossGradTensorOp<float, Eigen::GpuDevice>>(MSELossGradTensorOp<float, Eigen::GpuDevice>());
const int batch_size = 4;
const int memory_size = 2;
const int layer_size = 2;
const int time_step = 0;
float* h_predicted;
float* d_predicted;
float* h_node_errors;
float* d_node_errors;
float* h_model_error;
float* d_model_error;
assert(cudaSetDevice(device_id) == cudaSuccess); // is this needed?
// allocate memory
std::size_t bytes = batch_size * memory_size * layer_size * sizeof(float);
std::size_t model_bytes = batch_size * memory_size * sizeof(float);
assert(cudaHostAlloc((void**)(&h_predicted), bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_predicted), bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_node_errors), bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_node_errors), bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_model_error), model_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_model_error), model_bytes) == cudaSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> predicted(h_predicted, batch_size, memory_size, layer_size);
predicted.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 2>> model_error(h_model_error, batch_size, memory_size);
model_error.setConstant(0);
Eigen::TensorMap<Eigen::Tensor<float, 3>> node_error(h_node_errors, batch_size, memory_size, layer_size);
node_error.setConstant(0);
Eigen::Tensor<float, 2> expected(batch_size, layer_size);
expected.setConstant(1);
// Set up the device
cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeModelErrors(
expected,
h_predicted,
d_predicted,
h_model_error,
d_model_error,
h_node_errors,
d_node_errors,
loss_function,
loss_grad_function,
batch_size,
memory_size,
layer_size,
time_step,
device,
true,
true);
// Synchronize the stream
cudaError_t err = cudaStreamQuery(stream);
assert(cudaStreamSynchronize(stream) == cudaSuccess);
assert(cudaStreamDestroy(stream) == cudaSuccess);
Eigen::Tensor<float, 2> expected_model_error(batch_size, memory_size);
expected_model_error.setValues({ {0, 0}, {0.5, 0}, {2.0, 0}, {4.5, 0} });
Eigen::Tensor<float, 3> expected_node_error(batch_size, memory_size, layer_size);
expected_node_error.setValues({
{ {0, 0 }, { 0, 0 } },
{ {-0.5, -0.5 }, { 0, 0 } },
{ {-1, -1 }, { 0, 0 } },
{ {-1.5, -1.5 }, { 0, 0 } } });
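// These expectations are consistent with an MSE that averages 0.5*(expected - predicted)^2
// over the layer, e.g. batch 1: (0.5*1 + 0.5*1)/2 = 0.5, and with a node gradient of
// (expected - predicted)/layer_size, e.g. (1 - 2)/2 = -0.5.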
for (int batch_iter = 0; batch_iter < batch_size; ++batch_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
//std::cout << "[Model Error] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << " = " << model_error(batch_iter, memory_iter) << std::endl;
assert(model_error(batch_iter, memory_iter) == expected_model_error(batch_iter, memory_iter));
for (int node_iter = 0; node_iter < layer_size; ++node_iter) {
//std::cout << "[Node Error] Batch iter: " << batch_iter << ", Memory Iter: " << memory_iter << ", Node Iter: " << node_iter << " = " << node_error(batch_iter, memory_iter, node_iter) << std::endl;
assert(node_error(batch_iter, memory_iter, node_iter) == expected_node_error(batch_iter, memory_iter, node_iter));
}
}
}
assert(cudaFreeHost(h_predicted) == cudaSuccess);
assert(cudaFree(d_predicted) == cudaSuccess);
assert(cudaFreeHost(h_node_errors) == cudaSuccess);
assert(cudaFree(d_node_errors) == cudaSuccess);
assert(cudaFreeHost(h_model_error) == cudaSuccess);
assert(cudaFree(d_model_error) == cudaSuccess);
}
void test_modelMetricGpuDevice()
{
const int device_id = 0;
ModelKernalGpu<float> kernal;
std::shared_ptr<MetricFunctionTensorOp<float, Eigen::GpuDevice>> metric_function = std::make_shared<MAETensorOp<float, Eigen::GpuDevice>>(MAETensorOp<float, Eigen::GpuDevice>());
const int batch_size = 4;
const int memory_size = 2;
const int layer_size = 2;
const int n_metrics = 1;
const int time_step = 0;
const int metric_index = 0;
float* h_predicted;
float* d_predicted;
float* h_model_metric;
float* d_model_metric;
assert(cudaSetDevice(device_id) == cudaSuccess); // is this needed?
// allocate memory
std::size_t bytes = batch_size * memory_size * layer_size * sizeof(float);
std::size_t model_bytes = n_metrics * memory_size * sizeof(float);
assert(cudaHostAlloc((void**)(&h_predicted), bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_predicted), bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_model_metric), model_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_model_metric), model_bytes) == cudaSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> predicted(h_predicted, batch_size, memory_size, layer_size);
predicted.setValues({ {{1, 1}, {0, 0}},
{{2, 2}, {0, 0}},
{{3, 3}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 2>> model_metric(h_model_metric, n_metrics, memory_size);
model_metric.setConstant(0);
Eigen::Tensor<float, 2> expected(batch_size, layer_size);
expected.setConstant(1);
// Set up the device
cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeModelMetric(
expected,
h_predicted,
d_predicted,
h_model_metric,
d_model_metric,
metric_function,
batch_size,
memory_size,
layer_size,
n_metrics,
time_step,
metric_index,
device,
true,
true);
// Synchronize the stream
cudaError_t err = cudaStreamQuery(stream);
assert(cudaStreamSynchronize(stream) == cudaSuccess);
assert(cudaStreamDestroy(stream) == cudaSuccess);
Eigen::Tensor<float, 2> expected_model_metric(n_metrics, memory_size);
expected_model_metric.setValues({ {1.5, 0} });
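// Expected MAE at time step 0: mean absolute deviation of the predictions from the
// expected value of 1, i.e. (|1-1| + |1-2| + |1-3| + |1-4|)/4 = 1.5 over the batch.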
for (int metric_iter = 0; metric_iter < n_metrics; ++metric_iter) {
for (int memory_iter = 0; memory_iter < memory_size; ++memory_iter) {
//std::cout << "[Model Metric] Metric iter: " << metric_iter << ", Memory Iter: " << memory_iter << " = " << model_metric(metric_iter, memory_iter) << std::endl;
assert(model_metric(metric_iter, memory_iter) == expected_model_metric(metric_iter, memory_iter));
}
}
assert(cudaFreeHost(h_predicted) == cudaSuccess);
assert(cudaFree(d_predicted) == cudaSuccess);
assert(cudaFreeHost(h_model_metric) == cudaSuccess);
assert(cudaFree(d_model_metric) == cudaSuccess);
}
void test_weightErrorGpuDevice()
{
const int device_id = 0;
ModelKernalGpu<float> kernal;
std::shared_ptr<IntegrationWeightGradTensorOp<float, Eigen::GpuDevice>> integration_function = std::make_shared<SumWeightGradTensorOp<float, Eigen::GpuDevice>>(SumWeightGradTensorOp<float, Eigen::GpuDevice>());
const int batch_size = 4;
const int memory_size = 2;
const int source_layer_size = 2;
const int sink_layer_size = 1;
float* h_sink_errors;
float* d_sink_errors;
float* h_source_outputs;
float* d_source_outputs;
float* h_source_inputs;
float* d_source_inputs;
float* h_weight;
float* d_weight;
float* h_weight_error;
float* d_weight_error;
assert(cudaSetDevice(device_id) == cudaSuccess); // is this needed?
// allocate memory
std::size_t source_bytes = batch_size * memory_size * source_layer_size * sizeof(float);
std::size_t sink_bytes = batch_size * memory_size * sink_layer_size * sizeof(float);
std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(float);
assert(cudaHostAlloc((void**)(&h_sink_errors), sink_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_sink_errors), sink_bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_source_outputs), source_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_source_outputs), source_bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_source_inputs), source_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_source_inputs), source_bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_weight), weight_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_weight), weight_bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_weight_error), weight_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_weight_error), weight_bytes) == cudaSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> sink_error(h_sink_errors, batch_size, memory_size, sink_layer_size);
sink_error.setValues({ {{1}, {1}},
{{2}, {1}},
{{3}, {0}},
{{4}, {0}} });
Eigen::TensorMap<Eigen::Tensor<float, 3>> source_output(h_source_outputs, batch_size, memory_size, source_layer_size);
source_output.setValues({ {{1, 1}, {1, 1}},
{{2, 2}, {2, 2}},
{{1, 1}, {0, 0}},
{{2, 2}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 3>> source_input(h_source_inputs, batch_size, memory_size, source_layer_size);
source_input.setValues({ {{2, 2}, {0, 0}},
{{4, 4}, {0, 0}},
{{2, 2}, {0, 0}},
{{4, 4}, {0, 0}} });
Eigen::TensorMap<Eigen::Tensor<float, 2>> weight(h_weight, source_layer_size, sink_layer_size);
weight.setConstant(1);
Eigen::TensorMap<Eigen::Tensor<float, 2>> weight_error(h_weight_error, source_layer_size, sink_layer_size);
weight_error.setConstant(0);
// Set up the device
cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeWeightErrors(
h_sink_errors,
d_sink_errors,
h_source_outputs,
d_source_outputs,
h_source_inputs,
d_source_inputs,
source_layer_size,
integration_function,
h_weight,
d_weight,
h_weight_error,
d_weight_error,
batch_size,
memory_size,
source_layer_size,
sink_layer_size,
device,
true,
true);
// Synchronize the stream
cudaError_t err = cudaStreamQuery(stream);
assert(cudaStreamSynchronize(stream) == cudaSuccess);
assert(cudaStreamDestroy(stream) == cudaSuccess);
Eigen::Tensor<float, 2> expected_weight_error(source_layer_size, sink_layer_size);
expected_weight_error.setValues({ {-4.75}, {-4.75} });
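// -4.75 is consistent with a weight gradient of -(1/batch_size) * sum over batch and time
// of sink_error * source_output: -(1*1 + 1*1 + 2*2 + 1*2 + 3*1 + 4*2)/4 = -19/4.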
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(weight_error(source_iter, sink_iter) == expected_weight_error(source_iter, sink_iter));
}
}
assert(cudaFreeHost(h_sink_errors) == cudaSuccess);
assert(cudaFree(d_sink_errors) == cudaSuccess);
assert(cudaFreeHost(h_source_outputs) == cudaSuccess);
assert(cudaFree(d_source_outputs) == cudaSuccess);
assert(cudaFreeHost(h_source_inputs) == cudaSuccess);
assert(cudaFree(d_source_inputs) == cudaSuccess);
assert(cudaFreeHost(h_weight) == cudaSuccess);
assert(cudaFree(d_weight) == cudaSuccess);
assert(cudaFreeHost(h_weight_error) == cudaSuccess);
assert(cudaFree(d_weight_error) == cudaSuccess);
}
void test_sharedWeightErrorGpuDevice()
{
const int device_id = 0;
ModelKernalGpu<float> kernal;
const int source_layer_size = 2;
const int sink_layer_size = 2;
const int n_shared_weights = 1;
float* h_shared_weights;
float* d_shared_weights;
float* h_weight_error;
float* d_weight_error;
assert(cudaSetDevice(device_id) == cudaSuccess); // is this needed?
// allocate memory
std::size_t shared_weights_bytes = source_layer_size * sink_layer_size * n_shared_weights * sizeof(float);
std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(float);
assert(cudaHostAlloc((void**)(&h_shared_weights), shared_weights_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_shared_weights), shared_weights_bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_weight_error), weight_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_weight_error), weight_bytes) == cudaSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> shared_weights(h_shared_weights, source_layer_size, sink_layer_size, n_shared_weights);
shared_weights.setValues({
{{1}, {1}},
{{0}, {0}}
});
Eigen::TensorMap<Eigen::Tensor<float, 2>> weight_error(h_weight_error, source_layer_size, sink_layer_size);
weight_error.setValues({ {1, 2}, {3, 4} });
// Set up the device
cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeSharedWeightErrors(
h_weight_error,
d_weight_error,
h_shared_weights,
d_shared_weights,
source_layer_size,
sink_layer_size,
n_shared_weights,
device,
true,
true);
// Synchronize the stream
cudaError_t err = cudaStreamQuery(stream);
assert(cudaStreamSynchronize(stream) == cudaSuccess);
assert(cudaStreamDestroy(stream) == cudaSuccess);
Eigen::Tensor<float, 2> expected_weight_error(source_layer_size, sink_layer_size);
expected_weight_error.setValues({ {3, 3}, {3, 4} });
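// Only the weights flagged in shared group 0 (entries (0,0) and (0,1)) are aggregated:
// their errors 1 + 2 = 3 replace both entries, while the second row stays {3, 4}.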
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight Error] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight_error(source_iter, sink_iter) << std::endl;
assert(weight_error(source_iter, sink_iter) == expected_weight_error(source_iter, sink_iter));
}
}
assert(cudaFreeHost(h_shared_weights) == cudaSuccess);
assert(cudaFree(d_shared_weights) == cudaSuccess);
assert(cudaFreeHost(h_weight_error) == cudaSuccess);
assert(cudaFree(d_weight_error) == cudaSuccess);
}
void test_weightUpdateGpuDevice(){
const int device_id = 0;
ModelKernalGpu<float> kernal;
std::shared_ptr<SolverTensorOp<float, Eigen::GpuDevice>> solver_function = std::make_shared<SGDTensorOp<float, Eigen::GpuDevice>>(SGDTensorOp<float, Eigen::GpuDevice>());
const int source_layer_size = 2;
const int sink_layer_size = 1;
const int iter = 0;
float* h_solver_params;
float* d_solver_params;
float* h_weight;
float* d_weight;
float* h_weight_error;
float* d_weight_error;
assert(cudaSetDevice(device_id) == cudaSuccess); // is this needed?
// allocate memory
std::size_t solver_bytes = source_layer_size * sink_layer_size * 3 * sizeof(float);
std::size_t weight_bytes = source_layer_size * sink_layer_size * sizeof(float);
assert(cudaHostAlloc((void**)(&h_solver_params), solver_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_solver_params), solver_bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_weight), weight_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_weight), weight_bytes) == cudaSuccess);
assert(cudaHostAlloc((void**)(&h_weight_error), weight_bytes, cudaHostAllocDefault) == cudaSuccess);
assert(cudaMalloc((void**)(&d_weight_error), weight_bytes) == cudaSuccess);
Eigen::TensorMap<Eigen::Tensor<float, 3>> solver_params(h_solver_params, source_layer_size, sink_layer_size, 3);
solver_params.setValues({ {{0.01, 0.99, 0.0}},
{{0.01, 0.99, 0.0}} });
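// The three solver parameters per weight are presumably {learning rate, momentum,
// previous update} for SGDTensorOp: lr = 0.01, momentum = 0.99, history initialized to 0.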
Eigen::TensorMap<Eigen::Tensor<float, 2>> weight(h_weight, source_layer_size, sink_layer_size);
weight.setConstant(1);
Eigen::TensorMap<Eigen::Tensor<float, 2>> weight_error(h_weight_error, source_layer_size, sink_layer_size);
weight_error.setValues({ {-0.2}, {-20} });
// Set up the device
cudaStream_t stream; // The stream will be destroyed by GpuStreamDevice once the function goes out of scope!
assert(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess);
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
bool success = kernal.executeWeightUpdate(
h_weight,
d_weight,
h_solver_params,
d_solver_params,
h_weight_error,
d_weight_error,
solver_function,
source_layer_size,
sink_layer_size,
iter,
device,
true,
true);
// Synchronize the stream
cudaError_t err = cudaStreamQuery(stream);
assert(cudaStreamSynchronize(stream) == cudaSuccess);
assert(cudaStreamDestroy(stream) == cudaSuccess);
Eigen::Tensor<float, 2> expected_weights(source_layer_size, sink_layer_size);
expected_weights.setValues({ {1.002}, {1.2} });
Eigen::Tensor<float, 3> expected_params(source_layer_size, sink_layer_size, 3);
expected_params.setValues({ {{0.01, 0.99, -0.002}},
{{0.01, 0.99, -0.2}} });
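// The weight check matches a plain SGD step w <- w - lr * error:
// 1 - 0.01*(-0.2) = 1.002 and 1 - 0.01*(-20) = 1.2. The third solver parameter is
// expected to hold the update history (-0.002, -0.2), but only the first two are asserted below.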
for (int source_iter = 0; source_iter < source_layer_size; ++source_iter) {
for (int sink_iter = 0; sink_iter < sink_layer_size; ++sink_iter) {
//std::cout << "[Weight] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << " = " << weight(source_iter, sink_iter) << std::endl;
assert(assert_close(weight(source_iter, sink_iter),expected_weights(source_iter, sink_iter)));
for (int param_iter = 0; param_iter < 2; ++param_iter) { // [NOTE: should be `param_iter < 3`]
//std::cout << "[Params] Source iter: " << source_iter << ", Sink Iter: " << sink_iter << ", Param Iter: " << param_iter << " = " << solver_params(source_iter, sink_iter, param_iter) << std::endl;
assert(assert_close(solver_params(source_iter, sink_iter, param_iter),expected_params(source_iter, sink_iter, param_iter))); // Not sure why the last param does not pass...
}
}
}
assert(cudaFreeHost(h_solver_params) == cudaSuccess);
assert(cudaFree(d_solver_params) == cudaSuccess);
assert(cudaFreeHost(h_weight) == cudaSuccess);
assert(cudaFree(d_weight) == cudaSuccess);
assert(cudaFreeHost(h_weight_error) == cudaSuccess);
assert(cudaFree(d_weight_error) == cudaSuccess);
}
int main(int argc, char** argv)
{
test_constructorGpuDevice();
test_destructorGpuDevice();
test_nodeActivationGpuDevice();
test_nodeDerivativeGpuDevice();
test_forwardPropogationGpuDevice();
test_backwardPropogationGpuDevice();
test_modelErrorGpuDevice();
test_modelMetricGpuDevice();
test_weightErrorGpuDevice();
test_sharedWeightErrorGpuDevice();
test_weightUpdateGpuDevice();
return 0;
}
#endif
|
0306c0ac7d497ed166909cebca7a724127d07acd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <cassert>
#include <algorithm>
__device__ double sumWarpAll(double a) {
// TODO: 1.a) Compute sum of all values within a warp.
// Only the threads with threadIdx.x % warpSize == 0 have to
// return the correct result.
// (although this function operates only on a single warp, it
// will be called with many threads for testing)
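// Butterfly (XOR) reduction: at each step every lane exchanges its partial sum with the
// lane whose index differs in one bit, so after log2(warpSize) steps all 32 lanes hold
// the full warp sum (a reduce-all, not just lane 0).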
// unsigned int laneId = threadIdx.x & 0x1f;
// unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
double sum = a;
for(unsigned int s = warpSize/2; s>0; s/=2){
sum += __shfl_xor_sync(0xffffffff, sum, s, warpSize); // optional: warp-level reduce-all operation
// sum += __shfl_down_sync(0xffffffff, sum, s, warpSize);
}
return sum;
}
/// Returns the sum of all values `a` within a warp,
/// with the correct answer returned only by the 0th thread of a warp.
__device__ double sumWarp(double a) {
// TODO: 1.a) Compute sum of all values within a warp.
// Only the threads with threadIdx.x % warpSize == 0 have to
// return the correct result.
// (although this function operates only on a single warp, it
// will be called with many threads for testing)
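// Tree reduction via __shfl_down_sync: lane i adds the partial sum of lane i+s while s is
// halved each step, funnelling the total into lane 0; only lane 0 ends with the full sum.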
// unsigned int laneId = threadIdx.x & 0x1f;
// unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
double sum = a;
for(unsigned int s = warpSize/2; s>0; s/=2){
// sum += __shfl_xor_sync(0xffffffff, sum, s, warpSize); // optional: warp-level reduce-all operation
sum += __shfl_down_sync(0xffffffff, sum, s, warpSize);
}
return sum;
}
/// Returns the sum of all values `a` within a block,
/// with the correct answer returned only by the 0th thread of a block.
__device__ double sumBlock(double a) {
// TODO: 1.c) Compute the sum of values `a` for all threads within a block.
// Only threadIdx.x == 0 has to return the correct result.
// NOTE: For 1.c) implement either this or `argMaxBlock`!
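// Two-level reduction: each warp reduces its 32 values, lane 0 of every warp stores the
// partial into shared memory, and the first warp then reduces those (up to 32) partials.
// Note: this assumes a full 1024-thread block; with fewer warps, the unwritten
// warp_sums entries would be read uninitialized.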
__shared__ double warp_sums[32];
int tid = threadIdx.x;
double warp_sum = sumWarp(a);
if (tid % 32 == 0){
unsigned int wid = tid / 32;
warp_sums[wid] = warp_sum;
}
__syncthreads();
if (tid < 32){
warp_sum = sumWarp(warp_sums[tid]);
}
__syncthreads();
return warp_sum;
// for (unsigned int s=16; s>0; s/=2)
// {
// if (tid < s) warp_sums[tid] += warp_sums[tid + s];
//
// __syncthreads();
// }
//
// return warp_sums[0];
}
__global__ void sum1M_helper(const double *a, double *b, int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
double block_sum = sumBlock(idx < N ? a[idx] : 0.0);
if (threadIdx.x == 0){
b[blockIdx.x] = block_sum;
}
/*
hipDeviceSynchronize();
if (blockIdx.x == 0){
block_sum = sumBlock(threadIdx.x < numBlocks ? b[threadIdx.x] : 0.0);
__syncthreads();
if (threadIdx.x == 0) b[0] = block_sum;
}
*/
}
/// Compute the sum of all values aDev[0]..aDev[N-1] for N <= 1024^2 and store the result to bDev[0].
void sum1M(const double *aDev, double *bDev, int N) {
assert(N <= 1024 * 1024);
// TODO: 1.d) Implement either this or `argMax1M`.
// Avoid copying any data back to the host.
// Hint: The solution requires more CUDA operations than just
// calling a single kernel. Feel free to use whatever you find
// necessary.
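// Two-pass reduction: the first launch writes one partial sum per block into bDev,
// and a second single-block launch reduces those (at most 1024) partials into bDev[0].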
int numBlocks = (N + 1024 - 1) / 1024; // <= 1024 since N <= 1024*1024
hipLaunchKernelGGL(( sum1M_helper), dim3(numBlocks), dim3(1024), 0, 0, aDev, bDev, N);
hipDeviceSynchronize();
hipLaunchKernelGGL(( sum1M_helper), dim3(1), dim3(1024), 0, 0, bDev, bDev, numBlocks);
}
#include "reduction_sum.h"
int main() {
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 3);
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 32);
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 320);
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 1023123);
printf("sumWarp OK.\n");
// OPTIONAL: 1a reduce-all. In case you want to try to implement it,
// implement a global function `__device__ double sumWarpAll(double x)`,
// and comment out sumWarpAll* functions in utils.h.
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 3);
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 32);
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 320);
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 1023123);
printf("sumWarpAll OK.\n");
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 32);
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 1024);
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 12341);
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 1012311);
printf("sumBlock OK.\n");
testLargeSum("sum1M", sum1M, 32);
testLargeSum("sum1M", sum1M, 1024);
testLargeSum("sum1M", sum1M, 12341);
testLargeSum("sum1M", sum1M, 1012311);
printf("sum1M OK.\n");
}
|
0306c0ac7d497ed166909cebca7a724127d07acd.cu
|
#include "utils.h"
#include <cassert>
#include <algorithm>
__device__ double sumWarpAll(double a) {
// TODO: 1.a) Compute sum of all values within a warp.
// Only the threads with threadIdx.x % warpSize == 0 have to
// return the correct result.
// (although this function operates only on a single warp, it
// will be called with many threads for testing)
// unsigned int laneId = threadIdx.x & 0x1f;
// unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
double sum = a;
for(unsigned int s = warpSize/2; s>0; s/=2){
sum += __shfl_xor_sync(0xffffffff, sum, s, warpSize); // optional: warp-level reduce-all operation
// sum += __shfl_down_sync(0xffffffff, sum, s, warpSize);
}
return sum;
}
/// Returns the sum of all values `a` within a warp,
/// with the correct answer returned only by the 0th thread of a warp.
__device__ double sumWarp(double a) {
// TODO: 1.a) Compute sum of all values within a warp.
// Only the threads with threadIdx.x % warpSize == 0 have to
// return the correct result.
// (although this function operates only on a single warp, it
// will be called with many threads for testing)
// unsigned int laneId = threadIdx.x & 0x1f;
// unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
double sum = a;
for(unsigned int s = warpSize/2; s>0; s/=2){
// sum += __shfl_xor_sync(0xffffffff, sum, s, warpSize); // optional: warp-level reduce-all operation
sum += __shfl_down_sync(0xffffffff, sum, s, warpSize);
}
return sum;
}
/// Returns the sum of all values `a` within a block,
/// with the correct answer returned only by the 0th thread of a block.
__device__ double sumBlock(double a) {
// TODO: 1.c) Compute the sum of values `a` for all threads within a block.
// Only threadIdx.x == 0 has to return the correct result.
// NOTE: For 1.c) implement either this or `argMaxBlock`!
__shared__ double warp_sums[32];
int tid = threadIdx.x;
double warp_sum = sumWarp(a);
if (tid % 32 == 0){
unsigned int wid = tid / 32;
warp_sums[wid] = warp_sum;
}
__syncthreads();
if (tid < 32){
warp_sum = sumWarp(warp_sums[tid]);
}
__syncthreads();
return warp_sum;
// for (unsigned int s=16; s>0; s/=2)
// {
// if (tid < s) warp_sums[tid] += warp_sums[tid + s];
//
// __syncthreads();
// }
//
// return warp_sums[0];
}
__global__ void sum1M_helper(const double *a, double *b, int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
double block_sum = sumBlock(idx < N ? a[idx] : 0.0);
if (threadIdx.x == 0){
b[blockIdx.x] = block_sum;
}
/*
cudaDeviceSynchronize();
if (blockIdx.x == 0){
block_sum = sumBlock(threadIdx.x < numBlocks ? b[threadIdx.x] : 0.0);
__syncthreads();
if (threadIdx.x == 0) b[0] = block_sum;
}
*/
}
/// Compute the sum of all values aDev[0]..aDev[N-1] for N <= 1024^2 and store the result to bDev[0].
void sum1M(const double *aDev, double *bDev, int N) {
assert(N <= 1024 * 1024);
// TODO: 1.d) Implement either this or `argMax1M`.
// Avoid copying any data back to the host.
// Hint: The solution requires more CUDA operations than just
// calling a single kernel. Feel free to use whatever you find
// necessary.
int numBlocks = (N + 1024 - 1) / 1024; // <= 1024 since N <= 1024*1024
sum1M_helper<<<numBlocks, 1024>>>(aDev, bDev, N);
cudaDeviceSynchronize();
sum1M_helper<<<1, 1024>>>(bDev, bDev, numBlocks);
}
#include "reduction_sum.h"
int main() {
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 3);
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 32);
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 320);
testSmallSum(sumWarpTestKernel, sumWarpCheck, 32, 1023123);
printf("sumWarp OK.\n");
// OPTIONAL: 1a reduce-all. In case you want to try to implement it,
// implement a global function `__device__ double sumWarpAll(double x)`,
// and comment out sumWarpAll* functions in utils.h.
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 3);
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 32);
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 320);
testSmallSum(sumWarpAllTestKernel, sumWarpAllCheck, 1, 1023123);
printf("sumWarpAll OK.\n");
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 32);
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 1024);
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 12341);
testSmallSum(sumBlockTestKernel, sumBlockCheck, 1024, 1012311);
printf("sumBlock OK.\n");
testLargeSum("sum1M", sum1M, 32);
testLargeSum("sum1M", sum1M, 1024);
testLargeSum("sum1M", sum1M, 12341);
testLargeSum("sum1M", sum1M, 1012311);
printf("sum1M OK.\n");
}
|
e13ff7dd7ed646b16580b349e6995cbcb2f5c28a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Purpose: Demonstrate matrix multiplication in
* CPU and GPU with global memory and shared memory usage
* Date and time: 04/09/2014
*
* Last modified: Dustin (Ting-Hsuan) Ma
* Date : November 20, 2018
* Author: Inanc Senocak
*
* to compile blas: nvcc -lcublas -O2 gpu_matrixMultiply.cu -o GPU.exe
* to execute: ./matrixMult.exe <m> <n> <k>
*/
#include "rocblas.h"
#include "timer.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <time.h>
#define BLOCKSIZE 16
typedef double REAL;
typedef int INT;
void printMatrix(REAL *matrix, const int nrow, const int ncol)
{
int i, j, idx;
for (j = 0; j < nrow; j++) {
for (i = 0; i < ncol; i++) {
idx = i + j * ncol;
printf("%8.2f ; ", matrix[idx]);
}
printf("\n");
}
printf("\n");
}
void InitializeMatrices(REAL *a, REAL *b, const int M, const int N, const int K)
{
int i, j, idx;
// initialize matrices a & b
for (j = 0; j < M; j++) {
for (i = 0; i < N; i++) {
idx = i + j * N;
a[idx] = (REAL) idx;
}
}
for (j = 0; j < N; j++) {
for (i = 0; i < K; i++) {
idx = i + j * K;
b[idx] = (REAL) idx;
}
}
}
__global__ void matrixMultiplyGPU_gl(REAL *a, REAL *b, REAL *c, const int M, const int N,
const int K)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Row index of matrices a and c
int row = by * BLOCKSIZE + ty;
// Column index of matrices a and b
int col = bx * BLOCKSIZE + tx;
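// Each thread computes one element c(row, col) as the dot product of row `row` of a
// (row-major, leading dimension N) with column `col` of b (leading dimension K).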
REAL C_temp = 0.;
if (row < M && col < K){
for (int k = 0; k < N; k++)
C_temp += a[k + row * N] * b[col + k * K];
c[col + row * K] = C_temp;
}
}
int main(INT argc, char *argv[])
{
if (argc < 4) {
perror("Command-line usage: executableName <M> <N> <K>");
exit(1);
}
int M = atoi(argv[1]);
int N = atoi(argv[2]);
int K = atoi(argv[3]);
REAL *a_d, *b_d, *c_d, *d_d, *e_d;
hipMallocManaged(&a_d, M * N * sizeof(*a_d));
hipMallocManaged(&b_d, N * K * sizeof(*b_d));
hipMallocManaged(&c_d, M * K * sizeof(*c_d)); // Used for GPU
hipMallocManaged(&d_d, M * K * sizeof(*d_d)); // Used for cublasDDOT
hipMallocManaged(&e_d, M * K * sizeof(*e_d)); // Used for cublasDAXPY
InitializeMatrices(a_d, b_d, M, N, K);
// Setting up GPU environment
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
dim3 dimGrid((K + 15) / BLOCKSIZE, (M + 15) / BLOCKSIZE);
float elapsedTime_gpu, elapsedTime_DDOT, elapsedTime_DAXPY;
printf("=====MultKernel=====\n");
hipEvent_t timeStart, timeStop; // WARNING!!! use events only to time the device
hipEventCreate(&timeStart);
hipEventCreate(&timeStop);
hipEventRecord(timeStart, 0);
hipLaunchKernelGGL(( matrixMultiplyGPU_gl), dim3(dimGrid), dim3(dimBlock), 0, 0, a_d, b_d, c_d, M, N, K);
hipDeviceSynchronize();
hipEventRecord(timeStop, 0);
hipEventSynchronize(timeStop);
hipEventElapsedTime(&elapsedTime_gpu, timeStart, timeStop);
printMatrix( c_d, M, K );
//printf("C[2] = %3.1f\n", c_d[2]);
printf("elapsed wall time (GPU) = %5.2f ms\n", elapsedTime_gpu);
printf("=====cublasDDOT=====\n");
hipblasHandle_t handle;
hipblasCreate(&handle);
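// Element-by-element product via BLAS-1: every hipblasDdot forms one output entry as
// (row of a, stride 1) . (column of b, stride K). With square matrices (M == N == K, as
// in a typical run) this reproduces a*b, but at M*K calls it is far slower than a single
// GEMM. An untested single-call sketch for this row-major layout would be:
// const double one = 1.0, zero = 0.0;
// hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, K, M, N, &one, b_d, K, a_d, N, &zero, d_d, K);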
hipEventRecord(timeStart, 0);
for (int i = 0; i < M; i++) {
for (int j = 0; j < K; j++) {
hipblasDdot(handle, N, a_d + j * N, 1, b_d + i, K, d_d + i + j * K);
}
}
hipEventRecord(timeStop, 0);
hipEventSynchronize(timeStop);
hipEventElapsedTime(&elapsedTime_DDOT, timeStart, timeStop);
printMatrix( d_d, M, K );
//printf("D[2] = %3.1f\n", d_d[2]);
printf("elapsed wall time (cublasDDOT) = %5.2f ms\n", elapsedTime_DDOT);
printf("=====cublasDAXPY=====\n");
hipEventRecord(timeStart, 0);
for (int j = 0; j < M; j++) {
for (int i = 0; i < K; i++) {
hipblasDaxpy(handle, M, b_d + j + i * K, a_d + i, N, e_d + j, K);
}
}
hipEventRecord(timeStop, 0);
hipEventSynchronize(timeStop);
hipEventElapsedTime(&elapsedTime_DAXPY, timeStart, timeStop);
printMatrix( e_d, M, K );
//printf("E[2] = %3.1f\n", e_d[2]);
printf("elapsed wall time (cublasDAXPY) = %5.2f ms\n", elapsedTime_DAXPY);
printf("\n");
hipblasDestroy(handle);
// Deallocating Memory
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
hipFree(d_d);
hipFree(e_d);
hipEventDestroy(timeStart);
hipEventDestroy(timeStop);
return (EXIT_SUCCESS);
}
|
e13ff7dd7ed646b16580b349e6995cbcb2f5c28a.cu
|
/*
* Purpose: Demonstrate matrix multiplication in
* CPU and GPU with global memory and shared memory usage
* Date and time: 04/09/2014
*
* Last modified: Dustin (Ting-Hsuan) Ma
* Date : November 20, 2018
* Author: Inanc Senocak
*
* to compile blas: nvcc -lcublas -O2 gpu_matrixMultiply.cu -o GPU.exe
* to execute: ./matrixMult.exe <m> <n> <k>
*/
#include "cublas_v2.h"
#include "timer.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <time.h>
#define BLOCKSIZE 16
typedef double REAL;
typedef int INT;
void printMatrix(REAL *matrix, const int nrow, const int ncol)
{
int i, j, idx;
for (j = 0; j < nrow; j++) {
for (i = 0; i < ncol; i++) {
idx = i + j * ncol;
printf("%8.2f ; ", matrix[idx]);
}
printf("\n");
}
printf("\n");
}
void InitializeMatrices(REAL *a, REAL *b, const int M, const int N, const int K)
{
int i, j, idx;
// initialize matrices a & b
for (j = 0; j < M; j++) {
for (i = 0; i < N; i++) {
idx = i + j * N;
a[idx] = (REAL) idx;
}
}
for (j = 0; j < N; j++) {
for (i = 0; i < K; i++) {
idx = i + j * K;
b[idx] = (REAL) idx;
}
}
}
__global__ void matrixMultiplyGPU_gl(REAL *a, REAL *b, REAL *c, const int M, const int N,
const int K)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Row index of matrices a and c
int row = by * BLOCKSIZE + ty;
// Column index of matrices a and b
int col = bx * BLOCKSIZE + tx;
REAL C_temp = 0.;
if (row < M && col < K){
for (int k = 0; k < N; k++)
C_temp += a[k + row * N] * b[col + k * K];
c[col + row * K] = C_temp;
}
}
int main(INT argc, char *argv[])
{
if (argc < 4) {
perror("Command-line usage: executableName <M> <N> <K>");
exit(1);
}
int M = atoi(argv[1]);
int N = atoi(argv[2]);
int K = atoi(argv[3]);
REAL *a_d, *b_d, *c_d, *d_d, *e_d;
cudaMallocManaged(&a_d, M * N * sizeof(*a_d));
cudaMallocManaged(&b_d, N * K * sizeof(*b_d));
cudaMallocManaged(&c_d, M * K * sizeof(*c_d)); // Used for GPU
cudaMallocManaged(&d_d, M * K * sizeof(*d_d)); // Used for cublasDDOT
cudaMallocManaged(&e_d, M * K * sizeof(*e_d)); // Used for cublasDAXPY
InitializeMatrices(a_d, b_d, M, N, K);
// Setting up GPU environment
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
dim3 dimGrid((K + 15) / BLOCKSIZE, (M + 15) / BLOCKSIZE);
float elapsedTime_gpu, elapsedTime_DDOT, elapsedTime_DAXPY;
printf("=====MultKernel=====\n");
cudaEvent_t timeStart, timeStop; // WARNING!!! use events only to time the device
cudaEventCreate(&timeStart);
cudaEventCreate(&timeStop);
cudaEventRecord(timeStart, 0);
matrixMultiplyGPU_gl<<<dimGrid, dimBlock>>>(a_d, b_d, c_d, M, N, K);
cudaDeviceSynchronize();
cudaEventRecord(timeStop, 0);
cudaEventSynchronize(timeStop);
cudaEventElapsedTime(&elapsedTime_gpu, timeStart, timeStop);
printMatrix( c_d, M, K );
//printf("C[2] = %3.1f\n", c_d[2]);
printf("elapsed wall time (GPU) = %5.2f ms\n", elapsedTime_gpu);
printf("=====cublasDDOT=====\n");
cublasHandle_t handle;
cublasCreate(&handle);
cudaEventRecord(timeStart, 0);
for (int i = 0; i < M; i++) {
for (int j = 0; j < K; j++) {
cublasDdot(handle, N, a_d + j * N, 1, b_d + i, K, d_d + i + j * K);
}
}
cudaEventRecord(timeStop, 0);
cudaEventSynchronize(timeStop);
cudaEventElapsedTime(&elapsedTime_DDOT, timeStart, timeStop);
printMatrix( d_d, M, K );
//printf("D[2] = %3.1f\n", d_d[2]);
printf("elapsed wall time (cublasDDOT) = %5.2f ms\n", elapsedTime_DDOT);
printf("=====cublasDAXPY=====\n");
cudaEventRecord(timeStart, 0);
for (int j = 0; j < M; j++) {
for (int i = 0; i < K; i++) {
cublasDaxpy(handle, M, b_d + j + i * K, a_d + i, N, e_d + j, K);
}
}
cudaEventRecord(timeStop, 0);
cudaEventSynchronize(timeStop);
cudaEventElapsedTime(&elapsedTime_DAXPY, timeStart, timeStop);
printMatrix( e_d, M, K );
//printf("E[2] = %3.1f\n", e_d[2]);
printf("elapsed wall time (cublasDAXPY) = %5.2f ms\n", elapsedTime_DAXPY);
printf("\n");
cublasDestroy(handle);
// Deallocating Memory
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
cudaFree(d_d);
cudaFree(e_d);
cudaEventDestroy(timeStart);
cudaEventDestroy(timeStop);
return (EXIT_SUCCESS);
}
|
sample_2.2-2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
static const int M = 16;// rows
static const int N = 32;// columns
#define CHECK_STATUS(status) \
if (status != hipSuccess) \
fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\
hipGetErrorString(status))
// element-wise addition of two 2D arrays
__global__ void MatAdd(float *A, float *B, float *C)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < M && j < N) {
int index = i * N + j;
C[index] = A[index] + B[index];
}
}
int main(int argc, char **argv) {
CHECK_STATUS(hipSetDevice(0));
const int SIZE = M * N;
float a[SIZE];
float b[SIZE];
for(int i = 0;i<SIZE;i++){
a[i] = i;
b[i] = i;
}
float c[SIZE];
float *d_a,*d_b,*d_c;
// allocate device memory
CHECK_STATUS(hipMalloc(&d_a, SIZE*sizeof(float)));
CHECK_STATUS(hipMalloc(&d_b, SIZE*sizeof(float)));
CHECK_STATUS(hipMalloc(&d_c, SIZE*sizeof(float)));
// copy data from host to device
CHECK_STATUS(hipMemcpy(d_a,a,SIZE* sizeof(float),hipMemcpyHostToDevice));
CHECK_STATUS(hipMemcpy(d_b,b,SIZE* sizeof(float),hipMemcpyHostToDevice));
// launch the kernel
dim3 threadsPerBlock(16, 16);
dim3 numBlocks(M / threadsPerBlock.x, N / threadsPerBlock.y);
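// Note: integer division only covers the full matrix because M and N are multiples of 16;
// for arbitrary sizes the grid would need ceiling division, e.g. (M + 15) / 16.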
hipLaunchKernelGGL(( MatAdd), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_a, d_b, d_c);
// check for errors
CHECK_STATUS(hipGetLastError());
// copy the result from device back to host
CHECK_STATUS(hipMemcpy(c,d_c,SIZE* sizeof(float),hipMemcpyDeviceToHost));
// print the result
for(int i=0;i<M;i++)
{
for(int j=0;j<N;j++)
printf("%f\t",c[i*N + j]);
printf("\n");
}
// free device memory
CHECK_STATUS(hipFree(d_a));
CHECK_STATUS(hipFree(d_b));
CHECK_STATUS(hipFree(d_c));
return 0;
}
|
sample_2.2-2.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
static const int M = 16;// rows
static const int N = 32;// columns
#define CHECK_STATUS(status) \
if (status != cudaSuccess) \
fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\
cudaGetErrorString(status))
// element-wise addition of two 2D arrays
__global__ void MatAdd(float *A, float *B, float *C)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < M && j < N) {
int index = i * N + j;
C[index] = A[index] + B[index];
}
}
int main(int argc, char **argv) {
CHECK_STATUS(cudaSetDevice(0));
const int SIZE = M * N;
float a[SIZE];
float b[SIZE];
for(int i = 0;i<SIZE;i++){
a[i] = i;
b[i] = i;
}
float c[SIZE];
float *d_a,*d_b,*d_c;
// allocate device memory
CHECK_STATUS(cudaMalloc(&d_a, SIZE*sizeof(float)));
CHECK_STATUS(cudaMalloc(&d_b, SIZE*sizeof(float)));
CHECK_STATUS(cudaMalloc(&d_c, SIZE*sizeof(float)));
// copy data from host to device
CHECK_STATUS(cudaMemcpy(d_a,a,SIZE* sizeof(float),cudaMemcpyHostToDevice));
CHECK_STATUS(cudaMemcpy(d_b,b,SIZE* sizeof(float),cudaMemcpyHostToDevice));
// launch the kernel
dim3 threadsPerBlock(16, 16);
dim3 numBlocks(M / threadsPerBlock.x, N / threadsPerBlock.y);
MatAdd<<<numBlocks, threadsPerBlock>>>(d_a, d_b, d_c);
// check for errors
CHECK_STATUS(cudaGetLastError());
// copy the result from device back to host
CHECK_STATUS(cudaMemcpy(c,d_c,SIZE* sizeof(float),cudaMemcpyDeviceToHost));
// print the result
for(int i=0;i<M;i++)
{
for(int j=0;j<N;j++)
printf("%f\t",c[i*N + j]);
printf("\n");
}
// free device memory
CHECK_STATUS(cudaFree(d_a));
CHECK_STATUS(cudaFree(d_b));
CHECK_STATUS(cudaFree(d_c));
return 0;
}
|
2b2bde364635594fb22923d82b5081120b81bb7b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include "gmock/gmock-generated-matchers.h"
#include "high_res_clock.h"
#include "hip/hip_runtime_api.h"
#include <cugraph.h>
#include "test_utils.h"
#include <thrust/device_ptr.h>
#include <fstream>
std::vector<int>
getGoldenTopKIds(std::string file, int k = 10) {
std::vector<int> vec;
std::ifstream fin(file);
int val;
int count = 0;
while (fin>>val && ((count++) < k)) {
vec.push_back(val);
}
vec.resize(k);
return vec;
}
std::vector<int>
getTopKIds(gdf_column_ptr katz, int k = 10) {
int count = katz.get()->size;
hipStream_t stream = nullptr;
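// Top-k by sort: fill `id` with 0..count-1, sort the ids by Katz score in descending
// order (sort_by_key with thrust::greater), then copy the first k ids back to the host.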
rmm::device_vector<int> id(count);
thrust::sequence(rmm::exec_policy(stream)->on(stream), id.begin(), id.end());
auto colptr = thrust::device_pointer_cast(static_cast<double*>(katz.get()->data));
thrust::sort_by_key(rmm::exec_policy(stream)->on(stream),
colptr, colptr + count, id.begin(), thrust::greater<double>());
std::vector<int> topK(k);
thrust::copy(id.begin(), id.begin() + k, topK.begin());
return topK;
}
int
getMaxDegree(cugraph::Graph * G) {
cugraph::add_adj_list(G);
std::vector<int> out_degree(G->numberOfVertices);
gdf_column_ptr col_out_degree = create_gdf_column(out_degree);
cugraph::degree(G, col_out_degree.get(), 2);
auto degreePtr = thrust::device_pointer_cast(static_cast<int*>(col_out_degree.get()->data));
hipStream_t stream = nullptr;
int max_out_degree = thrust::reduce(rmm::exec_policy(stream)->on(stream),
degreePtr, degreePtr + col_out_degree.get()->size, static_cast<int>(-1), thrust::maximum<int>());
return max_out_degree;
}
typedef struct Katz_Usecase_t {
std::string matrix_file;
std::string result_file;
Katz_Usecase_t(const std::string& a, const std::string& b) {
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
const std::string& rapidsDatasetRootDir = get_rapids_dataset_root_dir();
if ((a != "") && (a[0] != '/')) {
matrix_file = rapidsDatasetRootDir + "/" + a;
} else {
matrix_file = a;
}
if ((b != "") && (b[0] != '/')) {
result_file = rapidsDatasetRootDir + "/" + b;
} else {
result_file = b;
}
}
Katz_Usecase_t& operator=(const Katz_Usecase_t& rhs) {
matrix_file = rhs.matrix_file;
result_file = rhs.result_file;
return *this;
}
} Katz_Usecase;
class Tests_Katz : public ::testing::TestWithParam<Katz_Usecase> {
public:
Tests_Katz() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
void run_current_test(const Katz_Usecase& param) {
Graph_ptr G{new cugraph::Graph, Graph_deleter};
gdf_column_ptr col_src, col_dest, col_katz_centrality;
FILE* fpin = fopen(param.matrix_file.c_str(),"r");
ASSERT_NE(fpin, nullptr) << "fopen (" << param.matrix_file << ") failure.";
int m, k;
int nnz;
MM_typecode mc;
ASSERT_EQ(mm_properties<int>(fpin, 1, &mc, &m, &k, &nnz),0) << "could not read Matrix Market file properties"<< "\n";
ASSERT_TRUE(mm_is_matrix(mc));
ASSERT_TRUE(mm_is_coordinate(mc));
ASSERT_FALSE(mm_is_complex(mc));
ASSERT_FALSE(mm_is_skew(mc));
// Allocate memory on host
std::vector<int> cooRowInd(nnz), cooColInd(nnz);
std::vector<int> cooVal(nnz);
std::vector<double> katz_centrality(m);
// Read
ASSERT_EQ( (mm_to_coo<int,int>(fpin, 1, nnz, &cooRowInd[0], &cooColInd[0], &cooVal[0], NULL)) , 0)<< "could not read matrix data"<< "\n";
ASSERT_EQ(fclose(fpin),0);
// gdf columns
col_src = create_gdf_column(cooRowInd);
col_dest = create_gdf_column(cooColInd);
col_katz_centrality = create_gdf_column(katz_centrality);
cugraph::edge_list_view(G.get(), col_src.get(), col_dest.get(), nullptr);
int max_out_degree = getMaxDegree(G.get());
double alpha = 1/(static_cast<double>(max_out_degree) + 1);
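// Katz centrality converges only for alpha < 1/lambda_max of the adjacency matrix;
// since lambda_max is bounded by the maximum degree, 1/(max_degree + 1) is a safe choice.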
cugraph::katz_centrality(G.get(), col_katz_centrality.get(), alpha, 100, 1e-6, false, true);
std::vector<int> top10CUGraph = getTopKIds(std::move(col_katz_centrality));
std::vector<int> top10Golden = getGoldenTopKIds(param.result_file);
EXPECT_THAT(top10CUGraph, ::testing::ContainerEq(top10Golden));
}
};
// --gtest_filter=*simple_test*
INSTANTIATE_TEST_CASE_P(simple_test, Tests_Katz,
::testing::Values( Katz_Usecase("test/datasets/karate.mtx", "ref/katz/karate.csv" )
,Katz_Usecase("test/datasets/netscience.mtx", "ref/katz/netscience.csv")
,Katz_Usecase("test/datasets/polbooks.mtx", "ref/katz/polbooks.csv" )
,Katz_Usecase("test/datasets/dolphins.mtx", "ref/katz/dolphins.csv" )
)
);
TEST_P(Tests_Katz, Check) {
run_current_test(GetParam());
}
int main( int argc, char** argv )
{
rmmInitialize(nullptr);
testing::InitGoogleTest(&argc,argv);
int rc = RUN_ALL_TESTS();
rmmFinalize();
return rc;
}
|
2b2bde364635594fb22923d82b5081120b81bb7b.cu
|
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include "gmock/gmock-generated-matchers.h"
#include "high_res_clock.h"
#include "cuda_profiler_api.h"
#include <cugraph.h>
#include "test_utils.h"
#include <thrust/device_ptr.h>
#include <fstream>
std::vector<int>
getGoldenTopKIds(std::string file, int k = 10) {
std::vector<int> vec;
std::ifstream fin(file);
int val;
int count = 0;
while (fin>>val && ((count++) < k)) {
vec.push_back(val);
}
vec.resize(k);
return vec;
}
std::vector<int>
getTopKIds(gdf_column_ptr katz, int k = 10) {
int count = katz.get()->size;
cudaStream_t stream = nullptr;
rmm::device_vector<int> id(count);
thrust::sequence(rmm::exec_policy(stream)->on(stream), id.begin(), id.end());
auto colptr = thrust::device_pointer_cast(static_cast<double*>(katz.get()->data));
thrust::sort_by_key(rmm::exec_policy(stream)->on(stream),
colptr, colptr + count, id.begin(), thrust::greater<double>());
std::vector<int> topK(k);
thrust::copy(id.begin(), id.begin() + k, topK.begin());
return topK;
}
int
getMaxDegree(cugraph::Graph * G) {
cugraph::add_adj_list(G);
std::vector<int> out_degree(G->numberOfVertices);
gdf_column_ptr col_out_degree = create_gdf_column(out_degree);
cugraph::degree(G, col_out_degree.get(), 2);
auto degreePtr = thrust::device_pointer_cast(static_cast<int*>(col_out_degree.get()->data));
cudaStream_t stream = nullptr;
int max_out_degree = thrust::reduce(rmm::exec_policy(stream)->on(stream),
degreePtr, degreePtr + col_out_degree.get()->size, static_cast<int>(-1), thrust::maximum<int>());
return max_out_degree;
}
typedef struct Katz_Usecase_t {
std::string matrix_file;
std::string result_file;
Katz_Usecase_t(const std::string& a, const std::string& b) {
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
const std::string& rapidsDatasetRootDir = get_rapids_dataset_root_dir();
if ((a != "") && (a[0] != '/')) {
matrix_file = rapidsDatasetRootDir + "/" + a;
} else {
matrix_file = a;
}
if ((b != "") && (b[0] != '/')) {
result_file = rapidsDatasetRootDir + "/" + b;
} else {
result_file = b;
}
}
Katz_Usecase_t& operator=(const Katz_Usecase_t& rhs) {
matrix_file = rhs.matrix_file;
result_file = rhs.result_file;
return *this;
}
} Katz_Usecase;
class Tests_Katz : public ::testing::TestWithParam<Katz_Usecase> {
public:
Tests_Katz() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
void run_current_test(const Katz_Usecase& param) {
Graph_ptr G{new cugraph::Graph, Graph_deleter};
gdf_column_ptr col_src, col_dest, col_katz_centrality;
FILE* fpin = fopen(param.matrix_file.c_str(),"r");
ASSERT_NE(fpin, nullptr) << "fopen (" << param.matrix_file << ") failure.";
int m, k;
int nnz;
MM_typecode mc;
ASSERT_EQ(mm_properties<int>(fpin, 1, &mc, &m, &k, &nnz),0) << "could not read Matrix Market file properties"<< "\n";
ASSERT_TRUE(mm_is_matrix(mc));
ASSERT_TRUE(mm_is_coordinate(mc));
ASSERT_FALSE(mm_is_complex(mc));
ASSERT_FALSE(mm_is_skew(mc));
// Allocate memory on host
std::vector<int> cooRowInd(nnz), cooColInd(nnz);
std::vector<int> cooVal(nnz);
std::vector<double> katz_centrality(m);
// Read
ASSERT_EQ( (mm_to_coo<int,int>(fpin, 1, nnz, &cooRowInd[0], &cooColInd[0], &cooVal[0], NULL)) , 0)<< "could not read matrix data"<< "\n";
ASSERT_EQ(fclose(fpin),0);
// gdf columns
col_src = create_gdf_column(cooRowInd);
col_dest = create_gdf_column(cooColInd);
col_katz_centrality = create_gdf_column(katz_centrality);
cugraph::edge_list_view(G.get(), col_src.get(), col_dest.get(), nullptr);
int max_out_degree = getMaxDegree(G.get());
double alpha = 1/(static_cast<double>(max_out_degree) + 1);
cugraph::katz_centrality(G.get(), col_katz_centrality.get(), alpha, 100, 1e-6, false, true);
std::vector<int> top10CUGraph = getTopKIds(std::move(col_katz_centrality));
std::vector<int> top10Golden = getGoldenTopKIds(param.result_file);
EXPECT_THAT(top10CUGraph, ::testing::ContainerEq(top10Golden));
}
};
// --gtest_filter=*simple_test*
INSTANTIATE_TEST_CASE_P(simple_test, Tests_Katz,
::testing::Values( Katz_Usecase("test/datasets/karate.mtx", "ref/katz/karate.csv" )
,Katz_Usecase("test/datasets/netscience.mtx", "ref/katz/netscience.csv")
,Katz_Usecase("test/datasets/polbooks.mtx", "ref/katz/polbooks.csv" )
,Katz_Usecase("test/datasets/dolphins.mtx", "ref/katz/dolphins.csv" )
)
);
TEST_P(Tests_Katz, Check) {
run_current_test(GetParam());
}
int main( int argc, char** argv )
{
rmmInitialize(nullptr);
testing::InitGoogleTest(&argc,argv);
int rc = RUN_ALL_TESTS();
rmmFinalize();
return rc;
}
|
f509d7b74003f10abfa9b92c3f343d67880b9b4a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from magma_zmcsrcompressor_gpu.cu normal z -> c, Sat Nov 15 19:54:21 2014
@author Hartwig Anzt
*/
#include "common_magma.h"
#include "magmasparse.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE1 256
#define BLOCK_SIZE2 1
#else
#define BLOCK_SIZE1 256
#define BLOCK_SIZE2 1
#endif
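// Compression pipeline: kernel1 counts the nonzeros of each row (stored temporarily in
// B.drow), kernel2 turns those counts into a valid CSR row pointer with a serial scan in
// a single thread, and, once the true nnz is known and val/col are reallocated, kernel3
// scatters the surviving entries into the compacted arrays.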
// copy nonzeros into new structure
__global__ void
magma_cmcsrgpu_kernel1( int num_rows,
magmaFloatComplex *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind,
magmaFloatComplex *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
magmaFloatComplex zero = MAGMA_C_ZERO;
int start = A_rowptr[ row ];
int new_location = start;
int end = A_rowptr[ row+1 ];
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
// B_val[new_location] = A_val[j];
// B_colind[new_location] = A_colind[j];
new_location++;
}
}
// this is not a correct row pointer yet! it holds the nonzero count of this row!
B_rowptr[ row ] = new_location-start;
}
}
// generate a valid rowpointer
__global__ void
magma_cmcsrgpu_kernel2( int num_rows,
magma_index_t *B_rowptr,
magma_index_t *A_rowptr ){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int j, nnz = 0;
if( idx == 0 ){
A_rowptr[ 0 ] = nnz;
for( j=0; j<num_rows; j++ ){
nnz+=B_rowptr[ j ];
A_rowptr[ j+1 ] = nnz;
}
}
}
// copy new structure into original matrix
__global__ void
magma_cmcsrgpu_kernel3( int num_rows,
magmaFloatComplex *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind,
magma_index_t *B2_rowptr,
magmaFloatComplex *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind
){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j, new_location;
if(row<num_rows){
new_location = A_rowptr[ row ];
int start = B2_rowptr[ row ];
int end = B2_rowptr[ row+1 ];
magmaFloatComplex zero = MAGMA_C_ZERO;
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
B_val[new_location] = A_val[j];
B_colind[new_location] = A_colind[j];
new_location++;
}
// A_val[ j ] = B_val[ j ];
// A_colind[ j ] = B_colind[ j ];
}
}
}
/**
Purpose
-------
Removes zeros in a CSR matrix. This is a GPU implementation of the
CSR compressor.
Arguments
---------
@param
A magma_c_sparse_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_cmcsrcompressor_gpu(
magma_c_sparse_matrix *A,
magma_queue_t queue )
{
if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) {
magma_int_t stat_cpu = 0, stat_dev = 0;
magma_c_sparse_matrix B, B2;
B.val = NULL;
B.col = NULL;
B.row = NULL;
B.rowidx = NULL;
B.blockinfo = NULL;
B.diag = NULL;
B.dval = NULL;
B.dcol = NULL;
B.drow = NULL;
B.drowidx = NULL;
B.ddiag = NULL;
B2.val = NULL;
B2.col = NULL;
B2.row = NULL;
B2.rowidx = NULL;
B2.blockinfo = NULL;
B2.diag = NULL;
B2.dval = NULL;
B2.dcol = NULL;
B2.drow = NULL;
B2.drowidx = NULL;
B2.ddiag = NULL;
stat_dev += magma_index_malloc( &B.drow, A->num_rows + 1 );
stat_dev += magma_index_malloc( &B2.drow, A->num_rows + 1 );
if( stat_dev != 0 ){
magma_c_mfree( &B, queue );
magma_c_mfree( &B2, queue );
return MAGMA_ERR_DEVICE_ALLOC;
}
magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1 );
dim3 grid1( (A->num_rows+BLOCK_SIZE1-1)/BLOCK_SIZE1, 1, 1);
// count the nonzeros of each row and write the per-row count into B.drow (the values themselves are copied later by kernel3)
hipLaunchKernelGGL(( magma_cmcsrgpu_kernel1), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue ,
A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol );
// correct the row pointer
dim3 grid2( 1, 1, 1);
hipLaunchKernelGGL(( magma_cmcsrgpu_kernel2), dim3(grid2), dim3(BLOCK_SIZE2), 0, queue ,
A->num_rows, B.drow, A->drow );
// access the true number of nonzeros
magma_index_t *cputmp;
stat_cpu += magma_index_malloc_cpu( &cputmp, 1 );
if( stat_cpu != 0 ){
magma_free_cpu( cputmp );
magma_c_mfree( &B, queue );
magma_c_mfree( &B2, queue );
return MAGMA_ERR_HOST_ALLOC;
}
magma_index_getvector( 1, A->drow+(A->num_rows), 1, cputmp, 1 ); // last entry of the (device) row pointer
A->nnz = (magma_int_t) cputmp[0];
// reallocate with right size
stat_dev += magma_cmalloc( &B.dval, A->nnz );
stat_dev += magma_index_malloc( &B.dcol, A->nnz );
if( stat_dev != 0 ){
magma_c_mfree( &B, queue );
magma_c_mfree( &B2, queue );
return MAGMA_ERR_DEVICE_ALLOC;
}
// copy correct values back
hipLaunchKernelGGL(( magma_cmcsrgpu_kernel3), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue ,
A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol );
magma_free( A->dcol );
magma_free( A->dval );
A->dcol = B.dcol;
A->dval = B.dval;
magma_free( B2.drow );
magma_free( B.drow );
return MAGMA_SUCCESS;
}
else {
magma_c_sparse_matrix dA, CSRA;
magma_storage_t A_storage = A->storage_type;
magma_location_t A_location = A->memory_location;
magma_c_mconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue );
magma_c_mtransfer( *A, &dA, A->memory_location, Magma_DEV, queue );
magma_cmcsrcompressor_gpu( &dA, queue );
magma_c_mfree( A, queue );
magma_c_mtransfer( dA, &CSRA, Magma_DEV, A_location, queue );
magma_c_mconvert( CSRA, A, Magma_CSR, A_storage, queue );
magma_c_mfree( &dA, queue );
magma_c_mfree( &CSRA, queue );
return MAGMA_SUCCESS;
}
}
|
f509d7b74003f10abfa9b92c3f343d67880b9b4a.cu
|
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from magma_zmcsrcompressor_gpu.cu normal z -> c, Sat Nov 15 19:54:21 2014
@author Hartwig Anzt
*/
#include "common_magma.h"
#include "magmasparse.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE1 256
#define BLOCK_SIZE2 1
#else
#define BLOCK_SIZE1 256
#define BLOCK_SIZE2 1
#endif
// copy nonzeros into new structure
__global__ void
magma_cmcsrgpu_kernel1( int num_rows,
magmaFloatComplex *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind,
magmaFloatComplex *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
magmaFloatComplex zero = MAGMA_C_ZERO;
int start = A_rowptr[ row ];
int new_location = start;
int end = A_rowptr[ row+1 ];
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
// B_val[new_location] = A_val[j];
// B_colind[new_location] = A_colind[j];
new_location++;
}
}
// this is not a correct rowpointer yet! it is the number of nonzeros (nnz) in this row!
B_rowptr[ row ] = new_location-start;
}
}
// generate a valid rowpointer
__global__ void
magma_cmcsrgpu_kernel2( int num_rows,
magma_index_t *B_rowptr,
magma_index_t *A_rowptr ){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int j, nnz = 0;
if( idx == 0 ){
A_rowptr[ 0 ] = nnz;
for( j=0; j<num_rows; j++ ){
nnz+=B_rowptr[ j ];
A_rowptr[ j+1 ] = nnz;
}
}
}
// copy new structure into original matrix
__global__ void
magma_cmcsrgpu_kernel3( int num_rows,
magmaFloatComplex *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind,
magma_index_t *B2_rowptr,
magmaFloatComplex *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind
){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j, new_location;
if(row<num_rows){
new_location = A_rowptr[ row ];
int start = B2_rowptr[ row ];
int end = B2_rowptr[ row+1 ];
magmaFloatComplex zero = MAGMA_C_ZERO;
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
B_val[new_location] = A_val[j];
B_colind[new_location] = A_colind[j];
new_location++;
}
// A_val[ j ] = B_val[ j ];
// A_colind[ j ] = B_colind[ j ];
}
}
}
/**
Purpose
-------
Removes zeros in a CSR matrix. This is a GPU implementation of the
CSR compressor.
Arguments
---------
@param
A magma_c_sparse_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_cmcsrcompressor_gpu(
magma_c_sparse_matrix *A,
magma_queue_t queue )
{
if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) {
magma_int_t stat_cpu = 0, stat_dev = 0;
magma_c_sparse_matrix B, B2;
B.val = NULL;
B.col = NULL;
B.row = NULL;
B.rowidx = NULL;
B.blockinfo = NULL;
B.diag = NULL;
B.dval = NULL;
B.dcol = NULL;
B.drow = NULL;
B.drowidx = NULL;
B.ddiag = NULL;
B2.val = NULL;
B2.col = NULL;
B2.row = NULL;
B2.rowidx = NULL;
B2.blockinfo = NULL;
B2.diag = NULL;
B2.dval = NULL;
B2.dcol = NULL;
B2.drow = NULL;
B2.drowidx = NULL;
B2.ddiag = NULL;
stat_dev += magma_index_malloc( &B.drow, A->num_rows + 1 );
stat_dev += magma_index_malloc( &B2.drow, A->num_rows + 1 );
if( stat_dev != 0 ){
magma_c_mfree( &B, queue );
magma_c_mfree( &B2, queue );
return MAGMA_ERR_DEVICE_ALLOC;
}
magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1 );
dim3 grid1( (A->num_rows+BLOCK_SIZE1-1)/BLOCK_SIZE1, 1, 1);
// count the nonzeros of each row and write the per-row count into B.drow (the values themselves are copied later by kernel3)
magma_cmcsrgpu_kernel1<<< grid1, BLOCK_SIZE1, 0, queue >>>
( A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol );
// correct the row pointer
dim3 grid2( 1, 1, 1);
magma_cmcsrgpu_kernel2<<< grid2, BLOCK_SIZE2, 0, queue >>>
( A->num_rows, B.drow, A->drow );
// access the true number of nonzeros
magma_index_t *cputmp;
stat_cpu += magma_index_malloc_cpu( &cputmp, 1 );
if( stat_cpu != 0 ){
magma_free_cpu( cputmp );
magma_c_mfree( &B, queue );
magma_c_mfree( &B2, queue );
return MAGMA_ERR_HOST_ALLOC;
}
magma_index_getvector( 1, A->drow+(A->num_rows), 1, cputmp, 1 ); // last entry of the (device) row pointer
A->nnz = (magma_int_t) cputmp[0];
// reallocate with right size
stat_dev += magma_cmalloc( &B.dval, A->nnz );
stat_dev += magma_index_malloc( &B.dcol, A->nnz );
if( stat_dev != 0 ){
magma_c_mfree( &B, queue );
magma_c_mfree( &B2, queue );
return MAGMA_ERR_DEVICE_ALLOC;
}
// copy correct values back
magma_cmcsrgpu_kernel3<<< grid1, BLOCK_SIZE1, 0, queue >>>
( A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol );
magma_free( A->dcol );
magma_free( A->dval );
A->dcol = B.dcol;
A->dval = B.dval;
magma_free( B2.drow );
magma_free( B.drow );
return MAGMA_SUCCESS;
}
else {
magma_c_sparse_matrix dA, CSRA;
magma_storage_t A_storage = A->storage_type;
magma_location_t A_location = A->memory_location;
magma_c_mconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue );
magma_c_mtransfer( *A, &dA, A->memory_location, Magma_DEV, queue );
magma_cmcsrcompressor_gpu( &dA, queue );
magma_c_mfree( A, queue );
magma_c_mtransfer( dA, &CSRA, Magma_DEV, A_location, queue );
magma_c_mconvert( CSRA, A, Magma_CSR, A_storage, queue );
magma_c_mfree( &dA, queue );
magma_c_mfree( &CSRA, queue );
return MAGMA_SUCCESS;
}
}
|
447634df762e792dd174e2560e3fb48e930fb96f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/erf_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
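// gradient of erf: d/dx erf(x) = 2/sqrt(pi) * exp(-x^2); the kernel applies this factor
// elementwise to the incoming gradient dY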
__global__ void ErfGradientCUDAKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = 2.0f / sqrtf(PI) * expf(-powf(__ldg(X+i), 2.0f)) * __ldg(dY + i);
#else
dX[i] = 2.0f / sqrtf(PI) * expf(-powf(X[i], 2.0f)) * dY[i];
#endif
}
}
} // namespace
template <>
template <typename T>
bool ErfGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( ErfGradientCUDAKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, dY, X, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Erf,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
ErfFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
ErfGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
ErfGradientFunctor<CUDAContext>>);
} // namespace caffe2
|
447634df762e792dd174e2560e3fb48e930fb96f.cu
|
#include "caffe2/operators/erf_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void ErfGradientCUDAKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = 2.0f / sqrtf(PI) * expf(-powf(__ldg(X+i), 2.0f)) * __ldg(dY + i);
#else
dX[i] = 2.0f / sqrtf(PI) * expf(-powf(X[i], 2.0f)) * dY[i];
#endif
}
}
} // namespace
template <>
template <typename T>
bool ErfGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
ErfGradientCUDAKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Erf,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
ErfFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
ErfGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
ErfGradientFunctor<CUDAContext>>);
} // namespace caffe2
|
58641dfe7c53e97f69ef4f4fa031ec282e04b449.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void ReducePI2( float* d_sum, int num, float* d_pi ){
int id=threadIdx.x;
extern __shared__ float s_sum[];
s_sum[id]=d_sum[id];
__syncthreads();
for(int i=(blockDim.x>>1);i>0;i>>=1){
if(id<i)
s_sum[id]+=s_sum[id+i];
__syncthreads();
}
printf("%d,%f\n",id,s_sum[id]);
if(id==0){
*d_pi=s_sum[0]/num;
printf("%d,%f\n",id,*d_pi);
}
}
|
58641dfe7c53e97f69ef4f4fa031ec282e04b449.cu
|
#include "includes.h"
__global__ void ReducePI2( float* d_sum, int num, float* d_pi ){
int id=threadIdx.x;
extern __shared__ float s_sum[];
s_sum[id]=d_sum[id];
__syncthreads();
for(int i=(blockDim.x>>1);i>0;i>>=1){
if(id<i)
s_sum[id]+=s_sum[id+i];
__syncthreads();
}
printf("%d,%f\n",id,s_sum[id]);
if(id==0){
*d_pi=s_sum[0]/num;
printf("%d,%f\n",id,*d_pi);
}
}
|
3f878e6949d74229a2bee74a1ae1f5033dda7b1f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#define MAXKEYBYTES 56 /* 448 bits */
#define N 16
#define noErr 0
#define DATAERROR -1
#define KEYBYTES 8
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) {
if(hipSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
return ;
}
}
// This will output the proper error string when calling hipGetLastError
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString( err ) );
return;
}
}
uint32_t S[4][256] = {
{0xd1310ba6L, 0x98dfb5acL, 0x2ffd72dbL, 0xd01adfb7L, 0xb8e1afedL, 0x6a267e96L,
0xba7c9045L, 0xf12c7f99L, 0x24a19947L, 0xb3916cf7L, 0x0801f2e2L, 0x858efc16L,
0x636920d8L, 0x71574e69L, 0xa458fea3L, 0xf4933d7eL, 0x0d95748fL, 0x728eb658L,
0x718bcd58L, 0x82154aeeL, 0x7b54a41dL, 0xc25a59b5L, 0x9c30d539L, 0x2af26013L,
0xc5d1b023L, 0x286085f0L, 0xca417918L, 0xb8db38efL, 0x8e79dcb0L, 0x603a180eL,
0x6c9e0e8bL, 0xb01e8a3eL, 0xd71577c1L, 0xbd314b27L, 0x78af2fdaL, 0x55605c60L,
0xe65525f3L, 0xaa55ab94L, 0x57489862L, 0x63e81440L, 0x55ca396aL, 0x2aab10b6L,
0xb4cc5c34L, 0x1141e8ceL, 0xa15486afL, 0x7c72e993L, 0xb3ee1411L, 0x636fbc2aL,
0x2ba9c55dL, 0x741831f6L, 0xce5c3e16L, 0x9b87931eL, 0xafd6ba33L, 0x6c24cf5cL,
0x7a325381L, 0x28958677L, 0x3b8f4898L, 0x6b4bb9afL, 0xc4bfe81bL, 0x66282193L,
0x61d809ccL, 0xfb21a991L, 0x487cac60L, 0x5dec8032L, 0xef845d5dL, 0xe98575b1L,
0xdc262302L, 0xeb651b88L, 0x23893e81L, 0xd396acc5L, 0x0f6d6ff3L, 0x83f44239L,
0x2e0b4482L, 0xa4842004L, 0x69c8f04aL, 0x9e1f9b5eL, 0x21c66842L, 0xf6e96c9aL,
0x670c9c61L, 0xabd388f0L, 0x6a51a0d2L, 0xd8542f68L, 0x960fa728L, 0xab5133a3L,
0x6eef0b6cL, 0x137a3be4L, 0xba3bf050L, 0x7efb2a98L, 0xa1f1651dL, 0x39af0176L,
0x66ca593eL, 0x82430e88L, 0x8cee8619L, 0x456f9fb4L, 0x7d84a5c3L, 0x3b8b5ebeL,
0xe06f75d8L, 0x85c12073L, 0x401a449fL, 0x56c16aa6L, 0x4ed3aa62L, 0x363f7706L,
0x1bfedf72L, 0x429b023dL, 0x37d0d724L, 0xd00a1248L, 0xdb0fead3L, 0x49f1c09bL,
0x075372c9L, 0x80991b7bL, 0x25d479d8L, 0xf6e8def7L, 0xe3fe501aL, 0xb6794c3bL,
0x976ce0bdL, 0x04c006baL, 0xc1a94fb6L, 0x409f60c4L, 0x5e5c9ec2L, 0x196a2463L,
0x68fb6fafL, 0x3e6c53b5L, 0x1339b2ebL, 0x3b52ec6fL, 0x6dfc511fL, 0x9b30952cL,
0xcc814544L, 0xaf5ebd09L, 0xbee3d004L, 0xde334afdL, 0x660f2807L, 0x192e4bb3L,
0xc0cba857L, 0x45c8740fL, 0xd20b5f39L, 0xb9d3fbdbL, 0x5579c0bdL, 0x1a60320aL,
0xd6a100c6L, 0x402c7279L, 0x679f25feL, 0xfb1fa3ccL, 0x8ea5e9f8L, 0xdb3222f8L,
0x3c7516dfL, 0xfd616b15L, 0x2f501ec8L, 0xad0552abL, 0x323db5faL, 0xfd238760L,
0x53317b48L, 0x3e00df82L, 0x9e5c57bbL, 0xca6f8ca0L, 0x1a87562eL, 0xdf1769dbL,
0xd542a8f6L, 0x287effc3L, 0xac6732c6L, 0x8c4f5573L, 0x695b27b0L, 0xbbca58c8L,
0xe1ffa35dL, 0xb8f011a0L, 0x10fa3d98L, 0xfd2183b8L, 0x4afcb56cL, 0x2dd1d35bL,
0x9a53e479L, 0xb6f84565L, 0xd28e49bcL, 0x4bfb9790L, 0xe1ddf2daL, 0xa4cb7e33L,
0x62fb1341L, 0xcee4c6e8L, 0xef20cadaL, 0x36774c01L, 0xd07e9efeL, 0x2bf11fb4L,
0x95dbda4dL, 0xae909198L, 0xeaad8e71L, 0x6b93d5a0L, 0xd08ed1d0L, 0xafc725e0L,
0x8e3c5b2fL, 0x8e7594b7L, 0x8ff6e2fbL, 0xf2122b64L, 0x8888b812L, 0x900df01cL,
0x4fad5ea0L, 0x688fc31cL, 0xd1cff191L, 0xb3a8c1adL, 0x2f2f2218L, 0xbe0e1777L,
0xea752dfeL, 0x8b021fa1L, 0xe5a0cc0fL, 0xb56f74e8L, 0x18acf3d6L, 0xce89e299L,
0xb4a84fe0L, 0xfd13e0b7L, 0x7cc43b81L, 0xd2ada8d9L, 0x165fa266L, 0x80957705L,
0x93cc7314L, 0x211a1477L, 0xe6ad2065L, 0x77b5fa86L, 0xc75442f5L, 0xfb9d35cfL,
0xebcdaf0cL, 0x7b3e89a0L, 0xd6411bd3L, 0xae1e7e49L, 0x00250e2dL, 0x2071b35eL,
0x226800bbL, 0x57b8e0afL, 0x2464369bL, 0xf009b91eL, 0x5563911dL, 0x59dfa6aaL,
0x78c14389L, 0xd95a537fL, 0x207d5ba2L, 0x02e5b9c5L, 0x83260376L, 0x6295cfa9L,
0x11c81968L, 0x4e734a41L, 0xb3472dcaL, 0x7b14a94aL, 0x1b510052L, 0x9a532915L,
0xd60f573fL, 0xbc9bc6e4L, 0x2b60a476L, 0x81e67400L, 0x08ba6fb5L, 0x571be91fL,
0xf296ec6bL, 0x2a0dd915L, 0xb6636521L, 0xe7b9f9b6L, 0xff34052eL, 0xc5855664L,
0x53b02d5dL, 0xa99f8fa1L, 0x08ba4799L, 0x6e85076aL},
{0x4b7a70e9L, 0xb5b32944L, 0xdb75092eL, 0xc4192623L, 0xad6ea6b0L, 0x49a7df7dL,
0x9cee60b8L, 0x8fedb266L, 0xecaa8c71L, 0x699a17ffL, 0x5664526cL, 0xc2b19ee1L,
0x193602a5L, 0x75094c29L, 0xa0591340L, 0xe4183a3eL, 0x3f54989aL, 0x5b429d65L,
0x6b8fe4d6L, 0x99f73fd6L, 0xa1d29c07L, 0xefe830f5L, 0x4d2d38e6L, 0xf0255dc1L,
0x4cdd2086L, 0x8470eb26L, 0x6382e9c6L, 0x021ecc5eL, 0x09686b3fL, 0x3ebaefc9L,
0x3c971814L, 0x6b6a70a1L, 0x687f3584L, 0x52a0e286L, 0xb79c5305L, 0xaa500737L,
0x3e07841cL, 0x7fdeae5cL, 0x8e7d44ecL, 0x5716f2b8L, 0xb03ada37L, 0xf0500c0dL,
0xf01c1f04L, 0x0200b3ffL, 0xae0cf51aL, 0x3cb574b2L, 0x25837a58L, 0xdc0921bdL,
0xd19113f9L, 0x7ca92ff6L, 0x94324773L, 0x22f54701L, 0x3ae5e581L, 0x37c2dadcL,
0xc8b57634L, 0x9af3dda7L, 0xa9446146L, 0x0fd0030eL, 0xecc8c73eL, 0xa4751e41L,
0xe238cd99L, 0x3bea0e2fL, 0x3280bba1L, 0x183eb331L, 0x4e548b38L, 0x4f6db908L,
0x6f420d03L, 0xf60a04bfL, 0x2cb81290L, 0x24977c79L, 0x5679b072L, 0xbcaf89afL,
0xde9a771fL, 0xd9930810L, 0xb38bae12L, 0xdccf3f2eL, 0x5512721fL, 0x2e6b7124L,
0x501adde6L, 0x9f84cd87L, 0x7a584718L, 0x7408da17L, 0xbc9f9abcL, 0xe94b7d8cL,
0xec7aec3aL, 0xdb851dfaL, 0x63094366L, 0xc464c3d2L, 0xef1c1847L, 0x3215d908L,
0xdd433b37L, 0x24c2ba16L, 0x12a14d43L, 0x2a65c451L, 0x50940002L, 0x133ae4ddL,
0x71dff89eL, 0x10314e55L, 0x81ac77d6L, 0x5f11199bL, 0x043556f1L, 0xd7a3c76bL,
0x3c11183bL, 0x5924a509L, 0xf28fe6edL, 0x97f1fbfaL, 0x9ebabf2cL, 0x1e153c6eL,
0x86e34570L, 0xeae96fb1L, 0x860e5e0aL, 0x5a3e2ab3L, 0x771fe71cL, 0x4e3d06faL,
0x2965dcb9L, 0x99e71d0fL, 0x803e89d6L, 0x5266c825L, 0x2e4cc978L, 0x9c10b36aL,
0xc6150ebaL, 0x94e2ea78L, 0xa5fc3c53L, 0x1e0a2df4L, 0xf2f74ea7L, 0x361d2b3dL,
0x1939260fL, 0x19c27960L, 0x5223a708L, 0xf71312b6L, 0xebadfe6eL, 0xeac31f66L,
0xe3bc4595L, 0xa67bc883L, 0xb17f37d1L, 0x018cff28L, 0xc332ddefL, 0xbe6c5aa5L,
0x65582185L, 0x68ab9802L, 0xeecea50fL, 0xdb2f953bL, 0x2aef7dadL, 0x5b6e2f84L,
0x1521b628L, 0x29076170L, 0xecdd4775L, 0x619f1510L, 0x13cca830L, 0xeb61bd96L,
0x0334fe1eL, 0xaa0363cfL, 0xb5735c90L, 0x4c70a239L, 0xd59e9e0bL, 0xcbaade14L,
0xeecc86bcL, 0x60622ca7L, 0x9cab5cabL, 0xb2f3846eL, 0x648b1eafL, 0x19bdf0caL,
0xa02369b9L, 0x655abb50L, 0x40685a32L, 0x3c2ab4b3L, 0x319ee9d5L, 0xc021b8f7L,
0x9b540b19L, 0x875fa099L, 0x95f7997eL, 0x623d7da8L, 0xf837889aL, 0x97e32d77L,
0x11ed935fL, 0x16681281L, 0x0e358829L, 0xc7e61fd6L, 0x96dedfa1L, 0x7858ba99L,
0x57f584a5L, 0x1b227263L, 0x9b83c3ffL, 0x1ac24696L, 0xcdb30aebL, 0x532e3054L,
0x8fd948e4L, 0x6dbc3128L, 0x58ebf2efL, 0x34c6ffeaL, 0xfe28ed61L, 0xee7c3c73L,
0x5d4a14d9L, 0xe864b7e3L, 0x42105d14L, 0x203e13e0L, 0x45eee2b6L, 0xa3aaabeaL,
0xdb6c4f15L, 0xfacb4fd0L, 0xc742f442L, 0xef6abbb5L, 0x654f3b1dL, 0x41cd2105L,
0xd81e799eL, 0x86854dc7L, 0xe44b476aL, 0x3d816250L, 0xcf62a1f2L, 0x5b8d2646L,
0xfc8883a0L, 0xc1c7b6a3L, 0x7f1524c3L, 0x69cb7492L, 0x47848a0bL, 0x5692b285L,
0x095bbf00L, 0xad19489dL, 0x1462b174L, 0x23820e00L, 0x58428d2aL, 0x0c55f5eaL,
0x1dadf43eL, 0x233f7061L, 0x3372f092L, 0x8d937e41L, 0xd65fecf1L, 0x6c223bdbL,
0x7cde3759L, 0xcbee7460L, 0x4085f2a7L, 0xce77326eL, 0xa6078084L, 0x19f8509eL,
0xe8efd855L, 0x61d99735L, 0xa969a7aaL, 0xc50c06c2L, 0x5a04abfcL, 0x800bcadcL,
0x9e447a2eL, 0xc3453484L, 0xfdd56705L, 0x0e1e9ec9L, 0xdb73dbd3L, 0x105588cdL,
0x675fda79L, 0xe3674340L, 0xc5c43465L, 0x713e38d8L, 0x3d28f89eL, 0xf16dff20L,
0x153e21e7L, 0x8fb03d4aL, 0xe6e39f2bL, 0xdb83adf7L},
{0xe93d5a68L, 0x948140f7L, 0xf64c261cL, 0x94692934L, 0x411520f7L, 0x7602d4f7L,
0xbcf46b2eL, 0xd4a20068L, 0xd4082471L, 0x3320f46aL, 0x43b7d4b7L, 0x500061afL,
0x1e39f62eL, 0x97244546L, 0x14214f74L, 0xbf8b8840L, 0x4d95fc1dL, 0x96b591afL,
0x70f4ddd3L, 0x66a02f45L, 0xbfbc09ecL, 0x03bd9785L, 0x7fac6dd0L, 0x31cb8504L,
0x96eb27b3L, 0x55fd3941L, 0xda2547e6L, 0xabca0a9aL, 0x28507825L, 0x530429f4L,
0x0a2c86daL, 0xe9b66dfbL, 0x68dc1462L, 0xd7486900L, 0x680ec0a4L, 0x27a18deeL,
0x4f3ffea2L, 0xe887ad8cL, 0xb58ce006L, 0x7af4d6b6L, 0xaace1e7cL, 0xd3375fecL,
0xce78a399L, 0x406b2a42L, 0x20fe9e35L, 0xd9f385b9L, 0xee39d7abL, 0x3b124e8bL,
0x1dc9faf7L, 0x4b6d1856L, 0x26a36631L, 0xeae397b2L, 0x3a6efa74L, 0xdd5b4332L,
0x6841e7f7L, 0xca7820fbL, 0xfb0af54eL, 0xd8feb397L, 0x454056acL, 0xba489527L,
0x55533a3aL, 0x20838d87L, 0xfe6ba9b7L, 0xd096954bL, 0x55a867bcL, 0xa1159a58L,
0xcca92963L, 0x99e1db33L, 0xa62a4a56L, 0x3f3125f9L, 0x5ef47e1cL, 0x9029317cL,
0xfdf8e802L, 0x04272f70L, 0x80bb155cL, 0x05282ce3L, 0x95c11548L, 0xe4c66d22L,
0x48c1133fL, 0xc70f86dcL, 0x07f9c9eeL, 0x41041f0fL, 0x404779a4L, 0x5d886e17L,
0x325f51ebL, 0xd59bc0d1L, 0xf2bcc18fL, 0x41113564L, 0x257b7834L, 0x602a9c60L,
0xdff8e8a3L, 0x1f636c1bL, 0x0e12b4c2L, 0x02e1329eL, 0xaf664fd1L, 0xcad18115L,
0x6b2395e0L, 0x333e92e1L, 0x3b240b62L, 0xeebeb922L, 0x85b2a20eL, 0xe6ba0d99L,
0xde720c8cL, 0x2da2f728L, 0xd0127845L, 0x95b794fdL, 0x647d0862L, 0xe7ccf5f0L,
0x5449a36fL, 0x877d48faL, 0xc39dfd27L, 0xf33e8d1eL, 0x0a476341L, 0x992eff74L,
0x3a6f6eabL, 0xf4f8fd37L, 0xa812dc60L, 0xa1ebddf8L, 0x991be14cL, 0xdb6e6b0dL,
0xc67b5510L, 0x6d672c37L, 0x2765d43bL, 0xdcd0e804L, 0xf1290dc7L, 0xcc00ffa3L,
0xb5390f92L, 0x690fed0bL, 0x667b9ffbL, 0xcedb7d9cL, 0xa091cf0bL, 0xd9155ea3L,
0xbb132f88L, 0x515bad24L, 0x7b9479bfL, 0x763bd6ebL, 0x37392eb3L, 0xcc115979L,
0x8026e297L, 0xf42e312dL, 0x6842ada7L, 0xc66a2b3bL, 0x12754cccL, 0x782ef11cL,
0x6a124237L, 0xb79251e7L, 0x06a1bbe6L, 0x4bfb6350L, 0x1a6b1018L, 0x11caedfaL,
0x3d25bdd8L, 0xe2e1c3c9L, 0x44421659L, 0x0a121386L, 0xd90cec6eL, 0xd5abea2aL,
0x64af674eL, 0xda86a85fL, 0xbebfe988L, 0x64e4c3feL, 0x9dbc8057L, 0xf0f7c086L,
0x60787bf8L, 0x6003604dL, 0xd1fd8346L, 0xf6381fb0L, 0x7745ae04L, 0xd736fcccL,
0x83426b33L, 0xf01eab71L, 0xb0804187L, 0x3c005e5fL, 0x77a057beL, 0xbde8ae24L,
0x55464299L, 0xbf582e61L, 0x4e58f48fL, 0xf2ddfda2L, 0xf474ef38L, 0x8789bdc2L,
0x5366f9c3L, 0xc8b38e74L, 0xb475f255L, 0x46fcd9b9L, 0x7aeb2661L, 0x8b1ddf84L,
0x846a0e79L, 0x915f95e2L, 0x466e598eL, 0x20b45770L, 0x8cd55591L, 0xc902de4cL,
0xb90bace1L, 0xbb8205d0L, 0x11a86248L, 0x7574a99eL, 0xb77f19b6L, 0xe0a9dc09L,
0x662d09a1L, 0xc4324633L, 0xe85a1f02L, 0x09f0be8cL, 0x4a99a025L, 0x1d6efe10L,
0x1ab93d1dL, 0x0ba5a4dfL, 0xa186f20fL, 0x2868f169L, 0xdcb7da83L, 0x573906feL,
0xa1e2ce9bL, 0x4fcd7f52L, 0x50115e01L, 0xa70683faL, 0xa002b5c4L, 0x0de6d027L,
0x9af88c27L, 0x773f8641L, 0xc3604c06L, 0x61a806b5L, 0xf0177a28L, 0xc0f586e0L,
0x006058aaL, 0x30dc7d62L, 0x11e69ed7L, 0x2338ea63L, 0x53c2dd94L, 0xc2c21634L,
0xbbcbee56L, 0x90bcb6deL, 0xebfc7da1L, 0xce591d76L, 0x6f05e409L, 0x4b7c0188L,
0x39720a3dL, 0x7c927c24L, 0x86e3725fL, 0x724d9db9L, 0x1ac15bb4L, 0xd39eb8fcL,
0xed545578L, 0x08fca5b5L, 0xd83d7cd3L, 0x4dad0fc4L, 0x1e50ef5eL, 0xb161e6f8L,
0xa28514d9L, 0x6c51133cL, 0x6fd5c7e7L, 0x56e14ec4L, 0x362abfceL, 0xddc6c837L,
0xd79a3234L, 0x92638212L, 0x670efa8eL, 0x406000e0L},
{0x3a39ce37L, 0xd3faf5cfL, 0xabc27737L, 0x5ac52d1bL, 0x5cb0679eL, 0x4fa33742L,
0xd3822740L, 0x99bc9bbeL, 0xd5118e9dL, 0xbf0f7315L, 0xd62d1c7eL, 0xc700c47bL,
0xb78c1b6bL, 0x21a19045L, 0xb26eb1beL, 0x6a366eb4L, 0x5748ab2fL, 0xbc946e79L,
0xc6a376d2L, 0x6549c2c8L, 0x530ff8eeL, 0x468dde7dL, 0xd5730a1dL, 0x4cd04dc6L,
0x2939bbdbL, 0xa9ba4650L, 0xac9526e8L, 0xbe5ee304L, 0xa1fad5f0L, 0x6a2d519aL,
0x63ef8ce2L, 0x9a86ee22L, 0xc089c2b8L, 0x43242ef6L, 0xa51e03aaL, 0x9cf2d0a4L,
0x83c061baL, 0x9be96a4dL, 0x8fe51550L, 0xba645bd6L, 0x2826a2f9L, 0xa73a3ae1L,
0x4ba99586L, 0xef5562e9L, 0xc72fefd3L, 0xf752f7daL, 0x3f046f69L, 0x77fa0a59L,
0x80e4a915L, 0x87b08601L, 0x9b09e6adL, 0x3b3ee593L, 0xe990fd5aL, 0x9e34d797L,
0x2cf0b7d9L, 0x022b8b51L, 0x96d5ac3aL, 0x017da67dL, 0xd1cf3ed6L, 0x7c7d2d28L,
0x1f9f25cfL, 0xadf2b89bL, 0x5ad6b472L, 0x5a88f54cL, 0xe029ac71L, 0xe019a5e6L,
0x47b0acfdL, 0xed93fa9bL, 0xe8d3c48dL, 0x283b57ccL, 0xf8d56629L, 0x79132e28L,
0x785f0191L, 0xed756055L, 0xf7960e44L, 0xe3d35e8cL, 0x15056dd4L, 0x88f46dbaL,
0x03a16125L, 0x0564f0bdL, 0xc3eb9e15L, 0x3c9057a2L, 0x97271aecL, 0xa93a072aL,
0x1b3f6d9bL, 0x1e6321f5L, 0xf59c66fbL, 0x26dcf319L, 0x7533d928L, 0xb155fdf5L,
0x03563482L, 0x8aba3cbbL, 0x28517711L, 0xc20ad9f8L, 0xabcc5167L, 0xccad925fL,
0x4de81751L, 0x3830dc8eL, 0x379d5862L, 0x9320f991L, 0xea7a90c2L, 0xfb3e7bceL,
0x5121ce64L, 0x774fbe32L, 0xa8b6e37eL, 0xc3293d46L, 0x48de5369L, 0x6413e680L,
0xa2ae0810L, 0xdd6db224L, 0x69852dfdL, 0x09072166L, 0xb39a460aL, 0x6445c0ddL,
0x586cdecfL, 0x1c20c8aeL, 0x5bbef7ddL, 0x1b588d40L, 0xccd2017fL, 0x6bb4e3bbL,
0xdda26a7eL, 0x3a59ff45L, 0x3e350a44L, 0xbcb4cdd5L, 0x72eacea8L, 0xfa6484bbL,
0x8d6612aeL, 0xbf3c6f47L, 0xd29be463L, 0x542f5d9eL, 0xaec2771bL, 0xf64e6370L,
0x740e0d8dL, 0xe75b1357L, 0xf8721671L, 0xaf537d5dL, 0x4040cb08L, 0x4eb4e2ccL,
0x34d2466aL, 0x0115af84L, 0xe1b00428L, 0x95983a1dL, 0x06b89fb4L, 0xce6ea048L,
0x6f3f3b82L, 0x3520ab82L, 0x011a1d4bL, 0x277227f8L, 0x611560b1L, 0xe7933fdcL,
0xbb3a792bL, 0x344525bdL, 0xa08839e1L, 0x51ce794bL, 0x2f32c9b7L, 0xa01fbac9L,
0xe01cc87eL, 0xbcc7d1f6L, 0xcf0111c3L, 0xa1e8aac7L, 0x1a908749L, 0xd44fbd9aL,
0xd0dadecbL, 0xd50ada38L, 0x0339c32aL, 0xc6913667L, 0x8df9317cL, 0xe0b12b4fL,
0xf79e59b7L, 0x43f5bb3aL, 0xf2d519ffL, 0x27d9459cL, 0xbf97222cL, 0x15e6fc2aL,
0x0f91fc71L, 0x9b941525L, 0xfae59361L, 0xceb69cebL, 0xc2a86459L, 0x12baa8d1L,
0xb6c1075eL, 0xe3056a0cL, 0x10d25065L, 0xcb03a442L, 0xe0ec6e0eL, 0x1698db3bL,
0x4c98a0beL, 0x3278e964L, 0x9f1f9532L, 0xe0d392dfL, 0xd3a0342bL, 0x8971f21eL,
0x1b0a7441L, 0x4ba3348cL, 0xc5be7120L, 0xc37632d8L, 0xdf359f8dL, 0x9b992f2eL,
0xe60b6f47L, 0x0fe3f11dL, 0xe54cda54L, 0x1edad891L, 0xce6279cfL, 0xcd3e7e6fL,
0x1618b166L, 0xfd2c1d05L, 0x848fd2c5L, 0xf6fb2299L, 0xf523f357L, 0xa6327623L,
0x93a83531L, 0x56cccd02L, 0xacf08162L, 0x5a75ebb5L, 0x6e163697L, 0x88d273ccL,
0xde966292L, 0x81b949d0L, 0x4c50901bL, 0x71c65614L, 0xe6c6c7bdL, 0x327a140aL,
0x45e1d006L, 0xc3f27b9aL, 0xc9aa53fdL, 0x62a80f00L, 0xbb25bfe2L, 0x35bdd2f6L,
0x71126905L, 0xb2040222L, 0xb6cbcf7cL, 0xcd769c2bL, 0x53113ec0L, 0x1640e3d3L,
0x38abbd60L, 0x2547adf0L, 0xba38209cL, 0xf746ce76L, 0x77afa1c5L, 0x20756060L,
0x85cbfe4eL, 0x8ae88dd8L, 0x7aaaf9b0L, 0x4cf9aa7eL, 0x1948c25cL, 0x02fb8a8cL,
0x01c36ae4L, 0xd6ebe1f9L, 0x90d4f869L, 0xa65cdea0L, 0x3f09252dL, 0xc208e69fL,
0xb74e6132L, 0xce77e25bL, 0x578fdfe3L, 0x3ac372e6L}
};
uint32_t P[18] = {
0x243f6a88L, 0x85a308d3L, 0x13198a2eL, 0x03707344L, 0xa4093822L, 0x299f31d0L,
0x082efa98L, 0xec4e6c89L, 0x452821e6L, 0x38d01377L, 0xbe5466cfL, 0x34e90c6cL,
0xc0ac29b7L, 0xc97c50ddL, 0x3f84d5b5L, 0xb5470917L, 0x9216d5d9L, 0x8979fb1bL};
void checkbuffer(uint32_t *buf, int size) {
int i;
for (i = 0; i < size; i++) {
printf("%d: %x\n", i, buf[i]);
}
}
uint32_t F(uint32_t x) {
unsigned short a;
unsigned short b;
unsigned short c;
unsigned short d;
uint32_t y;
d = x & 0x00FF;
x >>= 8;
c = x & 0x00FF;
x >>= 8;
b = x & 0x00FF;
x >>= 8;
a = x & 0x00FF;
y = S[0][a] + S[1][b];
y = y ^ S[2][c];
y = y + S[3][d];
return y;
}
void Blowfish_encipher(uint32_t *xl, uint32_t *xr) {
uint32_t Xl;
uint32_t Xr;
uint32_t temp;
short i;
Xl = *xl;
Xr = *xr;
for (i = 0; i < N; ++i) {
Xl = Xl ^ P[i];
Xr = F(Xl) ^ Xr;
temp = Xl;
Xl = Xr;
Xr = temp;
}
temp = Xl;
Xl = Xr;
Xr = temp;
Xr = Xr ^ P[N];
Xl = Xl ^ P[N + 1];
*xl = Xl;
*xr = Xr;
}
void Blowfish_decipher(uint32_t *xl, uint32_t *xr) {
uint32_t Xl;
uint32_t Xr;
uint32_t temp;
short i;
Xl = *xl;
Xr = *xr;
for (i = N + 1; i > 1; --i) {
Xl = Xl ^ P[i];
Xr = F(Xl) ^ Xr;
/* Exchange Xl and Xr */
temp = Xl;
Xl = Xr;
Xr = temp;
}
/* Exchange Xl and Xr */
temp = Xl;
Xl = Xr;
Xr = temp;
Xr = Xr ^ P[1];
Xl = Xl ^ P[0];
*xl = Xl;
*xr = Xr;
}
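// key schedule: XOR the key bytes into the P-array, then repeatedly encrypt an all-zero block,
// feeding each ciphertext back in to regenerate P and the four S-boxes (standard Blowfish setup)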
short InitializeBlowfish(char key[], short keybytes) {
short i;
short j;
short k;
// short error;
// short numread;
uint32_t data;
uint32_t datal;
uint32_t datar;
j = 0;
for (i = 0; i < N + 2; ++i) {
data = 0x00000000;
for (k = 0; k < 4; ++k) {
data <<= 8;
data |= (uint32_t)key[j] & 0xff;
j = j + 1;
if (j >= keybytes) j = 0;
}
P[i] = P[i] ^ data;
}
datal = 0x00000000;
datar = 0x00000000;
for (i = 0; i < N + 2; i += 2) {
Blowfish_encipher(&datal, &datar);
P[i] = datal;
P[i + 1] = datar;
}
for (i = 0; i < 4; ++i) {
for (j = 0; j < 256; j += 2) {
Blowfish_encipher(&datal, &datar);
S[i][j] = datal;
S[i][j + 1] = datar;
}
}
//printf("Done!\n");
//printf("P[0]: %x\n", P[0]);
//printf("P[1]: %x\n", P[1]);
return 0;
}
// encrypt is 1, decrypt is 0
void FileEncrypt(uint32_t *out, uint32_t *in, int size, int encrypt) {
int i;
memset(out, 0, size);
memcpy(out, in, size);
if (encrypt) {
for (i = 0; i < size/sizeof(uint32_t)-1; i+=2) {
Blowfish_encipher(&out[i], &out[i+1]);
}
}
else {
for (i = 0; i < size/sizeof(uint32_t)-1; i+=2) {
Blowfish_decipher(&out[i], &out[i+1]);
}
}
}
__global__ void GpuFileEncrypt(uint32_t *out, uint32_t *in, uint32_t *d_s, uint32_t *d_p) {
// each thread works only on its own pair of 32-bit words, so there is no benefit in staging the
// block data itself in shared memory (unlike e.g. matrix multiplication, sorting or reversing,
// where threads need each other's data). The P-array and S-boxes, however, are read repeatedly
// inside the round loop, so they are loaded into shared memory.
__shared__ uint32_t s[1042]; // p 18 + s 1024 = 1042
unsigned int tid = threadIdx.x;
unsigned int gid = blockIdx.x * blockDim.x + tid;
unsigned int copy_s_begin_index = tid * 1024 / blockDim.x;
unsigned int copy_s_end_index = (tid + 1) * 1024 / blockDim.x;
int i = 0;
if (tid == 0) {
for (i = 0; i < 18; i++) {
s[i] = d_p[i];
}
}
__syncthreads();
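// cooperative copy: each thread moves its 1024/blockDim.x slice of the flattened S-boxes into
// shared memory behind the P-array (assumes blockDim.x divides 1024 evenly)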
for (i = copy_s_begin_index; i < copy_s_end_index; i++) {
s[i + 18] = d_s[i];
}
__syncthreads();
uint32_t xl = in[2 * gid];
uint32_t xr = in[2 * gid + 1];
uint32_t temp;
unsigned short a;
unsigned short b;
unsigned short c;
unsigned short d;
uint32_t x, y;
for (i = 0; i < 16; ++i) {
xl = xl ^ s[i];
x = xl;
d = x & 0x00FF;
x >>= 8;
c = x & 0x00FF;
x >>= 8;
b = x & 0x00FF;
x >>= 8;
a = x & 0x00FF;
//y = S[0][a] + S[1][b];
y = s[a + 18] + s[256 + b + 18];
//y = y ^ S[2][c];
y = y ^ s[512 + c + 18];
//y = y + S[3][d];
y = y + s[768 + d + 18];
xr = y ^ xr;
// swap xl and xr
temp = xl;
xl = xr;
xr = temp;
}
temp = xl;
xl = xr;
xr = temp;
xr = xr ^ s[16];
xl = xl ^ s[17];
out[2 * gid] = xl;
out[2 * gid + 1] = xr;
}
__global__ void GpuFileDecrypt(uint32_t *out, uint32_t *in, uint32_t *d_s, uint32_t *d_p) {
// each thread works only on its own pair of 32-bit words, so there is no benefit in staging the
// block data itself in shared memory (unlike e.g. matrix multiplication, sorting or reversing,
// where threads need each other's data). The P-array and S-boxes, however, are read repeatedly
// inside the round loop, so they are loaded into shared memory.
__shared__ uint32_t s[1042]; // p 18 + s 1024 = 1042
unsigned int tid = threadIdx.x;
unsigned int gid = blockIdx.x * blockDim.x + tid;
unsigned int copy_s_begin_index = tid * 1024 / blockDim.x;
unsigned int copy_s_end_index = (tid + 1) * 1024 / blockDim.x;
int i = 0;
if (tid == 0) {
for (i = 0; i < 18; i++) {
s[i] = d_p[i];
}
}
__syncthreads();
for (i = copy_s_begin_index; i < copy_s_end_index; i++) {
s[i + 18] = d_s[i];
}
__syncthreads();
uint32_t xl = in[2 * gid];
uint32_t xr = in[2 * gid + 1];
uint32_t temp;
unsigned short a;
unsigned short b;
unsigned short c;
unsigned short d;
uint32_t x, y;
for (i = 17; i > 1; --i) {
xl = xl ^ s[i];
x = xl;
d = x & 0x00FF;
x >>= 8;
c = x & 0x00FF;
x >>= 8;
b = x & 0x00FF;
x >>= 8;
a = x & 0x00FF;
//y = S[0][a] + S[1][b];
y = s[a + 18] + s[256 + b + 18];
//y = y ^ S[2][c];
y = y ^ s[512 + c + 18];
//y = y + S[3][d];
y = y + s[768 + d + 18];
xr = y ^ xr;
/* Exchange Xl and Xr */
temp = xl;
xl = xr;
xr = temp;
}
/* Exchange Xl and Xr */
temp = xl;
xl = xr;
xr = temp;
xr = xr ^ s[1];
xl = xl ^ s[0];
// write the decrypted block back (mirrors GpuFileEncrypt; without this the result was never stored)
out[2 * gid] = xl;
out[2 * gid + 1] = xr;
}
void example () {
uint32_t datal = 0xFFFFFFFF;
uint32_t datar = 0xFFFFFFFF;
printf("Clean text %#x%x\n", datal, datar);
Blowfish_encipher(&datal, &datar);
printf("Cipher text %#x%x\n", datal, datar);
Blowfish_decipher(&datal, &datar);
printf("Decipher text %#x%x\n", datal, datar);
}
int main()
{
char key[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
short keybytes = 8;
InitializeBlowfish(key, keybytes);
const char * path1 = "Test2Image.jpg";
const char * path2 = "Test2Image_out1.jpg";
const char * path3 = "Test2Image_out2.jpg";
FILE *in, *out;
int file_size = 0;
int encrypt;
printf("start of the program: press 1: encrypt, press 0 decrypt\n");
scanf("%d", &encrypt);
if (encrypt != 0 && encrypt != 1) {
printf("the encrypt/decrypt input is not correct\n");
return 0;
}
if (encrypt == 1) in = fopen(path1, "rb");
else in = fopen(path2, "rb");
if (in == NULL) {
printf("error: the input file is not open\n");
return 0;
}
fseek(in, 0L, SEEK_END);
file_size = ftell(in);
rewind(in);
uint32_t *buffer_in = (uint32_t *)malloc(file_size);
uint32_t *buffer_out = (uint32_t *)malloc(file_size);
if (buffer_in == NULL || buffer_out == NULL) {
printf("the buffers allocate memory failed!\n");
return 0;
}
int byte_read_success = fread((void *)buffer_in, sizeof(char), file_size, in);
if (byte_read_success != file_size) {
printf("byte_read_success: %d\n", byte_read_success);
printf("file read is abnormal: bytes read not equals to the file size\n");
return 0;
}
fclose(in);
// read file done
memcpy(buffer_out, buffer_in, file_size);
// GPU mem allocation, timer, device synchronization, etc
hipSetDevice(0);
hipDeviceSynchronize();
hipDeviceSynchronize();
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
uint32_t *d_in, *d_out;
uint32_t *d_p, *d_s;
int mem_size = sizeof(uint32_t) * (file_size/sizeof(uint32_t) - 1);
// determine num of threads, block per grids, threads per block
// one thread should take charge of 2 uint_32t numbers
int total_launch_threads = mem_size/(sizeof(uint32_t) * 2);
dim3 threadsPerBlock(16, 1, 1); // 16 ~ 1024 though
dim3 blocksPerGrid(total_launch_threads/threadsPerBlock.x, 1, 1);
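// note: blocksPerGrid uses integer division, so this assumes total_launch_threads is a multiple
// of threadsPerBlock.x; bytes beyond mem_size never reach the GPU and keep the host-side copy
// already placed in buffer_out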
checkCudaErrors(hipMalloc((void**) &d_in, mem_size));
checkCudaErrors(hipMalloc((void**) &d_out, mem_size));
checkCudaErrors(hipMalloc((void**) &d_s, 4 * 256 * sizeof(uint32_t)));
checkCudaErrors(hipMalloc((void**) &d_p, 18 * sizeof(uint32_t)));
// CPU->GPU mem transfer: if the transfer time should be counted, the cudaEventRecord(start) call should move above this comment
checkCudaErrors(hipMemcpy(d_in, buffer_in, mem_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_s, S, 4*256 * sizeof(uint32_t), hipMemcpyHostToDevice)); // NOT sure if this is correct
checkCudaErrors(hipMemcpy(d_p, P, 18 * sizeof(uint32_t), hipMemcpyHostToDevice));
hipEventRecord(start, 0);
if (encrypt == 1)hipLaunchKernelGGL(( GpuFileEncrypt), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_out, d_in, d_s, d_p);
else hipLaunchKernelGGL(( GpuFileDecrypt), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_out, d_in, d_s, d_p);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// encrypt/decrypt done
// GPU->CPU mem transfer: if the transfer time should be counted, the cudaEventRecord(stop) call should move below this comment
checkCudaErrors(hipMemcpy(buffer_out, d_out, mem_size, hipMemcpyDeviceToHost));
// GPU mem deallocation
hipFree(d_s);
hipFree(d_p);
hipFree(d_in);
hipFree(d_out);
// open output file
if (encrypt == 1) out = fopen(path2, "wb");
else out = fopen(path3, "wb");
if (out == NULL) {
printf("error: the output file is not open\n");
return 0;
}
int byte_write_success = fwrite((void *)buffer_out, sizeof(char), file_size, out);
if (byte_write_success != (file_size)) {
printf("byte_write_success: %d\n", byte_write_success);
printf("file write is abnormal: bytes write not equals to the file size\n");
return 0;
}
if (encrypt == 1) printf ("file encryption took %6.3f milliseconds.\n", time * 1000);
else printf ("file decryption took %6.3f milliseconds.\n", time * 1000);
fclose(out);
free(buffer_in);
free(buffer_out);
return 0;
}
|
3f878e6949d74229a2bee74a1ae1f5033dda7b1f.cu
|
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#define MAXKEYBYTES 56 /* 448 bits */
#define N 16
#define noErr 0
#define DATAERROR -1
#define KEYBYTES 8
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ) {
if(cudaSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
return ;
}
}
// This will output the proper error string when calling cudaGetLastError
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
return;
}
}
uint32_t S[4][256] = {
{0xd1310ba6L, 0x98dfb5acL, 0x2ffd72dbL, 0xd01adfb7L, 0xb8e1afedL, 0x6a267e96L,
0xba7c9045L, 0xf12c7f99L, 0x24a19947L, 0xb3916cf7L, 0x0801f2e2L, 0x858efc16L,
0x636920d8L, 0x71574e69L, 0xa458fea3L, 0xf4933d7eL, 0x0d95748fL, 0x728eb658L,
0x718bcd58L, 0x82154aeeL, 0x7b54a41dL, 0xc25a59b5L, 0x9c30d539L, 0x2af26013L,
0xc5d1b023L, 0x286085f0L, 0xca417918L, 0xb8db38efL, 0x8e79dcb0L, 0x603a180eL,
0x6c9e0e8bL, 0xb01e8a3eL, 0xd71577c1L, 0xbd314b27L, 0x78af2fdaL, 0x55605c60L,
0xe65525f3L, 0xaa55ab94L, 0x57489862L, 0x63e81440L, 0x55ca396aL, 0x2aab10b6L,
0xb4cc5c34L, 0x1141e8ceL, 0xa15486afL, 0x7c72e993L, 0xb3ee1411L, 0x636fbc2aL,
0x2ba9c55dL, 0x741831f6L, 0xce5c3e16L, 0x9b87931eL, 0xafd6ba33L, 0x6c24cf5cL,
0x7a325381L, 0x28958677L, 0x3b8f4898L, 0x6b4bb9afL, 0xc4bfe81bL, 0x66282193L,
0x61d809ccL, 0xfb21a991L, 0x487cac60L, 0x5dec8032L, 0xef845d5dL, 0xe98575b1L,
0xdc262302L, 0xeb651b88L, 0x23893e81L, 0xd396acc5L, 0x0f6d6ff3L, 0x83f44239L,
0x2e0b4482L, 0xa4842004L, 0x69c8f04aL, 0x9e1f9b5eL, 0x21c66842L, 0xf6e96c9aL,
0x670c9c61L, 0xabd388f0L, 0x6a51a0d2L, 0xd8542f68L, 0x960fa728L, 0xab5133a3L,
0x6eef0b6cL, 0x137a3be4L, 0xba3bf050L, 0x7efb2a98L, 0xa1f1651dL, 0x39af0176L,
0x66ca593eL, 0x82430e88L, 0x8cee8619L, 0x456f9fb4L, 0x7d84a5c3L, 0x3b8b5ebeL,
0xe06f75d8L, 0x85c12073L, 0x401a449fL, 0x56c16aa6L, 0x4ed3aa62L, 0x363f7706L,
0x1bfedf72L, 0x429b023dL, 0x37d0d724L, 0xd00a1248L, 0xdb0fead3L, 0x49f1c09bL,
0x075372c9L, 0x80991b7bL, 0x25d479d8L, 0xf6e8def7L, 0xe3fe501aL, 0xb6794c3bL,
0x976ce0bdL, 0x04c006baL, 0xc1a94fb6L, 0x409f60c4L, 0x5e5c9ec2L, 0x196a2463L,
0x68fb6fafL, 0x3e6c53b5L, 0x1339b2ebL, 0x3b52ec6fL, 0x6dfc511fL, 0x9b30952cL,
0xcc814544L, 0xaf5ebd09L, 0xbee3d004L, 0xde334afdL, 0x660f2807L, 0x192e4bb3L,
0xc0cba857L, 0x45c8740fL, 0xd20b5f39L, 0xb9d3fbdbL, 0x5579c0bdL, 0x1a60320aL,
0xd6a100c6L, 0x402c7279L, 0x679f25feL, 0xfb1fa3ccL, 0x8ea5e9f8L, 0xdb3222f8L,
0x3c7516dfL, 0xfd616b15L, 0x2f501ec8L, 0xad0552abL, 0x323db5faL, 0xfd238760L,
0x53317b48L, 0x3e00df82L, 0x9e5c57bbL, 0xca6f8ca0L, 0x1a87562eL, 0xdf1769dbL,
0xd542a8f6L, 0x287effc3L, 0xac6732c6L, 0x8c4f5573L, 0x695b27b0L, 0xbbca58c8L,
0xe1ffa35dL, 0xb8f011a0L, 0x10fa3d98L, 0xfd2183b8L, 0x4afcb56cL, 0x2dd1d35bL,
0x9a53e479L, 0xb6f84565L, 0xd28e49bcL, 0x4bfb9790L, 0xe1ddf2daL, 0xa4cb7e33L,
0x62fb1341L, 0xcee4c6e8L, 0xef20cadaL, 0x36774c01L, 0xd07e9efeL, 0x2bf11fb4L,
0x95dbda4dL, 0xae909198L, 0xeaad8e71L, 0x6b93d5a0L, 0xd08ed1d0L, 0xafc725e0L,
0x8e3c5b2fL, 0x8e7594b7L, 0x8ff6e2fbL, 0xf2122b64L, 0x8888b812L, 0x900df01cL,
0x4fad5ea0L, 0x688fc31cL, 0xd1cff191L, 0xb3a8c1adL, 0x2f2f2218L, 0xbe0e1777L,
0xea752dfeL, 0x8b021fa1L, 0xe5a0cc0fL, 0xb56f74e8L, 0x18acf3d6L, 0xce89e299L,
0xb4a84fe0L, 0xfd13e0b7L, 0x7cc43b81L, 0xd2ada8d9L, 0x165fa266L, 0x80957705L,
0x93cc7314L, 0x211a1477L, 0xe6ad2065L, 0x77b5fa86L, 0xc75442f5L, 0xfb9d35cfL,
0xebcdaf0cL, 0x7b3e89a0L, 0xd6411bd3L, 0xae1e7e49L, 0x00250e2dL, 0x2071b35eL,
0x226800bbL, 0x57b8e0afL, 0x2464369bL, 0xf009b91eL, 0x5563911dL, 0x59dfa6aaL,
0x78c14389L, 0xd95a537fL, 0x207d5ba2L, 0x02e5b9c5L, 0x83260376L, 0x6295cfa9L,
0x11c81968L, 0x4e734a41L, 0xb3472dcaL, 0x7b14a94aL, 0x1b510052L, 0x9a532915L,
0xd60f573fL, 0xbc9bc6e4L, 0x2b60a476L, 0x81e67400L, 0x08ba6fb5L, 0x571be91fL,
0xf296ec6bL, 0x2a0dd915L, 0xb6636521L, 0xe7b9f9b6L, 0xff34052eL, 0xc5855664L,
0x53b02d5dL, 0xa99f8fa1L, 0x08ba4799L, 0x6e85076aL},
{0x4b7a70e9L, 0xb5b32944L, 0xdb75092eL, 0xc4192623L, 0xad6ea6b0L, 0x49a7df7dL,
0x9cee60b8L, 0x8fedb266L, 0xecaa8c71L, 0x699a17ffL, 0x5664526cL, 0xc2b19ee1L,
0x193602a5L, 0x75094c29L, 0xa0591340L, 0xe4183a3eL, 0x3f54989aL, 0x5b429d65L,
0x6b8fe4d6L, 0x99f73fd6L, 0xa1d29c07L, 0xefe830f5L, 0x4d2d38e6L, 0xf0255dc1L,
0x4cdd2086L, 0x8470eb26L, 0x6382e9c6L, 0x021ecc5eL, 0x09686b3fL, 0x3ebaefc9L,
0x3c971814L, 0x6b6a70a1L, 0x687f3584L, 0x52a0e286L, 0xb79c5305L, 0xaa500737L,
0x3e07841cL, 0x7fdeae5cL, 0x8e7d44ecL, 0x5716f2b8L, 0xb03ada37L, 0xf0500c0dL,
0xf01c1f04L, 0x0200b3ffL, 0xae0cf51aL, 0x3cb574b2L, 0x25837a58L, 0xdc0921bdL,
0xd19113f9L, 0x7ca92ff6L, 0x94324773L, 0x22f54701L, 0x3ae5e581L, 0x37c2dadcL,
0xc8b57634L, 0x9af3dda7L, 0xa9446146L, 0x0fd0030eL, 0xecc8c73eL, 0xa4751e41L,
0xe238cd99L, 0x3bea0e2fL, 0x3280bba1L, 0x183eb331L, 0x4e548b38L, 0x4f6db908L,
0x6f420d03L, 0xf60a04bfL, 0x2cb81290L, 0x24977c79L, 0x5679b072L, 0xbcaf89afL,
0xde9a771fL, 0xd9930810L, 0xb38bae12L, 0xdccf3f2eL, 0x5512721fL, 0x2e6b7124L,
0x501adde6L, 0x9f84cd87L, 0x7a584718L, 0x7408da17L, 0xbc9f9abcL, 0xe94b7d8cL,
0xec7aec3aL, 0xdb851dfaL, 0x63094366L, 0xc464c3d2L, 0xef1c1847L, 0x3215d908L,
0xdd433b37L, 0x24c2ba16L, 0x12a14d43L, 0x2a65c451L, 0x50940002L, 0x133ae4ddL,
0x71dff89eL, 0x10314e55L, 0x81ac77d6L, 0x5f11199bL, 0x043556f1L, 0xd7a3c76bL,
0x3c11183bL, 0x5924a509L, 0xf28fe6edL, 0x97f1fbfaL, 0x9ebabf2cL, 0x1e153c6eL,
0x86e34570L, 0xeae96fb1L, 0x860e5e0aL, 0x5a3e2ab3L, 0x771fe71cL, 0x4e3d06faL,
0x2965dcb9L, 0x99e71d0fL, 0x803e89d6L, 0x5266c825L, 0x2e4cc978L, 0x9c10b36aL,
0xc6150ebaL, 0x94e2ea78L, 0xa5fc3c53L, 0x1e0a2df4L, 0xf2f74ea7L, 0x361d2b3dL,
0x1939260fL, 0x19c27960L, 0x5223a708L, 0xf71312b6L, 0xebadfe6eL, 0xeac31f66L,
0xe3bc4595L, 0xa67bc883L, 0xb17f37d1L, 0x018cff28L, 0xc332ddefL, 0xbe6c5aa5L,
0x65582185L, 0x68ab9802L, 0xeecea50fL, 0xdb2f953bL, 0x2aef7dadL, 0x5b6e2f84L,
0x1521b628L, 0x29076170L, 0xecdd4775L, 0x619f1510L, 0x13cca830L, 0xeb61bd96L,
0x0334fe1eL, 0xaa0363cfL, 0xb5735c90L, 0x4c70a239L, 0xd59e9e0bL, 0xcbaade14L,
0xeecc86bcL, 0x60622ca7L, 0x9cab5cabL, 0xb2f3846eL, 0x648b1eafL, 0x19bdf0caL,
0xa02369b9L, 0x655abb50L, 0x40685a32L, 0x3c2ab4b3L, 0x319ee9d5L, 0xc021b8f7L,
0x9b540b19L, 0x875fa099L, 0x95f7997eL, 0x623d7da8L, 0xf837889aL, 0x97e32d77L,
0x11ed935fL, 0x16681281L, 0x0e358829L, 0xc7e61fd6L, 0x96dedfa1L, 0x7858ba99L,
0x57f584a5L, 0x1b227263L, 0x9b83c3ffL, 0x1ac24696L, 0xcdb30aebL, 0x532e3054L,
0x8fd948e4L, 0x6dbc3128L, 0x58ebf2efL, 0x34c6ffeaL, 0xfe28ed61L, 0xee7c3c73L,
0x5d4a14d9L, 0xe864b7e3L, 0x42105d14L, 0x203e13e0L, 0x45eee2b6L, 0xa3aaabeaL,
0xdb6c4f15L, 0xfacb4fd0L, 0xc742f442L, 0xef6abbb5L, 0x654f3b1dL, 0x41cd2105L,
0xd81e799eL, 0x86854dc7L, 0xe44b476aL, 0x3d816250L, 0xcf62a1f2L, 0x5b8d2646L,
0xfc8883a0L, 0xc1c7b6a3L, 0x7f1524c3L, 0x69cb7492L, 0x47848a0bL, 0x5692b285L,
0x095bbf00L, 0xad19489dL, 0x1462b174L, 0x23820e00L, 0x58428d2aL, 0x0c55f5eaL,
0x1dadf43eL, 0x233f7061L, 0x3372f092L, 0x8d937e41L, 0xd65fecf1L, 0x6c223bdbL,
0x7cde3759L, 0xcbee7460L, 0x4085f2a7L, 0xce77326eL, 0xa6078084L, 0x19f8509eL,
0xe8efd855L, 0x61d99735L, 0xa969a7aaL, 0xc50c06c2L, 0x5a04abfcL, 0x800bcadcL,
0x9e447a2eL, 0xc3453484L, 0xfdd56705L, 0x0e1e9ec9L, 0xdb73dbd3L, 0x105588cdL,
0x675fda79L, 0xe3674340L, 0xc5c43465L, 0x713e38d8L, 0x3d28f89eL, 0xf16dff20L,
0x153e21e7L, 0x8fb03d4aL, 0xe6e39f2bL, 0xdb83adf7L},
{0xe93d5a68L, 0x948140f7L, 0xf64c261cL, 0x94692934L, 0x411520f7L, 0x7602d4f7L,
0xbcf46b2eL, 0xd4a20068L, 0xd4082471L, 0x3320f46aL, 0x43b7d4b7L, 0x500061afL,
0x1e39f62eL, 0x97244546L, 0x14214f74L, 0xbf8b8840L, 0x4d95fc1dL, 0x96b591afL,
0x70f4ddd3L, 0x66a02f45L, 0xbfbc09ecL, 0x03bd9785L, 0x7fac6dd0L, 0x31cb8504L,
0x96eb27b3L, 0x55fd3941L, 0xda2547e6L, 0xabca0a9aL, 0x28507825L, 0x530429f4L,
0x0a2c86daL, 0xe9b66dfbL, 0x68dc1462L, 0xd7486900L, 0x680ec0a4L, 0x27a18deeL,
0x4f3ffea2L, 0xe887ad8cL, 0xb58ce006L, 0x7af4d6b6L, 0xaace1e7cL, 0xd3375fecL,
0xce78a399L, 0x406b2a42L, 0x20fe9e35L, 0xd9f385b9L, 0xee39d7abL, 0x3b124e8bL,
0x1dc9faf7L, 0x4b6d1856L, 0x26a36631L, 0xeae397b2L, 0x3a6efa74L, 0xdd5b4332L,
0x6841e7f7L, 0xca7820fbL, 0xfb0af54eL, 0xd8feb397L, 0x454056acL, 0xba489527L,
0x55533a3aL, 0x20838d87L, 0xfe6ba9b7L, 0xd096954bL, 0x55a867bcL, 0xa1159a58L,
0xcca92963L, 0x99e1db33L, 0xa62a4a56L, 0x3f3125f9L, 0x5ef47e1cL, 0x9029317cL,
0xfdf8e802L, 0x04272f70L, 0x80bb155cL, 0x05282ce3L, 0x95c11548L, 0xe4c66d22L,
0x48c1133fL, 0xc70f86dcL, 0x07f9c9eeL, 0x41041f0fL, 0x404779a4L, 0x5d886e17L,
0x325f51ebL, 0xd59bc0d1L, 0xf2bcc18fL, 0x41113564L, 0x257b7834L, 0x602a9c60L,
0xdff8e8a3L, 0x1f636c1bL, 0x0e12b4c2L, 0x02e1329eL, 0xaf664fd1L, 0xcad18115L,
0x6b2395e0L, 0x333e92e1L, 0x3b240b62L, 0xeebeb922L, 0x85b2a20eL, 0xe6ba0d99L,
0xde720c8cL, 0x2da2f728L, 0xd0127845L, 0x95b794fdL, 0x647d0862L, 0xe7ccf5f0L,
0x5449a36fL, 0x877d48faL, 0xc39dfd27L, 0xf33e8d1eL, 0x0a476341L, 0x992eff74L,
0x3a6f6eabL, 0xf4f8fd37L, 0xa812dc60L, 0xa1ebddf8L, 0x991be14cL, 0xdb6e6b0dL,
0xc67b5510L, 0x6d672c37L, 0x2765d43bL, 0xdcd0e804L, 0xf1290dc7L, 0xcc00ffa3L,
0xb5390f92L, 0x690fed0bL, 0x667b9ffbL, 0xcedb7d9cL, 0xa091cf0bL, 0xd9155ea3L,
0xbb132f88L, 0x515bad24L, 0x7b9479bfL, 0x763bd6ebL, 0x37392eb3L, 0xcc115979L,
0x8026e297L, 0xf42e312dL, 0x6842ada7L, 0xc66a2b3bL, 0x12754cccL, 0x782ef11cL,
0x6a124237L, 0xb79251e7L, 0x06a1bbe6L, 0x4bfb6350L, 0x1a6b1018L, 0x11caedfaL,
0x3d25bdd8L, 0xe2e1c3c9L, 0x44421659L, 0x0a121386L, 0xd90cec6eL, 0xd5abea2aL,
0x64af674eL, 0xda86a85fL, 0xbebfe988L, 0x64e4c3feL, 0x9dbc8057L, 0xf0f7c086L,
0x60787bf8L, 0x6003604dL, 0xd1fd8346L, 0xf6381fb0L, 0x7745ae04L, 0xd736fcccL,
0x83426b33L, 0xf01eab71L, 0xb0804187L, 0x3c005e5fL, 0x77a057beL, 0xbde8ae24L,
0x55464299L, 0xbf582e61L, 0x4e58f48fL, 0xf2ddfda2L, 0xf474ef38L, 0x8789bdc2L,
0x5366f9c3L, 0xc8b38e74L, 0xb475f255L, 0x46fcd9b9L, 0x7aeb2661L, 0x8b1ddf84L,
0x846a0e79L, 0x915f95e2L, 0x466e598eL, 0x20b45770L, 0x8cd55591L, 0xc902de4cL,
0xb90bace1L, 0xbb8205d0L, 0x11a86248L, 0x7574a99eL, 0xb77f19b6L, 0xe0a9dc09L,
0x662d09a1L, 0xc4324633L, 0xe85a1f02L, 0x09f0be8cL, 0x4a99a025L, 0x1d6efe10L,
0x1ab93d1dL, 0x0ba5a4dfL, 0xa186f20fL, 0x2868f169L, 0xdcb7da83L, 0x573906feL,
0xa1e2ce9bL, 0x4fcd7f52L, 0x50115e01L, 0xa70683faL, 0xa002b5c4L, 0x0de6d027L,
0x9af88c27L, 0x773f8641L, 0xc3604c06L, 0x61a806b5L, 0xf0177a28L, 0xc0f586e0L,
0x006058aaL, 0x30dc7d62L, 0x11e69ed7L, 0x2338ea63L, 0x53c2dd94L, 0xc2c21634L,
0xbbcbee56L, 0x90bcb6deL, 0xebfc7da1L, 0xce591d76L, 0x6f05e409L, 0x4b7c0188L,
0x39720a3dL, 0x7c927c24L, 0x86e3725fL, 0x724d9db9L, 0x1ac15bb4L, 0xd39eb8fcL,
0xed545578L, 0x08fca5b5L, 0xd83d7cd3L, 0x4dad0fc4L, 0x1e50ef5eL, 0xb161e6f8L,
0xa28514d9L, 0x6c51133cL, 0x6fd5c7e7L, 0x56e14ec4L, 0x362abfceL, 0xddc6c837L,
0xd79a3234L, 0x92638212L, 0x670efa8eL, 0x406000e0L},
{0x3a39ce37L, 0xd3faf5cfL, 0xabc27737L, 0x5ac52d1bL, 0x5cb0679eL, 0x4fa33742L,
0xd3822740L, 0x99bc9bbeL, 0xd5118e9dL, 0xbf0f7315L, 0xd62d1c7eL, 0xc700c47bL,
0xb78c1b6bL, 0x21a19045L, 0xb26eb1beL, 0x6a366eb4L, 0x5748ab2fL, 0xbc946e79L,
0xc6a376d2L, 0x6549c2c8L, 0x530ff8eeL, 0x468dde7dL, 0xd5730a1dL, 0x4cd04dc6L,
0x2939bbdbL, 0xa9ba4650L, 0xac9526e8L, 0xbe5ee304L, 0xa1fad5f0L, 0x6a2d519aL,
0x63ef8ce2L, 0x9a86ee22L, 0xc089c2b8L, 0x43242ef6L, 0xa51e03aaL, 0x9cf2d0a4L,
0x83c061baL, 0x9be96a4dL, 0x8fe51550L, 0xba645bd6L, 0x2826a2f9L, 0xa73a3ae1L,
0x4ba99586L, 0xef5562e9L, 0xc72fefd3L, 0xf752f7daL, 0x3f046f69L, 0x77fa0a59L,
0x80e4a915L, 0x87b08601L, 0x9b09e6adL, 0x3b3ee593L, 0xe990fd5aL, 0x9e34d797L,
0x2cf0b7d9L, 0x022b8b51L, 0x96d5ac3aL, 0x017da67dL, 0xd1cf3ed6L, 0x7c7d2d28L,
0x1f9f25cfL, 0xadf2b89bL, 0x5ad6b472L, 0x5a88f54cL, 0xe029ac71L, 0xe019a5e6L,
0x47b0acfdL, 0xed93fa9bL, 0xe8d3c48dL, 0x283b57ccL, 0xf8d56629L, 0x79132e28L,
0x785f0191L, 0xed756055L, 0xf7960e44L, 0xe3d35e8cL, 0x15056dd4L, 0x88f46dbaL,
0x03a16125L, 0x0564f0bdL, 0xc3eb9e15L, 0x3c9057a2L, 0x97271aecL, 0xa93a072aL,
0x1b3f6d9bL, 0x1e6321f5L, 0xf59c66fbL, 0x26dcf319L, 0x7533d928L, 0xb155fdf5L,
0x03563482L, 0x8aba3cbbL, 0x28517711L, 0xc20ad9f8L, 0xabcc5167L, 0xccad925fL,
0x4de81751L, 0x3830dc8eL, 0x379d5862L, 0x9320f991L, 0xea7a90c2L, 0xfb3e7bceL,
0x5121ce64L, 0x774fbe32L, 0xa8b6e37eL, 0xc3293d46L, 0x48de5369L, 0x6413e680L,
0xa2ae0810L, 0xdd6db224L, 0x69852dfdL, 0x09072166L, 0xb39a460aL, 0x6445c0ddL,
0x586cdecfL, 0x1c20c8aeL, 0x5bbef7ddL, 0x1b588d40L, 0xccd2017fL, 0x6bb4e3bbL,
0xdda26a7eL, 0x3a59ff45L, 0x3e350a44L, 0xbcb4cdd5L, 0x72eacea8L, 0xfa6484bbL,
0x8d6612aeL, 0xbf3c6f47L, 0xd29be463L, 0x542f5d9eL, 0xaec2771bL, 0xf64e6370L,
0x740e0d8dL, 0xe75b1357L, 0xf8721671L, 0xaf537d5dL, 0x4040cb08L, 0x4eb4e2ccL,
0x34d2466aL, 0x0115af84L, 0xe1b00428L, 0x95983a1dL, 0x06b89fb4L, 0xce6ea048L,
0x6f3f3b82L, 0x3520ab82L, 0x011a1d4bL, 0x277227f8L, 0x611560b1L, 0xe7933fdcL,
0xbb3a792bL, 0x344525bdL, 0xa08839e1L, 0x51ce794bL, 0x2f32c9b7L, 0xa01fbac9L,
0xe01cc87eL, 0xbcc7d1f6L, 0xcf0111c3L, 0xa1e8aac7L, 0x1a908749L, 0xd44fbd9aL,
0xd0dadecbL, 0xd50ada38L, 0x0339c32aL, 0xc6913667L, 0x8df9317cL, 0xe0b12b4fL,
0xf79e59b7L, 0x43f5bb3aL, 0xf2d519ffL, 0x27d9459cL, 0xbf97222cL, 0x15e6fc2aL,
0x0f91fc71L, 0x9b941525L, 0xfae59361L, 0xceb69cebL, 0xc2a86459L, 0x12baa8d1L,
0xb6c1075eL, 0xe3056a0cL, 0x10d25065L, 0xcb03a442L, 0xe0ec6e0eL, 0x1698db3bL,
0x4c98a0beL, 0x3278e964L, 0x9f1f9532L, 0xe0d392dfL, 0xd3a0342bL, 0x8971f21eL,
0x1b0a7441L, 0x4ba3348cL, 0xc5be7120L, 0xc37632d8L, 0xdf359f8dL, 0x9b992f2eL,
0xe60b6f47L, 0x0fe3f11dL, 0xe54cda54L, 0x1edad891L, 0xce6279cfL, 0xcd3e7e6fL,
0x1618b166L, 0xfd2c1d05L, 0x848fd2c5L, 0xf6fb2299L, 0xf523f357L, 0xa6327623L,
0x93a83531L, 0x56cccd02L, 0xacf08162L, 0x5a75ebb5L, 0x6e163697L, 0x88d273ccL,
0xde966292L, 0x81b949d0L, 0x4c50901bL, 0x71c65614L, 0xe6c6c7bdL, 0x327a140aL,
0x45e1d006L, 0xc3f27b9aL, 0xc9aa53fdL, 0x62a80f00L, 0xbb25bfe2L, 0x35bdd2f6L,
0x71126905L, 0xb2040222L, 0xb6cbcf7cL, 0xcd769c2bL, 0x53113ec0L, 0x1640e3d3L,
0x38abbd60L, 0x2547adf0L, 0xba38209cL, 0xf746ce76L, 0x77afa1c5L, 0x20756060L,
0x85cbfe4eL, 0x8ae88dd8L, 0x7aaaf9b0L, 0x4cf9aa7eL, 0x1948c25cL, 0x02fb8a8cL,
0x01c36ae4L, 0xd6ebe1f9L, 0x90d4f869L, 0xa65cdea0L, 0x3f09252dL, 0xc208e69fL,
0xb74e6132L, 0xce77e25bL, 0x578fdfe3L, 0x3ac372e6L}
};
uint32_t P[18] = {
0x243f6a88L, 0x85a308d3L, 0x13198a2eL, 0x03707344L, 0xa4093822L, 0x299f31d0L,
0x082efa98L, 0xec4e6c89L, 0x452821e6L, 0x38d01377L, 0xbe5466cfL, 0x34e90c6cL,
0xc0ac29b7L, 0xc97c50ddL, 0x3f84d5b5L, 0xb5470917L, 0x9216d5d9L, 0x8979fb1bL};
void checkbuffer(uint32_t *buf, int size) {
int i;
for (i = 0; i < size; i++) {
printf("%d: %x\n", i, buf[i]);
}
}
uint32_t F(uint32_t x) {
unsigned short a;
unsigned short b;
unsigned short c;
unsigned short d;
uint32_t y;
d = x & 0x00FF;
x >>= 8;
c = x & 0x00FF;
x >>= 8;
b = x & 0x00FF;
x >>= 8;
a = x & 0x00FF;
y = S[0][a] + S[1][b];
y = y ^ S[2][c];
y = y + S[3][d];
return y;
}
void Blowfish_encipher(uint32_t *xl, uint32_t *xr) {
uint32_t Xl;
uint32_t Xr;
uint32_t temp;
short i;
Xl = *xl;
Xr = *xr;
for (i = 0; i < N; ++i) {
Xl = Xl ^ P[i];
Xr = F(Xl) ^ Xr;
temp = Xl;
Xl = Xr;
Xr = temp;
}
temp = Xl;
Xl = Xr;
Xr = temp;
Xr = Xr ^ P[N];
Xl = Xl ^ P[N + 1];
*xl = Xl;
*xr = Xr;
}
void Blowfish_decipher(uint32_t *xl, uint32_t *xr) {
uint32_t Xl;
uint32_t Xr;
uint32_t temp;
short i;
Xl = *xl;
Xr = *xr;
for (i = N + 1; i > 1; --i) {
Xl = Xl ^ P[i];
Xr = F(Xl) ^ Xr;
/* Exchange Xl and Xr */
temp = Xl;
Xl = Xr;
Xr = temp;
}
/* Exchange Xl and Xr */
temp = Xl;
Xl = Xr;
Xr = temp;
Xr = Xr ^ P[1];
Xl = Xl ^ P[0];
*xl = Xl;
*xr = Xr;
}
short InitializeBlowfish(char key[], short keybytes) {
short i;
short j;
short k;
// short error;
// short numread;
uint32_t data;
uint32_t datal;
uint32_t datar;
j = 0;
for (i = 0; i < N + 2; ++i) {
data = 0x00000000;
for (k = 0; k < 4; ++k) {
data <<= 8;
data |= (uint32_t)key[j] & 0xff;
j = j + 1;
if (j >= keybytes) j = 0;
}
P[i] = P[i] ^ data;
}
datal = 0x00000000;
datar = 0x00000000;
for (i = 0; i < N + 2; i += 2) {
Blowfish_encipher(&datal, &datar);
P[i] = datal;
P[i + 1] = datar;
}
for (i = 0; i < 4; ++i) {
for (j = 0; j < 256; j += 2) {
Blowfish_encipher(&datal, &datar);
S[i][j] = datal;
S[i][j + 1] = datar;
}
}
//printf("Done!\n");
//printf("P[0]: %x\n", P[0]);
//printf("P[1]: %x\n", P[1]);
return 0;
}
// encrypt is 1, decrypt is 0
void FileEncrypt(uint32_t *out, uint32_t *in, int size, int encrypt) {
int i;
memset(out, 0, size);
memcpy(out, in, size);
if (encrypt) {
for (i = 0; i < size/sizeof(uint32_t)-1; i+=2) {
Blowfish_encipher(&out[i], &out[i+1]);
}
}
else {
for (i = 0; i < size/sizeof(uint32_t)-1; i+=2) {
Blowfish_decipher(&out[i], &out[i+1]);
}
}
}
__global__ void GpuFileEncrypt(uint32_t *out, uint32_t *in, uint32_t *d_s, uint32_t *d_p) {
// each thread works only on its own pair of 32-bit words, so there is no benefit in staging the
// block data itself in shared memory (unlike e.g. matrix multiplication, sorting or reversing,
// where threads need each other's data). The P-array and S-boxes, however, are read repeatedly
// inside the round loop, so they are loaded into shared memory.
__shared__ uint32_t s[1042]; // p 18 + s 1024 = 1042
unsigned int tid = threadIdx.x;
unsigned int gid = blockIdx.x * blockDim.x + tid;
unsigned int copy_s_begin_index = tid * 1024 / blockDim.x;
unsigned int copy_s_end_index = (tid + 1) * 1024 / blockDim.x;
int i = 0;
if (tid == 0) {
for (i = 0; i < 18; i++) {
s[i] = d_p[i];
}
}
__syncthreads();
for (i = copy_s_begin_index; i < copy_s_end_index; i++) {
s[i + 18] = d_s[i];
}
__syncthreads();
uint32_t xl = in[2 * gid];
uint32_t xr = in[2 * gid + 1];
uint32_t temp;
unsigned short a;
unsigned short b;
unsigned short c;
unsigned short d;
uint32_t x, y;
for (i = 0; i < 16; ++i) {
xl = xl ^ s[i];
x = xl;
d = x & 0x00FF;
x >>= 8;
c = x & 0x00FF;
x >>= 8;
b = x & 0x00FF;
x >>= 8;
a = x & 0x00FF;
//y = S[0][a] + S[1][b];
y = s[a + 18] + s[256 + b + 18];
//y = y ^ S[2][c];
y = y ^ s[512 + c + 18];
//y = y + S[3][d];
y = y + s[768 + d + 18];
xr = y ^ xr;
// swap xl and xr
temp = xl;
xl = xr;
xr = temp;
}
temp = xl;
xl = xr;
xr = temp;
xr = xr ^ s[16];
xl = xl ^ s[17];
out[2 * gid] = xl;
out[2 * gid + 1] = xr;
}
__global__ void GpuFileDecrypt(uint32_t *out, uint32_t *in, uint32_t *d_s, uint32_t *d_p) {
// each thread works only on its own pair of 32-bit words, so there is no benefit in staging the
// block data itself in shared memory (unlike e.g. matrix multiplication, sorting or reversing,
// where threads need each other's data). The P-array and S-boxes, however, are read repeatedly
// inside the round loop, so they are loaded into shared memory.
__shared__ uint32_t s[1042]; // p 18 + s 1024 = 1042
unsigned int tid = threadIdx.x;
unsigned int gid = blockIdx.x * blockDim.x + tid;
unsigned int copy_s_begin_index = tid * 1024 / blockDim.x;
unsigned int copy_s_end_index = (tid + 1) * 1024 / blockDim.x;
int i = 0;
if (tid == 0) {
for (i = 0; i < 18; i++) {
s[i] = d_p[i];
}
}
__syncthreads();
for (i = copy_s_begin_index; i < copy_s_end_index; i++) {
s[i + 18] = d_s[i];
}
__syncthreads();
uint32_t xl = in[2 * gid];
uint32_t xr = in[2 * gid + 1];
uint32_t temp;
unsigned short a;
unsigned short b;
unsigned short c;
unsigned short d;
uint32_t x, y;
for (i = 17; i > 1; --i) {
xl = xl ^ s[i];
x = xl;
d = x & 0x00FF;
x >>= 8;
c = x & 0x00FF;
x >>= 8;
b = x & 0x00FF;
x >>= 8;
a = x & 0x00FF;
//y = S[0][a] + S[1][b];
y = s[a + 18] + s[256 + b + 18];
//y = y ^ S[2][c];
y = y ^ s[512 + c + 18];
//y = y + S[3][d];
y = y + s[768 + d + 18];
xr = y ^ xr;
/* Exchange Xl and Xr */
temp = xl;
xl = xr;
xr = temp;
}
/* Exchange Xl and Xr */
temp = xl;
xl = xr;
xr = temp;
xr = xr ^ s[1];
xl = xl ^ s[0];
// write the decrypted block back (mirrors GpuFileEncrypt; without this the result was never stored)
out[2 * gid] = xl;
out[2 * gid + 1] = xr;
}
void example () {
uint32_t datal = 0xFFFFFFFF;
uint32_t datar = 0xFFFFFFFF;
printf("Clean text %#x%x\n", datal, datar);
Blowfish_encipher(&datal, &datar);
printf("Cipher text %#x%x\n", datal, datar);
Blowfish_decipher(&datal, &datar);
printf("Decipher text %#x%x\n", datal, datar);
}
int main()
{
char key[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
short keybytes = 8;
InitializeBlowfish(key, keybytes);
const char * path1 = "Test2Image.jpg";
const char * path2 = "Test2Image_out1.jpg";
const char * path3 = "Test2Image_out2.jpg";
FILE *in, *out;
int file_size = 0;
int encrypt;
printf("start of the program: press 1: encrypt, press 0 decrypt\n");
scanf("%d", &encrypt);
if (encrypt != 0 && encrypt != 1) {
printf("the encrypt/decrypt input is not correct\n");
return 0;
}
if (encrypt == 1) in = fopen(path1, "rb");
else in = fopen(path2, "rb");
if (in == NULL) {
printf("error: the input file is not open\n");
return 0;
}
fseek(in, 0L, SEEK_END);
file_size = ftell(in);
rewind(in);
uint32_t *buffer_in = (uint32_t *)malloc(file_size);
uint32_t *buffer_out = (uint32_t *)malloc(file_size);
if (buffer_in == NULL || buffer_out == NULL) {
printf("the buffers allocate memory failed!\n");
return 0;
}
int byte_read_success = fread((void *)buffer_in, sizeof(char), file_size, in);
if (byte_read_success != file_size) {
printf("byte_read_success: %d\n", byte_read_success);
printf("file read is abnormal: bytes read not equals to the file size\n");
return 0;
}
fclose(in);
// read file done
memcpy(buffer_out, buffer_in, file_size);
// GPU mem allocation, timer, device synchronization, etc
cudaSetDevice(0);
cudaDeviceSynchronize();
cudaThreadSynchronize();
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
uint32_t *d_in, *d_out;
uint32_t *d_p, *d_s;
int mem_size = sizeof(uint32_t) * (file_size/sizeof(uint32_t) - 1);
// determine num of threads, block per grids, threads per block
// one thread should take charge of 2 uint_32t numbers
int total_launch_threads = mem_size/(sizeof(uint32_t) * 2);
dim3 threadsPerBlock(16, 1, 1); // 16 ~ 1024 though
dim3 blocksPerGrid(total_launch_threads/threadsPerBlock.x, 1, 1);
checkCudaErrors(cudaMalloc((void**) &d_in, mem_size));
checkCudaErrors(cudaMalloc((void**) &d_out, mem_size));
checkCudaErrors(cudaMalloc((void**) &d_s, 4 * 256 * sizeof(uint32_t)));
checkCudaErrors(cudaMalloc((void**) &d_p, 18 * sizeof(uint32_t)));
// CPU->GPU mem transfer: to include transfer time in the measurement, move the cudaEventRecord(start) call above this comment
checkCudaErrors(cudaMemcpy(d_in, buffer_in, mem_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_s, S, 4*256 * sizeof(uint32_t), cudaMemcpyHostToDevice)); // NOT sure if this is correct
checkCudaErrors(cudaMemcpy(d_p, P, 18 * sizeof(uint32_t), cudaMemcpyHostToDevice));
cudaEventRecord(start, 0);
if (encrypt == 1) GpuFileEncrypt<<<blocksPerGrid, threadsPerBlock>>> (d_out, d_in, d_s, d_p);
else GpuFileDecrypt<<<blocksPerGrid, threadsPerBlock>>> (d_out, d_in, d_s, d_p);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop); // block until the kernel and the stop event have completed
cudaEventElapsedTime(&time, start, stop);
// encrypt/decrypt done
// GPU->CPU mem transfer: to include transfer time in the measurement, move the cudaEventRecord(stop) call below this comment
checkCudaErrors(cudaMemcpy(buffer_out, d_out, mem_size, cudaMemcpyDeviceToHost));
// GPU mem deallocation
cudaFree(d_s);
cudaFree(d_p);
cudaFree(d_in);
cudaFree(d_out);
// open output file
if (encrypt == 1) out = fopen(path2, "wb");
else out = fopen(path3, "wb");
if (out == NULL) {
printf("error: the output file is not open\n");
return 0;
}
int byte_write_success = fwrite((void *)buffer_out, sizeof(char), file_size, out);
if (byte_write_success != (file_size)) {
printf("byte_write_success: %d\n", byte_write_success);
printf("file write is abnormal: bytes write not equals to the file size\n");
return 0;
}
if (encrypt == 1) printf ("file encryption took %6.3f milliseconds.\n", time * 1000);
else printf ("file decryption took %6.3f milliseconds.\n", time * 1000);
fclose(out);
free(buffer_in);
free(buffer_out);
return 0;
}
|
a583a77c2b4e14b764cbed22777a0e357476cf7c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ReductionExecution.hpp"
namespace MNN {
namespace CUDA {
ReductionExecution::ReductionExecution(ReductionType opType, int axis, Backend *backend) : Execution(backend) {
mType = opType;
mAxis = axis;
}
ReductionExecution::~ ReductionExecution() {
// Do nothing
}
template <typename T>
__global__ void SUM(const T *input, T *output, int inside, int axis, int outside) {
int count = inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
int y = i / inside;
int x = i % inside;
T sumValue = (T)0;
const T* basicInput = input + y * axis * inside + x;
for (int v=0; v<axis; ++v) {
sumValue += basicInput[v * inside];
}
output[y * inside + x] = sumValue;
}
return;
}
template <typename T>
__global__ void MEAN(const T *input, T *output, int inside, int axis, int outside) {
int count = inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
int y = i / inside;
int x = i % inside;
T sumValue = (T)0;
const T* basicInput = input + y * axis * inside + x;
for (int v=0; v<axis; ++v) {
sumValue += basicInput[v * inside];
}
output[y * inside + x] = sumValue / (T)axis;
}
return;
}
template <typename T>
__global__ void MINIMUM(const T *input, T *output, int inside, int axis, int outside) {
int count = inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
int y = i / inside;
int x = i % inside;
const T* basicInput = input + y * axis * inside + x;
T res = basicInput[0];
for (int v=1; v<axis; ++v) {
res = min(basicInput[v * inside], res);
}
output[y * inside + x] = res;
}
return;
}
template <typename T>
__global__ void MAXIMUM(const T *input, T *output, int inside, int axis, int outside) {
int count = inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
int y = i / inside;
int x = i % inside;
const T* basicInput = input + y * axis * inside + x;
T res = basicInput[0];
for (int v=1; v<axis; ++v) {
res = max(basicInput[v * inside], res);
}
output[y * inside + x] = res;
}
return;
}
template <typename T>
__global__ void PROD(const T *input, T *output, int inside, int axis, int outside) {
int count = inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
int y = i / inside;
int x = i % inside;
const T* basicInput = input + y * axis * inside + x;
T res = basicInput[0];
for (int v=1; v<axis; ++v) {
res *= basicInput[v * inside];
}
output[y * inside + x] = res;
}
return;
}
ErrorCode ReductionExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto input = (void*)inputs[0]->deviceId();
auto output = (void*)outputs[0]->deviceId();
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
int inside = 1;
int outside = 1;
int axis = inputs[0]->length(mAxis);
for (int i=0; i<mAxis; ++i) {
outside *= inputs[0]->length(i);
}
for (int i=mAxis+1; i<inputs[0]->dimensions(); ++i) {
inside *= inputs[0]->length(i);
}
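// Illustration: for an input of shape [2, 3, 4] reduced over mAxis = 1,
// outside = 2, axis = 3, inside = 4; each of the outside*inside = 8 output
// elements accumulates 'axis' strided values in the kernels above.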
int count = inside * outside;
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
if (inputs[0]->getType() == halide_type_of<float>()) {
switch (mType) {
case ReductionType_MEAN:
hipLaunchKernelGGL(( MEAN), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_SUM:
hipLaunchKernelGGL(( SUM), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_MINIMUM:
hipLaunchKernelGGL(( MINIMUM), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_MAXIMUM:
hipLaunchKernelGGL(( MAXIMUM), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_PROD:
hipLaunchKernelGGL(( PROD), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output, inside, axis, outside);
return NO_ERROR;
}
MNN_ASSERT(false);
return NOT_SUPPORT;
}
MNN_ASSERT(inputs[0]->getType() == halide_type_of<int32_t>());
switch (mType) {
case ReductionType_MEAN:
hipLaunchKernelGGL(( MEAN), dim3(block_num), dim3(threads_num), 0, 0, (const int32_t*)input, (int32_t*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_SUM:
hipLaunchKernelGGL(( SUM), dim3(block_num), dim3(threads_num), 0, 0, (const int32_t*)input, (int32_t*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_MINIMUM:
hipLaunchKernelGGL(( MINIMUM), dim3(block_num), dim3(threads_num), 0, 0, (const int32_t*)input, (int32_t*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_MAXIMUM:
hipLaunchKernelGGL(( MAXIMUM), dim3(block_num), dim3(threads_num), 0, 0, (const int32_t*)input, (int32_t*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_PROD:
hipLaunchKernelGGL(( PROD), dim3(block_num), dim3(threads_num), 0, 0, (const int32_t*)input, (int32_t*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_ANY:
hipLaunchKernelGGL(( MAXIMUM), dim3(block_num), dim3(threads_num), 0, 0, (const int32_t*)input, (int32_t*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_ALL:
hipLaunchKernelGGL(( MINIMUM), dim3(block_num), dim3(threads_num), 0, 0, (const int32_t*)input, (int32_t*)output, inside, axis, outside);
return NO_ERROR;
}
MNN_ASSERT(false);
return NOT_SUPPORT;
}
class ReductionCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
auto type = inputs[0]->getType();
if (type.bits != 32) {
return nullptr;
}
if (type.code != halide_type_float && type.code != halide_type_int) {
return nullptr;
}
auto axis = op->main_as_ReductionParam()->dim()->data()[0];
auto opType = op->main_as_ReductionParam()->operation();
return new ReductionExecution(opType, axis, backend);
}
};
static CUDACreatorRegister<ReductionCreator> __init(OpType_Reduction);
}
}
|
a583a77c2b4e14b764cbed22777a0e357476cf7c.cu
|
#include "ReductionExecution.hpp"
namespace MNN {
namespace CUDA {
ReductionExecution::ReductionExecution(ReductionType opType, int axis, Backend *backend) : Execution(backend) {
mType = opType;
mAxis = axis;
}
ReductionExecution::~ ReductionExecution() {
// Do nothing
}
template <typename T>
__global__ void SUM(const T *input, T *output, int inside, int axis, int outside) {
int count = inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
int y = i / inside;
int x = i % inside;
T sumValue = (T)0;
const T* basicInput = input + y * axis * inside + x;
for (int v=0; v<axis; ++v) {
sumValue += basicInput[v * inside];
}
output[y * inside + x] = sumValue;
}
return;
}
template <typename T>
__global__ void MEAN(const T *input, T *output, int inside, int axis, int outside) {
int count = inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
int y = i / inside;
int x = i % inside;
T sumValue = (T)0;
const T* basicInput = input + y * axis * inside + x;
for (int v=0; v<axis; ++v) {
sumValue += basicInput[v * inside];
}
output[y * inside + x] = sumValue / (T)axis;
}
return;
}
template <typename T>
__global__ void MINIMUM(const T *input, T *output, int inside, int axis, int outside) {
int count = inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
int y = i / inside;
int x = i % inside;
const T* basicInput = input + y * axis * inside + x;
T res = basicInput[0];
for (int v=1; v<axis; ++v) {
res = min(basicInput[v * inside], res);
}
output[y * inside + x] = res;
}
return;
}
template <typename T>
__global__ void MAXIMUM(const T *input, T *output, int inside, int axis, int outside) {
int count = inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
int y = i / inside;
int x = i % inside;
const T* basicInput = input + y * axis * inside + x;
T res = basicInput[0];
for (int v=1; v<axis; ++v) {
res = max(basicInput[v * inside], res);
}
output[y * inside + x] = res;
}
return;
}
template <typename T>
__global__ void PROD(const T *input, T *output, int inside, int axis, int outside) {
int count = inside * outside;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
int y = i / inside;
int x = i % inside;
const T* basicInput = input + y * axis * inside + x;
T res = basicInput[0];
for (int v=1; v<axis; ++v) {
res *= basicInput[v * inside];
}
output[y * inside + x] = res;
}
return;
}
ErrorCode ReductionExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto input = (void*)inputs[0]->deviceId();
auto output = (void*)outputs[0]->deviceId();
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
int inside = 1;
int outside = 1;
int axis = inputs[0]->length(mAxis);
for (int i=0; i<mAxis; ++i) {
outside *= inputs[0]->length(i);
}
for (int i=mAxis+1; i<inputs[0]->dimensions(); ++i) {
inside *= inputs[0]->length(i);
}
int count = inside * outside;
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
if (inputs[0]->getType() == halide_type_of<float>()) {
switch (mType) {
case ReductionType_MEAN:
MEAN<<<block_num, threads_num>>>((const float*)input, (float*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_SUM:
SUM<<<block_num, threads_num>>>((const float*)input, (float*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_MINIMUM:
MINIMUM<<<block_num, threads_num>>>((const float*)input, (float*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_MAXIMUM:
MAXIMUM<<<block_num, threads_num>>>((const float*)input, (float*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_PROD:
PROD<<<block_num, threads_num>>>((const float*)input, (float*)output, inside, axis, outside);
return NO_ERROR;
}
MNN_ASSERT(false);
return NOT_SUPPORT;
}
MNN_ASSERT(inputs[0]->getType() == halide_type_of<int32_t>());
switch (mType) {
case ReductionType_MEAN:
MEAN<<<block_num, threads_num>>>((const int32_t*)input, (int32_t*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_SUM:
SUM<<<block_num, threads_num>>>((const int32_t*)input, (int32_t*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_MINIMUM:
MINIMUM<<<block_num, threads_num>>>((const int32_t*)input, (int32_t*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_MAXIMUM:
MAXIMUM<<<block_num, threads_num>>>((const int32_t*)input, (int32_t*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_PROD:
PROD<<<block_num, threads_num>>>((const int32_t*)input, (int32_t*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_ANY:
MAXIMUM<<<block_num, threads_num>>>((const int32_t*)input, (int32_t*)output, inside, axis, outside);
return NO_ERROR;
case ReductionType_ALL:
MINIMUM<<<block_num, threads_num>>>((const int32_t*)input, (int32_t*)output, inside, axis, outside);
return NO_ERROR;
}
MNN_ASSERT(false);
return NOT_SUPPORT;
}
class ReductionCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
auto type = inputs[0]->getType();
if (type.bits != 32) {
return nullptr;
}
if (type.code != halide_type_float && type.code != halide_type_int) {
return nullptr;
}
auto axis = op->main_as_ReductionParam()->dim()->data()[0];
auto opType = op->main_as_ReductionParam()->operation();
return new ReductionExecution(opType, axis, backend);
}
};
static CUDACreatorRegister<ReductionCreator> __init(OpType_Reduction);
}
}
|
0fa5017cbe27d00bce5f3784197af54d7c42cd37.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ============================================================================
*
* Authors:
* Hunter McCoy <[email protected]
*
* ============================================================================
*/
#include <poggers/allocators/ext_veb_nosize.cuh>
#include <poggers/allocators/alloc_memory_table.cuh>
#include <stdio.h>
#include <iostream>
#include <assert.h>
#include <chrono>
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace poggers::allocators;
// __global__ void test_kernel(veb_tree * tree, uint64_t num_removes, int num_iterations){
// uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
// if (tid >= num_removes)return;
// //printf("Tid %lu\n", tid);
// for (int i=0; i< num_iterations; i++){
// if (!tree->remove(tid)){
// printf("BUG\n");
// }
// tree->insert(tid);
// }
template <uint64_t mem_segment_size, uint64_t num_bits>
__host__ void boot_ext_tree(){
using tree_type = extending_veb_allocator_nosize<mem_segment_size>;
tree_type * tree_to_boot = tree_type::generate_on_device(num_bits, 1342);
hipDeviceSynchronize();
//tree_type::free_on_device(tree_to_boot);
hipDeviceSynchronize();
}
template <uint64_t mem_segment_size>
__host__ void boot_alloc_table(){
using table_type = alloc_table<mem_segment_size>;
table_type * table = table_type::generate_on_device();
hipDeviceSynchronize();
table_type::free_on_device(table);
}
// }
// __global__ void view_kernel(veb_tree * tree){
// uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
// if (tid != 0)return;
// }
//using allocator_type = buddy_allocator<0,0>;
int main(int argc, char** argv) {
boot_ext_tree<8ULL*1024*1024, 16ULL>();
boot_ext_tree<8ULL*1024*1024, 4096ULL>();
boot_alloc_table<8ULL*1024*1024>();
hipDeviceReset();
return 0;
}
|
0fa5017cbe27d00bce5f3784197af54d7c42cd37.cu
|
/*
* ============================================================================
*
* Authors:
* Hunter McCoy <[email protected]
*
* ============================================================================
*/
#include <poggers/allocators/ext_veb_nosize.cuh>
#include <poggers/allocators/alloc_memory_table.cuh>
#include <stdio.h>
#include <iostream>
#include <assert.h>
#include <chrono>
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace poggers::allocators;
// __global__ void test_kernel(veb_tree * tree, uint64_t num_removes, int num_iterations){
// uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
// if (tid >= num_removes)return;
// //printf("Tid %lu\n", tid);
// for (int i=0; i< num_iterations; i++){
// if (!tree->remove(tid)){
// printf("BUG\n");
// }
// tree->insert(tid);
// }
template <uint64_t mem_segment_size, uint64_t num_bits>
__host__ void boot_ext_tree(){
using tree_type = extending_veb_allocator_nosize<mem_segment_size>;
tree_type * tree_to_boot = tree_type::generate_on_device(num_bits, 1342);
cudaDeviceSynchronize();
//tree_type::free_on_device(tree_to_boot);
cudaDeviceSynchronize();
}
template <uint64_t mem_segment_size>
__host__ void boot_alloc_table(){
using table_type = alloc_table<mem_segment_size>;
table_type * table = table_type::generate_on_device();
cudaDeviceSynchronize();
table_type::free_on_device(table);
}
// }
// __global__ void view_kernel(veb_tree * tree){
// uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
// if (tid != 0)return;
// }
//using allocator_type = buddy_allocator<0,0>;
int main(int argc, char** argv) {
boot_ext_tree<8ULL*1024*1024, 16ULL>();
boot_ext_tree<8ULL*1024*1024, 4096ULL>();
boot_alloc_table<8ULL*1024*1024>();
cudaDeviceReset();
return 0;
}
|
89b8401849cd30a1a59a4ea0c0de152e9cb959d6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_advec_cell_kernel1_xdir [6][2];
static int dims_advec_cell_kernel1_xdir_h [6][2] = {0};
//user function
__device__
inline void advec_cell_kernel1_xdir_gpu(ACC<double> &pre_vol,
ACC<double> &post_vol,
const ACC<double> &volume,
const ACC<double> &vol_flux_x,
const ACC<double> &vol_flux_y,
const ACC<double> &vol_flux_z) {
pre_vol(0,0,0) = volume(0,0,0) +
( vol_flux_x(1,0,0) - vol_flux_x(0,0,0) +
vol_flux_y(0,1,0) - vol_flux_y(0,0,0) +
vol_flux_z(0,0,1) - vol_flux_z(0,0,0));
post_vol(0,0,0) = pre_vol(0,0,0) - ( vol_flux_x(1,0,0) - vol_flux_x(0,0,0));
}
__global__ void ops_advec_cell_kernel1_xdir(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_cell_kernel1_xdir[0][0] + idx_z * 1*1 * dims_advec_cell_kernel1_xdir[0][0] * dims_advec_cell_kernel1_xdir[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_cell_kernel1_xdir[1][0] + idx_z * 1*1 * dims_advec_cell_kernel1_xdir[1][0] * dims_advec_cell_kernel1_xdir[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_cell_kernel1_xdir[2][0] + idx_z * 1*1 * dims_advec_cell_kernel1_xdir[2][0] * dims_advec_cell_kernel1_xdir[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_cell_kernel1_xdir[3][0] + idx_z * 1*1 * dims_advec_cell_kernel1_xdir[3][0] * dims_advec_cell_kernel1_xdir[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_cell_kernel1_xdir[4][0] + idx_z * 1*1 * dims_advec_cell_kernel1_xdir[4][0] * dims_advec_cell_kernel1_xdir[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_cell_kernel1_xdir[5][0] + idx_z * 1*1 * dims_advec_cell_kernel1_xdir[5][0] * dims_advec_cell_kernel1_xdir[5][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_advec_cell_kernel1_xdir[0][0], dims_advec_cell_kernel1_xdir[0][1], arg0);
ACC<double> argp1(dims_advec_cell_kernel1_xdir[1][0], dims_advec_cell_kernel1_xdir[1][1], arg1);
const ACC<double> argp2(dims_advec_cell_kernel1_xdir[2][0], dims_advec_cell_kernel1_xdir[2][1], arg2);
const ACC<double> argp3(dims_advec_cell_kernel1_xdir[3][0], dims_advec_cell_kernel1_xdir[3][1], arg3);
const ACC<double> argp4(dims_advec_cell_kernel1_xdir[4][0], dims_advec_cell_kernel1_xdir[4][1], arg4);
const ACC<double> argp5(dims_advec_cell_kernel1_xdir[5][0], dims_advec_cell_kernel1_xdir[5][1], arg5);
advec_cell_kernel1_xdir_gpu(argp0, argp1, argp2, argp3,
argp4, argp5);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_cell_kernel1_xdir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5) {
#else
void ops_par_loop_advec_cell_kernel1_xdir_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,6,range,108)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(108,"advec_cell_kernel1_xdir");
OPS_kernels[108].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 6,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
if (xdim0 != dims_advec_cell_kernel1_xdir_h[0][0] || ydim0 != dims_advec_cell_kernel1_xdir_h[0][1] || xdim1 != dims_advec_cell_kernel1_xdir_h[1][0] || ydim1 != dims_advec_cell_kernel1_xdir_h[1][1] || xdim2 != dims_advec_cell_kernel1_xdir_h[2][0] || ydim2 != dims_advec_cell_kernel1_xdir_h[2][1] || xdim3 != dims_advec_cell_kernel1_xdir_h[3][0] || ydim3 != dims_advec_cell_kernel1_xdir_h[3][1] || xdim4 != dims_advec_cell_kernel1_xdir_h[4][0] || ydim4 != dims_advec_cell_kernel1_xdir_h[4][1] || xdim5 != dims_advec_cell_kernel1_xdir_h[5][0] || ydim5 != dims_advec_cell_kernel1_xdir_h[5][1]) {
dims_advec_cell_kernel1_xdir_h[0][0] = xdim0;
dims_advec_cell_kernel1_xdir_h[0][1] = ydim0;
dims_advec_cell_kernel1_xdir_h[1][0] = xdim1;
dims_advec_cell_kernel1_xdir_h[1][1] = ydim1;
dims_advec_cell_kernel1_xdir_h[2][0] = xdim2;
dims_advec_cell_kernel1_xdir_h[2][1] = ydim2;
dims_advec_cell_kernel1_xdir_h[3][0] = xdim3;
dims_advec_cell_kernel1_xdir_h[3][1] = ydim3;
dims_advec_cell_kernel1_xdir_h[4][0] = xdim4;
dims_advec_cell_kernel1_xdir_h[4][1] = ydim4;
dims_advec_cell_kernel1_xdir_h[5][0] = xdim5;
dims_advec_cell_kernel1_xdir_h[5][1] = ydim5;
cutilSafeCall(hipMemcpyToSymbol( dims_advec_cell_kernel1_xdir, dims_advec_cell_kernel1_xdir_h, sizeof(dims_advec_cell_kernel1_xdir)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
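// ceiling division: e.g. x_size = 100 with OPS_block_size_x = 32 gives (99/32)+1 = 4 blocks in x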
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[6];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
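// base0 is the byte offset of the loop's starting element, i.e.
// base_offset + dat0*(start[0]*stride[0] + size[0]*(start[1]*stride[1] + size[1]*start[2]*stride[2]));
// the same pattern is repeated for the remaining five arguments below.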
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args,6,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[108].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_advec_cell_kernel1_xdir), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[108].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[108].mpi_time += t2-t1;
OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_cell_kernel1_xdir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 108;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 108;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 6;
desc->args = (ops_arg*)malloc(6*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->function = ops_par_loop_advec_cell_kernel1_xdir_execute;
if (OPS_diags > 1) {
ops_timing_realloc(108,"advec_cell_kernel1_xdir");
}
ops_enqueue_kernel(desc);
}
#endif
|
89b8401849cd30a1a59a4ea0c0de152e9cb959d6.cu
|
//
// auto-generated by ops.py
//
__constant__ int dims_advec_cell_kernel1_xdir [6][2];
static int dims_advec_cell_kernel1_xdir_h [6][2] = {0};
//user function
__device__
inline void advec_cell_kernel1_xdir_gpu(ACC<double> &pre_vol,
ACC<double> &post_vol,
const ACC<double> &volume,
const ACC<double> &vol_flux_x,
const ACC<double> &vol_flux_y,
const ACC<double> &vol_flux_z) {
pre_vol(0,0,0) = volume(0,0,0) +
( vol_flux_x(1,0,0) - vol_flux_x(0,0,0) +
vol_flux_y(0,1,0) - vol_flux_y(0,0,0) +
vol_flux_z(0,0,1) - vol_flux_z(0,0,0));
post_vol(0,0,0) = pre_vol(0,0,0) - ( vol_flux_x(1,0,0) - vol_flux_x(0,0,0));
}
__global__ void ops_advec_cell_kernel1_xdir(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_cell_kernel1_xdir[0][0] + idx_z * 1*1 * dims_advec_cell_kernel1_xdir[0][0] * dims_advec_cell_kernel1_xdir[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_cell_kernel1_xdir[1][0] + idx_z * 1*1 * dims_advec_cell_kernel1_xdir[1][0] * dims_advec_cell_kernel1_xdir[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_cell_kernel1_xdir[2][0] + idx_z * 1*1 * dims_advec_cell_kernel1_xdir[2][0] * dims_advec_cell_kernel1_xdir[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_cell_kernel1_xdir[3][0] + idx_z * 1*1 * dims_advec_cell_kernel1_xdir[3][0] * dims_advec_cell_kernel1_xdir[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_cell_kernel1_xdir[4][0] + idx_z * 1*1 * dims_advec_cell_kernel1_xdir[4][0] * dims_advec_cell_kernel1_xdir[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_cell_kernel1_xdir[5][0] + idx_z * 1*1 * dims_advec_cell_kernel1_xdir[5][0] * dims_advec_cell_kernel1_xdir[5][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_advec_cell_kernel1_xdir[0][0], dims_advec_cell_kernel1_xdir[0][1], arg0);
ACC<double> argp1(dims_advec_cell_kernel1_xdir[1][0], dims_advec_cell_kernel1_xdir[1][1], arg1);
const ACC<double> argp2(dims_advec_cell_kernel1_xdir[2][0], dims_advec_cell_kernel1_xdir[2][1], arg2);
const ACC<double> argp3(dims_advec_cell_kernel1_xdir[3][0], dims_advec_cell_kernel1_xdir[3][1], arg3);
const ACC<double> argp4(dims_advec_cell_kernel1_xdir[4][0], dims_advec_cell_kernel1_xdir[4][1], arg4);
const ACC<double> argp5(dims_advec_cell_kernel1_xdir[5][0], dims_advec_cell_kernel1_xdir[5][1], arg5);
advec_cell_kernel1_xdir_gpu(argp0, argp1, argp2, argp3,
argp4, argp5);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_cell_kernel1_xdir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5) {
#else
void ops_par_loop_advec_cell_kernel1_xdir_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,6,range,108)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(108,"advec_cell_kernel1_xdir");
OPS_kernels[108].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 6,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
if (xdim0 != dims_advec_cell_kernel1_xdir_h[0][0] || ydim0 != dims_advec_cell_kernel1_xdir_h[0][1] || xdim1 != dims_advec_cell_kernel1_xdir_h[1][0] || ydim1 != dims_advec_cell_kernel1_xdir_h[1][1] || xdim2 != dims_advec_cell_kernel1_xdir_h[2][0] || ydim2 != dims_advec_cell_kernel1_xdir_h[2][1] || xdim3 != dims_advec_cell_kernel1_xdir_h[3][0] || ydim3 != dims_advec_cell_kernel1_xdir_h[3][1] || xdim4 != dims_advec_cell_kernel1_xdir_h[4][0] || ydim4 != dims_advec_cell_kernel1_xdir_h[4][1] || xdim5 != dims_advec_cell_kernel1_xdir_h[5][0] || ydim5 != dims_advec_cell_kernel1_xdir_h[5][1]) {
dims_advec_cell_kernel1_xdir_h[0][0] = xdim0;
dims_advec_cell_kernel1_xdir_h[0][1] = ydim0;
dims_advec_cell_kernel1_xdir_h[1][0] = xdim1;
dims_advec_cell_kernel1_xdir_h[1][1] = ydim1;
dims_advec_cell_kernel1_xdir_h[2][0] = xdim2;
dims_advec_cell_kernel1_xdir_h[2][1] = ydim2;
dims_advec_cell_kernel1_xdir_h[3][0] = xdim3;
dims_advec_cell_kernel1_xdir_h[3][1] = ydim3;
dims_advec_cell_kernel1_xdir_h[4][0] = xdim4;
dims_advec_cell_kernel1_xdir_h[4][1] = ydim4;
dims_advec_cell_kernel1_xdir_h[5][0] = xdim5;
dims_advec_cell_kernel1_xdir_h[5][1] = ydim5;
cutilSafeCall(cudaMemcpyToSymbol( dims_advec_cell_kernel1_xdir, dims_advec_cell_kernel1_xdir_h, sizeof(dims_advec_cell_kernel1_xdir)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[6];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args,6,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[108].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_advec_cell_kernel1_xdir<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[108].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[108].mpi_time += t2-t1;
OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[108].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_cell_kernel1_xdir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 108;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 108;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 6;
desc->args = (ops_arg*)malloc(6*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->function = ops_par_loop_advec_cell_kernel1_xdir_execute;
if (OPS_diags > 1) {
ops_timing_realloc(108,"advec_cell_kernel1_xdir");
}
ops_enqueue_kernel(desc);
}
#endif
|
5c61450ad68482157766fee4b34cbbcef65968f8.hip
|
// !!! This is a file automatically generated by hipify!!!
// example1.cpp : Defines the entry point for the console application.
//
#include <stdio.h>
#include <hip/hip_runtime.h>
// Kernel that executes on the CUDA device
__global__ void square_array(float *a, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N) a[idx] = a[idx] * a[idx];
}
// main routine that executes on the host
int main(void)
{
float *a_h, *a_d; // Pointer to host & device arrays
const int N = 10; // Number of elements in arrays
size_t size = N * sizeof(float);
a_h = (float *)malloc(size); // Allocate array on host
hipMalloc((void **) &a_d, size); // Allocate array on device
// Initialize host array and copy it to CUDA device
for (int i=0; i<N; i++) a_h[i] = (float)i;
hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice);
// Do calculation on device:
int block_size = 4;
int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
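// round-up division: with N = 10 and block_size = 4 this gives 2 + 1 = 3 blocks (12 threads);
// the two surplus threads are masked off by the idx < N guard in the kernel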
hipLaunchKernelGGL(( square_array) , dim3(n_blocks), dim3(block_size) , 0, 0, a_d, N);
// Retrieve result from device and store it in host array
hipMemcpy(a_h, a_d, sizeof(float)*N, hipMemcpyDeviceToHost);
// Print results
for (int i=0; i<N; i++) printf("%d %f\n", i, a_h[i]);
// Cleanup
free(a_h); hipFree(a_d);
}
|
5c61450ad68482157766fee4b34cbbcef65968f8.cu
|
// example1.cpp : Defines the entry point for the console application.
//
#include <stdio.h>
#include <cuda.h>
// Kernel that executes on the CUDA device
__global__ void square_array(float *a, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N) a[idx] = a[idx] * a[idx];
}
// main routine that executes on the host
int main(void)
{
float *a_h, *a_d; // Pointer to host & device arrays
const int N = 10; // Number of elements in arrays
size_t size = N * sizeof(float);
a_h = (float *)malloc(size); // Allocate array on host
cudaMalloc((void **) &a_d, size); // Allocate array on device
// Initialize host array and copy it to CUDA device
for (int i=0; i<N; i++) a_h[i] = (float)i;
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
// Do calculation on device:
int block_size = 4;
int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
square_array <<< n_blocks, block_size >>> (a_d, N);
// Retrieve result from device and store it in host array
cudaMemcpy(a_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
// Print results
for (int i=0; i<N; i++) printf("%d %f\n", i, a_h[i]);
// Cleanup
free(a_h); cudaFree(a_d);
}
|
a0012f31c20ed7c8c7e3a4255543616fef6442c8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
typedef struct Matrix {
int width;
int height;
float *elements;
} Mat;
#define BLOCK_SIZE 16
#define w 4096
#define h 4096
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
void MatMul(const Matrix A, const Matrix B, Matrix C);
int main() {
Mat h_A;
h_A.width = w;
h_A.height = h;
h_A.elements = (float *)malloc(sizeof(float) * h_A.width * h_A.height);
for (int i = 0; i < h_A.height; ++i) {
for (int j = 0; j < h_A.width; ++j) {
h_A.elements[i * h_A.width + j] = 1;
}
}
Mat h_B;
h_B.width = w;
h_B.height = h;
h_B.elements = (float *)malloc(sizeof(float) * h_B.width * h_B.height);
for (int i = 0; i < h_B.height; ++i) {
for (int j = 0; j < h_B.width; ++j) {
h_B.elements[i * h_B.width + j] = 1;
}
}
Mat h_C;
h_C.width = w;
h_C.height = h;
h_C.elements = (float *)malloc(sizeof(float) * h_C.width * h_C.height);
for (int i = 0; i < h_C.height; ++i) {
for (int j = 0; j < h_C.width; ++j) {
h_C.elements[i * h_C.width + j] = 0;
}
}
MatMul(h_A, h_B, h_C);
float tmp_value = w;
float sum_error = 0;
for (int i = 0; i < h_C.height; ++i) {
for (int j = 0; j < h_C.width; ++j) {
sum_error += fabs(tmp_value - h_C.elements[i * h_C.width + j]);
}
}
cout << "sum error : " << sum_error << endl;
free(h_A.elements);
free(h_B.elements);
free(h_C.elements);
return 0;
}
void MatMul(const Matrix A, const Matrix B, Matrix C) {
Mat d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipMalloc((void **)&d_A.elements, size);
hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
Mat d_B;
d_B.width = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
hipMalloc((void **)&d_B.elements, size);
hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
Mat d_C;
d_C.width = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
hipMalloc((void **)&d_C.elements, size);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(256, 256);
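// With BLOCK_SIZE = 16, a 256x256 grid covers the full 4096x4096 matrix exactly (256*16 = 4096),
// so each thread computes one element; the grid-stride loops in MatMulKernel also tolerate a smaller grid.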
cout << dimGrid.x << " " << dimGrid.y << endl;
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
for (int row = blockIdx.y * blockDim.y + threadIdx.y; row < C.height;
row += gridDim.y * blockDim.y) {
for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < C.width;
col += gridDim.x * blockDim.x) {
float CValue = 0;
for (int e = 0; e < A.width; ++e) {
CValue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
}
C.elements[row * C.width + col] = CValue;
}
}
}
|
a0012f31c20ed7c8c7e3a4255543616fef6442c8.cu
|
#include <iostream>
using namespace std;
typedef struct Matrix {
int width;
int height;
float *elements;
} Mat;
#define BLOCK_SIZE 16
#define w 4096
#define h 4096
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
void MatMul(const Matrix A, const Matrix B, Matrix C);
int main() {
Mat h_A;
h_A.width = w;
h_A.height = h;
h_A.elements = (float *)malloc(sizeof(float) * h_A.width * h_A.height);
for (int i = 0; i < h_A.height; ++i) {
for (int j = 0; j < h_A.width; ++j) {
h_A.elements[i * h_A.width + j] = 1;
}
}
Mat h_B;
h_B.width = w;
h_B.height = h;
h_B.elements = (float *)malloc(sizeof(float) * h_B.width * h_B.height);
for (int i = 0; i < h_B.height; ++i) {
for (int j = 0; j < h_B.width; ++j) {
h_B.elements[i * h_B.width + j] = 1;
}
}
Mat h_C;
h_C.width = w;
h_C.height = h;
h_C.elements = (float *)malloc(sizeof(float) * h_C.width * h_C.height);
for (int i = 0; i < h_C.height; ++i) {
for (int j = 0; j < h_C.width; ++j) {
h_C.elements[i * h_C.width + j] = 0;
}
}
MatMul(h_A, h_B, h_C);
float tmp_value = w;
float sum_error = 0;
for (int i = 0; i < h_C.height; ++i) {
for (int j = 0; j < h_C.width; ++j) {
sum_error += fabs(tmp_value - h_C.elements[i * h_C.width + j]);
}
}
cout << "sum error : " << sum_error << endl;
free(h_A.elements);
free(h_B.elements);
free(h_C.elements);
return 0;
}
void MatMul(const Matrix A, const Matrix B, Matrix C) {
Mat d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc((void **)&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
Mat d_B;
d_B.width = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc((void **)&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
Mat d_C;
d_C.width = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc((void **)&d_C.elements, size);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(256, 256);
cout << dimGrid.x << " " << dimGrid.y << endl;
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
for (int row = blockIdx.y * blockDim.y + threadIdx.y; row < C.height;
row += gridDim.y * blockDim.y) {
for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < C.width;
col += gridDim.x * blockDim.x) {
float CValue = 0;
for (int e = 0; e < A.width; ++e) {
CValue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
}
C.elements[row * C.width + col] = CValue;
}
}
}
|
18bca432000cb32eb6be7989fb7af5d6b531d40d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "kernels.h"
TAG_HIDDEN __global__ void kernel (void){
}
TAG_PUBLIC int run_tests(void) {
hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, );
return (int)hipDeviceSynchronize();
}
|
18bca432000cb32eb6be7989fb7af5d6b531d40d.cu
|
#include <stdio.h>
#include <cuda_runtime.h>
#include "kernels.h"
TAG_HIDDEN __global__ void kernel (void){
}
TAG_PUBLIC int run_tests(void) {
kernel<<<1,1>>>();
return (int)cudaDeviceSynchronize();
}
|
2057cac7d403a21f28bb68d38320419a49c206de.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <windows.h>
#define _USE_MATH_DEFINES
#include "math.h"
//#define N 2048
#define N 8388608
float data_real[N];
float data_imag[N];
float tw_real[N/2];
float tw_imag[N/2];
__global__ void fft(float* data_real_d,float* data_imag_d,float* tw_real_d,float* tw_imag_d,int p)
{
unsigned int x,block,sub,index,tmp2;
float tw_real_reg;
float tw_imag_reg;
unsigned int power;
float tmp;
float real,real2,imag,imag2;
index=threadIdx.x+blockIdx.x*blockDim.x;
power=__powf(2,p);
//determine which block the thread is in(not cuda block)
//x=N/(power*2);
//block=(index)/x;
x=N>>(p+1);
tmp2=__log2f(x);
block=index>>tmp2;
//sub is the subscript of the array where the thread should get his element1 for processing
sub=index+(x*block);
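//illustration: for N=16 and p=1, x = 16>>2 = 4, so thread 5 is in block 5>>2 = 1,
//sub = 5 + 4*1 = 9, and it butterflies elements 9 and 13 using twiddle index (5 & 3)*2 = 2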
//issue request for real parts
real=data_real_d[sub];
real2=data_real_d[sub+x];
//fetch twiddle factor
//tmp=(index)%x;
tmp=(index)&(x-1);
tw_real_reg=tw_real_d[(int)tmp*power];
tw_imag_reg=tw_imag_d[(int)tmp*power];
//issue request for imaginary parts
imag=data_imag_d[sub];
imag2=data_imag_d[sub+x];
//butterfly real parts
tmp=real+real2;
real2=real-real2;
real=tmp;
//write back real results of butterfly,only this part is written because we still need to twiddle the other
data_real_d[sub]=real;
//butterfly imag part
tmp=imag+imag2;
imag2=imag-imag2;
imag=tmp;
//multiply by twiddle
tmp=real2;
real2=real2*tw_real_reg-imag2*tw_imag_reg;
data_real_d[sub+x]=real2;
imag2=tmp*tw_imag_reg+imag2*tw_real_reg;
//write back imag result of butterfly
data_imag_d[sub]=imag;
data_imag_d[sub+x]=imag2;
}
void bit_reversal()
{
long i,i1,j,k,i2;
double c1,c2,tx,ty;
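// bit-reversal permutation: e.g. for N=8, element 3 (binary 011) swaps with element 6 (binary 110);
// j tracks the bit-reversed counterpart of i as i increments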
i2 = N >> 1;
j = 0;
for (i=0;i<N-1;i++) {
if (i < j) {
tx = data_real[i];
ty = data_imag[i];
data_real[i] = data_real[j];
data_imag[i] = data_imag[j];
data_real[j] = tx;
data_imag[j] = ty;
}
k = i2;
while (k <= j) {
j -= k;
k >>= 1;
}
j += k;
}
}
void compute_twiddle()
{
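// twiddle factors: tw[k] = e^(-2*pi*i*k/N) = cos(2*pi*k/N) - i*sin(2*pi*k/N), for k = 0..N/2-1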
for(int i=0;i<N/2;i++)
{
tw_real[i]=cos(2*M_PI*i/N);
tw_imag[i]=-sin(2*M_PI*i/N);
}
}
int main( int argc, char** argv)
{
for(int i=0;i<N;i++)
{
if(i<N/2)
{data_real[i]=1;
data_imag[i]=0;}
else{
data_real[i]=0;
data_imag[i]=0;
}
}
//printf("data[0]=%f + %f i\n",data_real[0],data_imag[0]);
/// for(int i=0;i<N;i++)
// {
// printf("data[%d]=%f + %f i\n",i,data_real[i],data_imag[i]);
// }
compute_twiddle();
int passes=log((float)N)/log((float)2);
float* data_real_d;
float* data_imag_d;
float* tw_real_d;
float* tw_imag_d;
hipMalloc((void**)&data_real_d,N*sizeof(float));
hipMalloc((void**)&data_imag_d,N*sizeof(float));
hipMalloc((void**)&tw_imag_d,(N/2)*sizeof(float));
hipMalloc((void**)&tw_real_d,(N/2)*sizeof(float));
hipMemcpy(data_real_d,data_real,sizeof(float)*N,hipMemcpyHostToDevice);
hipMemcpy(data_imag_d,data_imag,sizeof(float)*N,hipMemcpyHostToDevice);
hipMemcpy(tw_real_d,tw_real,sizeof(float)*(N/2),hipMemcpyHostToDevice);
hipMemcpy(tw_imag_d,tw_imag,sizeof(float)*(N/2),hipMemcpyHostToDevice);
dim3 dimBlock(512,1,1);
dim3 dimGrid(N/1024,1,1);
hipDeviceSynchronize();
long int before = GetTickCount();
hipEvent_t start, stop; float time;
hipEventCreate(&start);
hipEventCreate(&stop); hipEventRecord( start, 0 );
for(int i=0;i<passes;i++)
{ hipLaunchKernelGGL(fft, dim3(dimGrid), dim3(dimBlock), 0, 0, data_real_d, data_imag_d, tw_real_d, tw_imag_d, i); }
hipDeviceSynchronize();
long int after = GetTickCount();
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
const char* err=hipGetErrorString(hipGetLastError());
for(int i=0;i<10;i++)
{printf("%c",err[i]);}
printf("\n");
printf("%d ms\n",after-before);
hipMemcpy(data_real,data_real_d,4*N,hipMemcpyDeviceToHost);
hipMemcpy(data_imag,data_imag_d,4*N,hipMemcpyDeviceToHost);
hipFree(data_real_d);
hipFree(data_imag_d);
hipFree(tw_real_d);
hipFree(tw_imag_d);
bit_reversal();
for(int i=N-10;i<N;i++)
{
printf("data[%d]=%f + %f i\n",i,data_real[i],data_imag[i]);
}
//printf("data[0]=%f + %f i\n",data_real[0],data_imag[0]);
//printf("data[1]=%f + %f i\n",data_real[21],data_imag[21]);
printf("cuda timer record %f ms",time);
scanf("%d");
}
|
2057cac7d403a21f28bb68d38320419a49c206de.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <windows.h>
#define _USE_MATH_DEFINES
#include "math.h"
//#define N 2048
#define N 8388608
float data_real[N];
float data_imag[N];
float tw_real[N/2];
float tw_imag[N/2];
__global__ void fft(float* data_real_d,float* data_imag_d,float* tw_real_d,float* tw_imag_d,int p)
{
unsigned int x,block,sub,index,tmp2;
float tw_real_reg;
float tw_imag_reg;
unsigned int power;
float tmp;
float real,real2,imag,imag2;
index=threadIdx.x+blockIdx.x*blockDim.x;
power=__powf(2,p);
//determine which block the thread is in(not cuda block)
//x=N/(power*2);
//block=(index)/x;
x=N>>(p+1);
tmp2=__log2f(x);
block=index>>tmp2;
//sub is the subscript of the array where the thread should get his element1 for processing
sub=index+(x*block);
//issue request for real parts
real=data_real_d[sub];
real2=data_real_d[sub+x];
//fetch twiddle factor
//tmp=(index)%x;
tmp=(index)&(x-1);
tw_real_reg=tw_real_d[(int)tmp*power];
tw_imag_reg=tw_imag_d[(int)tmp*power];
//issue request for imaginary parts
imag=data_imag_d[sub];
imag2=data_imag_d[sub+x];
//butterfly real parts
tmp=real+real2;
real2=real-real2;
real=tmp;
//write back real results of butterfly,only this part is written because we still need to twiddle the other
data_real_d[sub]=real;
//butterfly imag part
tmp=imag+imag2;
imag2=imag-imag2;
imag=tmp;
//multiply by twiddle
tmp=real2;
real2=real2*tw_real_reg-imag2*tw_imag_reg;
data_real_d[sub+x]=real2;
imag2=tmp*tw_imag_reg+imag2*tw_real_reg;
//write back imag result of butterfly
data_imag_d[sub]=imag;
data_imag_d[sub+x]=imag2;
}
void bit_reversal()
{
long i,i1,j,k,i2;
double c1,c2,tx,ty;
i2 = N >> 1;
j = 0;
for (i=0;i<N-1;i++) {
if (i < j) {
tx = data_real[i];
ty = data_imag[i];
data_real[i] = data_real[j];
data_imag[i] = data_imag[j];
data_real[j] = tx;
data_imag[j] = ty;
}
k = i2;
while (k <= j) {
j -= k;
k >>= 1;
}
j += k;
}
}
void compute_twiddle()
{
for(int i=0;i<N/2;i++)
{
tw_real[i]=cos(2*M_PI*i/N);
tw_imag[i]=-sin(2*M_PI*i/N);
}
}
int main( int argc, char** argv)
{
for(int i=0;i<N;i++)
{
if(i<N/2)
{data_real[i]=1;
data_imag[i]=0;}
else{
data_real[i]=0;
data_imag[i]=0;
}
}
//printf("data[0]=%f + %f i\n",data_real[0],data_imag[0]);
/// for(int i=0;i<N;i++)
// {
// printf("data[%d]=%f + %f i\n",i,data_real[i],data_imag[i]);
// }
compute_twiddle();
int passes=log((float)N)/log((float)2);
float* data_real_d;
float* data_imag_d;
float* tw_real_d;
float* tw_imag_d;
cudaMalloc((void**)&data_real_d,N*sizeof(float));
cudaMalloc((void**)&data_imag_d,N*sizeof(float));
cudaMalloc((void**)&tw_imag_d,(N/2)*sizeof(float));
cudaMalloc((void**)&tw_real_d,(N/2)*sizeof(float));
cudaMemcpy(data_real_d,data_real,sizeof(float)*N,cudaMemcpyHostToDevice);
cudaMemcpy(data_imag_d,data_imag,sizeof(float)*N,cudaMemcpyHostToDevice);
cudaMemcpy(tw_real_d,tw_real,sizeof(float)*(N/2),cudaMemcpyHostToDevice);
cudaMemcpy(tw_imag_d,tw_imag,sizeof(float)*(N/2),cudaMemcpyHostToDevice);
dim3 dimBlock(512,1,1);
dim3 dimGrid(N/1024,1,1);
cudaThreadSynchronize();
long int before = GetTickCount();
cudaEvent_t start, stop; float time;
cudaEventCreate(&start);
cudaEventCreate(&stop); cudaEventRecord( start, 0 );
for(int i=0;i<passes;i++)
{fft<<<dimGrid,dimBlock>>>(data_real_d,data_imag_d,tw_real_d,tw_imag_d,i);}
cudaThreadSynchronize();
long int after = GetTickCount();
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
const char* err=cudaGetErrorString(cudaGetLastError());
for(int i=0;i<10;i++)
{printf("%c",err[i]);}
printf("\n");
printf("%d ms\n",after-before);
cudaMemcpy(data_real,data_real_d,sizeof(float)*N,cudaMemcpyDeviceToHost);
cudaMemcpy(data_imag,data_imag_d,sizeof(float)*N,cudaMemcpyDeviceToHost);
cudaFree(data_real_d);
cudaFree(data_imag_d);
cudaFree(tw_real_d);
cudaFree(tw_imag_d);
bit_reversal();
for(int i=N-10;i<N;i++)
{
printf("data[%d]=%f + %f i\n",i,data_real[i],data_imag[i]);
}
//printf("data[0]=%f + %f i\n",data_real[0],data_imag[0]);
//printf("data[1]=%f + %f i\n",data_real[21],data_imag[21]);
printf("cuda timer record %f ms",time);
getchar(); //keep the console window open
}
|
14a168e08b9379f3735645ba2838e2611498ed5d.hip
|
// !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdlib.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"
#ifndef BLOCKSIZE // run 16, 32, 64
#define BLOCKSIZE 32 // number of threads per block
#endif
#ifndef SIZE
#define SIZE 16000 // 16k, 32k, 64k, 128k, 256k and 512k
#endif
//#ifndef NUMTRIALS
//#define NUMTRIALS 10 // to make the timing more accurate
//#endif
#ifndef TOLERANCE
#define TOLERANCE 0.00001f // tolerance to relative error
#endif
// ranges for the random numbers:
const float XCMIN = 0.0;
const float XCMAX = 2.0;
const float YCMIN = 0.0;
const float YCMAX = 2.0;
const float RMIN = 0.5;
const float RMAX = 2.0;
float Ranf( float low, float high ){
float r = (float)rand(); // 0 - RAND_MAX
float t = r / (float) RAND_MAX; // 0. - 1.
return low + t * ( high - low );
}
void TimeOfDaySeed( ){
struct tm y2k = { 0 };
y2k.tm_hour = 0;
y2k.tm_min = 0;
y2k.tm_sec = 0;
y2k.tm_year = 100;
y2k.tm_mon = 0;
y2k.tm_mday = 1;
time_t timer;
time( &timer );
double seconds = difftime( timer, mktime(&y2k) );
unsigned int seed = (unsigned int)( 1000.*seconds ); // milliseconds
srand( seed );
}
// Monte Carlo Simulation (CUDA Kernel)
// int array C holds the total number of 'hits' per block in each index
__global__ void MonteCarlo( float *X, float *Y, float *R, int *C ){
__shared__ int hits[BLOCKSIZE];
unsigned int numItems = blockDim.x;
unsigned int tnum = threadIdx.x;
unsigned int wgNum = blockIdx.x;
unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
hits[tnum] = 1;
// solve for the intersection using the quadratic formula:
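// the beam starts at the origin and travels along (1,1), so points on it are (t,t);
// substituting into (t-X)^2 + (t-Y)^2 = R^2 gives 2t^2 - 2(X+Y)t + (X^2+Y^2-R^2) = 0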
float a = 2.;
float b = -2.*( X[gid] + Y[gid] );
float c = X[gid]*X[gid] + Y[gid]*Y[gid] - R[gid]*R[gid];
float d = b*b - 4.*a*c;
// If d is less than 0., then the circle was completely missed. (Case A)
if( d < 0.){
hits[tnum] = 0;
}
else{
// else it hits the circle...
// get the first intersection:
d = sqrt( d );
float t1 = (-b + d ) / ( 2.*a ); // time to intersect the circle
float t2 = (-b - d ) / ( 2.*a ); // time to intersect the circle
float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection
// If tmin is less than 0., then the circle completely engulfs the laser pointer. (Case B)
if ( tmin < 0.){
hits[tnum] = 0;
}
else{
// where does it intersect the circle?
float xcir = tmin;
float ycir = tmin;
// get the unitized normal vector at the point of intersection:
float nx = xcir - X[gid];
float ny = ycir - Y[gid];
float n = sqrt( nx * nx + ny * ny );
nx /= n; // unit vector
ny /= n; // unit vector
// get the unitized incoming vector:
float inx = xcir - 0.;
float iny = ycir - 0.;
float in = sqrt( inx * inx + iny * iny );
inx /= in; // unit vector
iny /= in; // unit vector
// get the outgoing (bounced) vector:
float dot = inx*nx + iny*ny;
//float outx = inx - 2.*nx*dot; // angle of reflection = angle of incidence`
float outy = iny - 2.*ny*dot; // angle of reflection = angle of incidence`
// find out if it hits the infinite plate:
float t = ( 0. - ycir ) / outy;
// If t is less than 0., then the reflected beam went up instead of down
if( t < 0.){
hits[tnum] = 0;
}
}
}
// add up all hits
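// strided tree reduction in shared memory: in each round, threads whose index is a
// multiple of 2*offset pull in the partial sum sitting offset lanes away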
for (int offset = 1; offset < numItems; offset *= 2){
int mask = 2 * offset - 1;
__syncthreads();
if ((tnum & mask) == 0){
hits[tnum] += hits[tnum + offset];
}
}
__syncthreads();
// record results to array of hits per block
if (tnum == 0)
C[wgNum] = hits[0];
}
// main program:
int main( int argc, char* argv[ ] ){
int dev = findCudaDevice(argc, (const char **)argv);
TimeOfDaySeed();
// allocate host memory:
float * hxcs = new float [ SIZE ];
float * hycs = new float [ SIZE ];
float * hrs = new float [ SIZE ];
int * hC = new int [ SIZE/BLOCKSIZE ];
// fill in arrays with random values in the given ranges:
for( int i = 0; i < SIZE; i++ ){
hxcs[i] = Ranf(XCMIN, XCMAX);
hycs[i] = Ranf(YCMIN, YCMAX);
hrs[i] = Ranf(RMIN, RMAX);
}
// allocate device memory:
float *dxcs, *dycs, *drs;
int *dC;
dim3 dimsX( SIZE, 1, 1 );
dim3 dimsY( SIZE, 1, 1 );
dim3 dimsR( SIZE, 1, 1 );
dim3 dimsC( SIZE/BLOCKSIZE, 1, 1 );
hipError_t status;
status = hipMalloc( reinterpret_cast<void **>(&dxcs), SIZE*sizeof(float) );
checkCudaErrors( status );
status = hipMalloc( reinterpret_cast<void **>(&dycs), SIZE*sizeof(float) );
checkCudaErrors( status );
status = hipMalloc( reinterpret_cast<void **>(&drs), SIZE*sizeof(float) );
checkCudaErrors( status );
status = hipMalloc( reinterpret_cast<void **>(&dC), (SIZE/BLOCKSIZE)*sizeof(int) );
checkCudaErrors( status );
// copy host memory to the device:
status = hipMemcpy( dxcs, hxcs, SIZE*sizeof(float), hipMemcpyHostToDevice );
checkCudaErrors( status );
status = hipMemcpy( dycs, hycs, SIZE*sizeof(float), hipMemcpyHostToDevice );
checkCudaErrors( status );
status = hipMemcpy( drs, hrs, SIZE*sizeof(float), hipMemcpyHostToDevice );
checkCudaErrors( status );
// setup the execution parameters:
dim3 threads(BLOCKSIZE, 1, 1 );
dim3 grid( SIZE / threads.x, 1, 1 );
// Create and start timer
hipDeviceSynchronize( );
// allocate CUDA events that we'll use for timing:
hipEvent_t start, stop;
status = hipEventCreate( &start );
checkCudaErrors( status );
status = hipEventCreate( &stop );
checkCudaErrors( status );
// record the start event:
status = hipEventRecord( start, NULL );
checkCudaErrors( status );
// execute the kernel:
hipLaunchKernelGGL(( MonteCarlo), dim3(grid), dim3(threads) , 0, 0, dxcs, dycs, drs, dC);
// record the stop event:
status = hipEventRecord( stop, NULL );
checkCudaErrors( status );
// wait for the stop event to complete:
status = hipEventSynchronize( stop );
checkCudaErrors( status );
float msecTotal = 0.0f;
status = hipEventElapsedTime( &msecTotal, start, stop );
checkCudaErrors( status );
// compute and print the performance
double secondsTotal = 0.001 * (double)msecTotal;
double TrialsPerSecond = (float)SIZE / secondsTotal;
double megaTrialsPerSecond = TrialsPerSecond / 1000000.;
fprintf( stderr, "Blocksize = %d, NumTrials = %d, MegaTrials/Second = %10.6lf\n", BLOCKSIZE, SIZE, megaTrialsPerSecond );
// copy result from the device to the host:
	status = hipMemcpy( hC, dC, (SIZE/BLOCKSIZE)*sizeof(int), hipMemcpyDeviceToHost );
checkCudaErrors( status );
// check the sum of all recordings in C:
int sumHits = 0;
for(int i = 0; i < SIZE/BLOCKSIZE; i++ ){
sumHits += hC[i];
}
// probability should come out around 0.42 (42%)
fprintf( stderr, "probability = %4.6lf\n", (float)sumHits/(float)SIZE);
// clean up memory:
delete [ ] hxcs;
delete [ ] hycs;
delete [ ] hrs;
delete [ ] hC;
status = hipFree( dxcs );
checkCudaErrors( status );
status = hipFree( dycs );
checkCudaErrors( status );
status = hipFree( drs );
checkCudaErrors( status );
status = hipFree( dC );
checkCudaErrors( status );
return 0;
}
|
14a168e08b9379f3735645ba2838e2611498ed5d.cu
|
// System includes
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"
#ifndef BLOCKSIZE // run 16, 32, 64
#define BLOCKSIZE 32 // number of threads per block
#endif
#ifndef SIZE
#define SIZE 16000 // 16k, 32k, 64k, 128k, 256k and 512k
#endif
//#ifndef NUMTRIALS
//#define NUMTRIALS 10 // to make the timing more accurate
//#endif
#ifndef TOLERANCE
#define TOLERANCE 0.00001f // tolerance to relative error
#endif
// ranges for the random numbers:
const float XCMIN = 0.0;
const float XCMAX = 2.0;
const float YCMIN = 0.0;
const float YCMAX = 2.0;
const float RMIN = 0.5;
const float RMAX = 2.0;
float Ranf( float low, float high ){
float r = (float)rand(); // 0 - RAND_MAX
float t = r / (float) RAND_MAX; // 0. - 1.
return low + t * ( high - low );
}
void TimeOfDaySeed( ){
struct tm y2k = { 0 };
y2k.tm_hour = 0;
y2k.tm_min = 0;
y2k.tm_sec = 0;
y2k.tm_year = 100;
y2k.tm_mon = 0;
y2k.tm_mday = 1;
time_t timer;
time( &timer );
double seconds = difftime( timer, mktime(&y2k) );
unsigned int seed = (unsigned int)( 1000.*seconds ); // milliseconds
srand( seed );
}
// Monte Carlo Simulation (CUDA Kernel)
// int array C holds the total number of 'hits' per block in each index
__global__ void MonteCarlo( float *X, float *Y, float *R, int *C ){
__shared__ int hits[BLOCKSIZE];
unsigned int numItems = blockDim.x;
unsigned int tnum = threadIdx.x;
unsigned int wgNum = blockIdx.x;
unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
hits[tnum] = 1;
// solve for the intersection using the quadratic formula:
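// the beam starts at the origin and travels along (1,1), so points on it are (t,t);
// substituting into (t-X)^2 + (t-Y)^2 = R^2 gives 2t^2 - 2(X+Y)t + (X^2+Y^2-R^2) = 0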
float a = 2.;
float b = -2.*( X[gid] + Y[gid] );
float c = X[gid]*X[gid] + Y[gid]*Y[gid] - R[gid]*R[gid];
float d = b*b - 4.*a*c;
// If d is less than 0., then the circle was completely missed. (Case A)
if( d < 0.){
hits[tnum] = 0;
}
else{
// else it hits the circle...
// get the first intersection:
d = sqrt( d );
float t1 = (-b + d ) / ( 2.*a ); // time to intersect the circle
float t2 = (-b - d ) / ( 2.*a ); // time to intersect the circle
float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection
// If tmin is less than 0., then the circle completely engulfs the laser pointer. (Case B)
if ( tmin < 0.){
hits[tnum] = 0;
}
else{
// where does it intersect the circle?
float xcir = tmin;
float ycir = tmin;
// get the unitized normal vector at the point of intersection:
float nx = xcir - X[gid];
float ny = ycir - Y[gid];
float n = sqrt( nx * nx + ny * ny );
nx /= n; // unit vector
ny /= n; // unit vector
// get the unitized incoming vector:
float inx = xcir - 0.;
float iny = ycir - 0.;
float in = sqrt( inx * inx + iny * iny );
inx /= in; // unit vector
iny /= in; // unit vector
// get the outgoing (bounced) vector:
float dot = inx*nx + iny*ny;
//float outx = inx - 2.*nx*dot; // angle of reflection = angle of incidence`
float outy = iny - 2.*ny*dot; // angle of reflection = angle of incidence`
// find out if it hits the infinite plate:
float t = ( 0. - ycir ) / outy;
// If t is less than 0., then the reflected beam went up instead of down
if( t < 0.){
hits[tnum] = 0;
}
}
}
// add up all hits
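// strided tree reduction in shared memory: in each round, threads whose index is a
// multiple of 2*offset pull in the partial sum sitting offset lanes away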
for (int offset = 1; offset < numItems; offset *= 2){
int mask = 2 * offset - 1;
__syncthreads();
if ((tnum & mask) == 0){
hits[tnum] += hits[tnum + offset];
}
}
__syncthreads();
// record results to array of hits per block
if (tnum == 0)
C[wgNum] = hits[0];
}
// main program:
int main( int argc, char* argv[ ] ){
int dev = findCudaDevice(argc, (const char **)argv);
TimeOfDaySeed();
// allocate host memory:
float * hxcs = new float [ SIZE ];
float * hycs = new float [ SIZE ];
float * hrs = new float [ SIZE ];
int * hC = new int [ SIZE/BLOCKSIZE ];
// fill in arrays with random values in the given ranges:
for( int i = 0; i < SIZE; i++ ){
hxcs[i] = Ranf(XCMIN, XCMAX);
hycs[i] = Ranf(YCMIN, YCMAX);
hrs[i] = Ranf(RMIN, RMAX);
}
// allocate device memory:
float *dxcs, *dycs, *drs;
int *dC;
dim3 dimsX( SIZE, 1, 1 );
dim3 dimsY( SIZE, 1, 1 );
dim3 dimsR( SIZE, 1, 1 );
dim3 dimsC( SIZE/BLOCKSIZE, 1, 1 );
cudaError_t status;
status = cudaMalloc( reinterpret_cast<void **>(&dxcs), SIZE*sizeof(float) );
checkCudaErrors( status );
status = cudaMalloc( reinterpret_cast<void **>(&dycs), SIZE*sizeof(float) );
checkCudaErrors( status );
status = cudaMalloc( reinterpret_cast<void **>(&drs), SIZE*sizeof(float) );
checkCudaErrors( status );
status = cudaMalloc( reinterpret_cast<void **>(&dC), (SIZE/BLOCKSIZE)*sizeof(int) );
checkCudaErrors( status );
// copy host memory to the device:
status = cudaMemcpy( dxcs, hxcs, SIZE*sizeof(float), cudaMemcpyHostToDevice );
checkCudaErrors( status );
status = cudaMemcpy( dycs, hycs, SIZE*sizeof(float), cudaMemcpyHostToDevice );
checkCudaErrors( status );
status = cudaMemcpy( drs, hrs, SIZE*sizeof(float), cudaMemcpyHostToDevice );
checkCudaErrors( status );
// setup the execution parameters:
dim3 threads(BLOCKSIZE, 1, 1 );
dim3 grid( SIZE / threads.x, 1, 1 );
// Create and start timer
cudaDeviceSynchronize( );
// allocate CUDA events that we'll use for timing:
cudaEvent_t start, stop;
status = cudaEventCreate( &start );
checkCudaErrors( status );
status = cudaEventCreate( &stop );
checkCudaErrors( status );
// record the start event:
status = cudaEventRecord( start, NULL );
checkCudaErrors( status );
// execute the kernel:
MonteCarlo<<< grid, threads >>>( dxcs, dycs, drs, dC);
// record the stop event:
status = cudaEventRecord( stop, NULL );
checkCudaErrors( status );
// wait for the stop event to complete:
status = cudaEventSynchronize( stop );
checkCudaErrors( status );
float msecTotal = 0.0f;
status = cudaEventElapsedTime( &msecTotal, start, stop );
checkCudaErrors( status );
// compute and print the performance
double secondsTotal = 0.001 * (double)msecTotal;
double TrialsPerSecond = (float)SIZE / secondsTotal;
double megaTrialsPerSecond = TrialsPerSecond / 1000000.;
fprintf( stderr, "Blocksize = %d, NumTrials = %d, MegaTrials/Second = %10.6lf\n", BLOCKSIZE, SIZE, megaTrialsPerSecond );
// copy result from the device to the host:
	status = cudaMemcpy( hC, dC, (SIZE/BLOCKSIZE)*sizeof(int), cudaMemcpyDeviceToHost );
checkCudaErrors( status );
// check the sum of all recordings in C:
int sumHits = 0;
for(int i = 0; i < SIZE/BLOCKSIZE; i++ ){
sumHits += hC[i];
}
// probability should come out around 0.42 (42%)
fprintf( stderr, "probability = %4.6lf\n", (float)sumHits/(float)SIZE);
// clean up memory:
delete [ ] hxcs;
delete [ ] hycs;
delete [ ] hrs;
delete [ ] hC;
status = cudaFree( dxcs );
checkCudaErrors( status );
status = cudaFree( dycs );
checkCudaErrors( status );
status = cudaFree( drs );
checkCudaErrors( status );
status = cudaFree( dC );
checkCudaErrors( status );
return 0;
}
|
78b1e88e1a0e7e3a224601fe52f6c5a2e6e810de.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
College: University of Massachusetts Lowell
EECE 7110:High-Performance Comp. on GPUs
Semester: Spring 2018
Student : 01639617
Project : Assignment_3
Professor : Dr.Hang Liu
Due date: 4/16/2018
Authors : Sai Sri Devesh Kadambari
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
using namespace std;
#define zero 0
__global__ void gpu_up_swing(int *a,int *c, int m)
{
__shared__ int smem[1];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int p=16384,n=1,pwr,pwr_1; //p = blockDim.x*gridDim.x = 128*128 elements handled per sweep
while((tid<(p*n))&&(tid<(32000000)))
{
__syncthreads(); //wait until all the threads in the block reach this point
for(int depth=0;depth<14;depth++) //14 = log2(16384) levels of the up-sweep
{ pwr=1<<depth;
pwr_1=1<<(depth+1);
if(tid%pwr_1==0){
a[tid+pwr_1-1]+=a[tid+pwr-1]; //up-sweep: the right child accumulates the left partial sum
}
__syncthreads();
}
//the down-sweep runs as a separate kernel launched from the host
smem[0]=a[p*n-1]; //total of this chunk, carried into the next one
tid+=p;
a[tid]+=smem[0];
n++;
__syncthreads();
}
}
__global__ void gpu_down_swing(int *a,int *c, int m)
{
__shared__ int smem[1];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int p=16384,n=1,pwr,pwr_1; //p = blockDim.x*gridDim.x = 128*128 elements handled per sweep
while((tid<(p*n))&&(tid<(32000000)))
{
__syncthreads(); //wait until all the threads in the block reach this point
if(tid==p*n-1) a[tid]=0; //clear the root before sweeping back down
for(int depth=13;depth>=0;depth--) //walk the tree back down from the root
{ pwr=1<<depth;
pwr_1=1<<(depth+1);
if(tid%pwr_1==0){
int t=a[tid+pwr-1]; //classic down-sweep: swap the left partial sum
a[tid+pwr-1]=a[tid+pwr_1-1]; //with the running prefix,
a[tid+pwr_1-1]+=t; //then add it into the right child
}
__syncthreads();
}
smem[0]=a[p*n-1];
tid+=p;
a[tid]+=smem[0];
n++;
__syncthreads();
}
}
int main(int argc, char const *argv[])
{ int m;
printf("please type in m(size) \n");
scanf("%d", &m);
// allocate memory in host RAM, h_cc is used to store CPU result
int *h_a, *h_c;// *h_cc;
hipHostMalloc((void **) &h_a, sizeof(int)*m);
hipHostMalloc((void **) &h_c, sizeof(int)*m);
for (int i = 0; i < m; ++i) {
h_a[i] = rand() % 1024; //loading random values
}
clock_t t;
t = clock();
int *d_a,*d_c;
hipMalloc((void **) &d_a, sizeof(int)*m);
hipMalloc((void **) &d_c, sizeof(int)*m);
hipMemcpy(d_a, h_a, sizeof(int)*m, hipMemcpyHostToDevice);
dim3 dimGrid(128);
dim3 dimBlock(128);
hipLaunchKernelGGL(( gpu_up_swing), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a,d_c, m);
hipLaunchKernelGGL(( gpu_down_swing), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a,d_c, m);
hipDeviceSynchronize();
hipMemcpy(h_c, d_a, sizeof(int)*m, hipMemcpyDeviceToHost); //the scan result is written in place into a
t = clock()-t;
double time_taken = ((double)t)/CLOCKS_PER_SEC;
printf("Time elapsed on operation of %d: %lf ms.\n\n", m,(time_taken/1000));
hipFree(d_a);
hipFree(d_c);
hipHostFree(h_a);
hipHostFree(h_c);
//hipHostFree(h_cc);
return 0;
}
|
78b1e88e1a0e7e3a224601fe52f6c5a2e6e810de.cu
|
/*
College: University of Massachusetts Lowell
EECE 7110:High-Performance Comp. on GPUs
Semester: Spring 2018
Student : 01639617
Project : Assignment_3
Professor : Dr.Hang Liu
Due date: 4/16/2018
Authors : Sai Sri Devesh Kadambari
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
using namespace std;
#define zero 0
__global__ void gpu_up_swing(int *a,int *c, int m)
{
__shared__ int smem[1];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int p=16384,n=1,pwr,pwr_1; //p = blockDim.x*gridDim.x = 128*128 elements handled per sweep
while((tid<(p*n))&&(tid<(32000000)))
{
__syncthreads(); //wait until all the threads in the block reach this point
for(int depth=0;depth<14;depth++) //14 = log2(16384) levels of the up-sweep
{ pwr=1<<depth;
pwr_1=1<<(depth+1);
if(tid%pwr_1==0){
a[tid+pwr_1-1]+=a[tid+pwr-1]; //up-sweep: the right child accumulates the left partial sum
}
__syncthreads();
}
//the down-sweep runs as a separate kernel launched from the host
smem[0]=a[p*n-1]; //total of this chunk, carried into the next one
tid+=p;
a[tid]+=smem[0];
n++;
__syncthreads();
}
}
__global__ void gpu_down_swing(int *a,int *c, int m)
{
__shared__ int smem[1];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int p=16384,n=1,pwr,pwr_1; //p = blockDim.x*gridDim.x = 128*128 elements handled per sweep
while((tid<(p*n))&&(tid<(32000000)))
{
__syncthreads(); //wait until all the threads in the block reach this point
if(tid==p*n-1) a[tid]=0; //clear the root before sweeping back down
for(int depth=13;depth>=0;depth--) //walk the tree back down from the root
{ pwr=1<<depth;
pwr_1=1<<(depth+1);
if(tid%pwr_1==0){
int t=a[tid+pwr-1]; //classic down-sweep: swap the left partial sum
a[tid+pwr-1]=a[tid+pwr_1-1]; //with the running prefix,
a[tid+pwr_1-1]+=t; //then add it into the right child
}
__syncthreads();
}
smem[0]=a[p*n-1];
tid+=p;
a[tid]+=smem[0];
n++;
__syncthreads();
}
}
int main(int argc, char const *argv[])
{ int m;
printf("please type in m(size) \n");
scanf("%d", &m);
// allocate memory in host RAM, h_cc is used to store CPU result
int *h_a, *h_c;// *h_cc;
cudaMallocHost((void **) &h_a, sizeof(int)*m);
cudaMallocHost((void **) &h_c, sizeof(int)*m);
for (int i = 0; i < m; ++i) {
h_a[i] = rand() % 1024; //loading random values
}
clock_t t;
t = clock();
int *d_a,*d_c;
cudaMalloc((void **) &d_a, sizeof(int)*m);
cudaMalloc((void **) &d_c, sizeof(int)*m);
cudaMemcpy(d_a, h_a, sizeof(int)*m, cudaMemcpyHostToDevice);
dim3 dimGrid(128);
dim3 dimBlock(128);
gpu_up_swing<<<dimGrid, dimBlock>>>(d_a, d_c, m);
gpu_down_swing<<<dimGrid, dimBlock>>>(d_a, d_c, m);
cudaDeviceSynchronize();
cudaMemcpy(h_c, d_a, sizeof(int)*m, cudaMemcpyDeviceToHost); //the scan result is written in place into a
t = clock()-t;
double time_taken = ((double)t)/CLOCKS_PER_SEC;
printf("Time elapsed on operation of %d: %lf ms.\n\n", m,(time_taken/1000));
cudaFree(d_a);
cudaFree(d_c);
cudaFreeHost(h_a);
cudaFreeHost(h_c);
//cudaFreeHost(h_cc);
return 0;
}
|
73c2dc0765b93fe3da10423de5dff5bd7c6da7c8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <algorithm>
#include "bucketselect_combined.cuh"
//#include "bucketselect.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <fstream>
#include <random>
// #define Enabletest 1
using namespace std;
typedef unsigned int data_t;
//typedef unsigned int data_t;
typedef int index_t;
int compare (const void * a, const void * b)
{
return ( *(int*)a - *(int*)b );//in ascending order
}
template<typename data_t,typename index_t>
index_t power(index_t x,index_t n)
{
index_t number=1;
for (index_t i=0; i<n ;i++)
{
number*=x;
}
return number;
}
void getminmax(data_t* arr,index_t n,data_t& max,data_t& min)
{
//data_t max=arr[0];
for (index_t i=1;i<n;i++)
{
if (arr[i]> max)
{
max=arr[i];
}
if (arr[i]<min)
{
min=arr[i];
}
}
return;
}
int main(int argc,char** argv)
{
cout<<"./exe num_element k NBucket"<<endl;
if (argc != 4) {cout<<"wrong input"<<endl;exit(-1);}
index_t num_pow = atol(argv[1]);
index_t base=2;
index_t num_element = power<data_t,index_t>(base,num_pow);
index_t k= atol(argv[2]);
index_t num_bucket=atol(argv[3]);//atol(argv[3]);
// index_t num_bucket=1<<NBits;
index_t alpha=0.5*(num_pow-log(k)/log(2)+3);
if (alpha<5) alpha++;
bool defaultContribution=true;
int beta=2;
index_t SubRangesize=pow(2,alpha);
index_t NSubranges=num_element/SubRangesize;
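//each subrange spans 2^alpha consecutive elements, so NSubranges = num_element/2^alpha;
//judging by the buffers and timers below, the pipeline samples a max per subrange (beta
//candidates when defaultContribution is false), runs a first top-k over those maxima,
//concatenates the winning subranges and finishes with a second top-k over them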
int NthreadstoworkInreduction=32;
if (SubRangesize<32)
{
NthreadstoworkInreduction=SubRangesize;
}
cout<<"Number of Subranges:"<<NSubranges<<endl;
if (NSubranges<k)
{
cout<<"Small number of subranges!. Decrease the value of alpha!"<<endl;
// exit(-1);
}
if (alpha <=5)
{
defaultContribution=false;
beta=2;//SampleBeta function is designed for Beta=3 only. So we need to update the SampleBetafunction in radix select if we want to change Beta value
}
data_t* Max_d;
H_ERR(hipMalloc((void**) &Max_d,sizeof(data_t)*NSubranges*beta));// updated for Beta
index_t* SubrangeId_d;
H_ERR(hipMalloc((void**) &SubrangeId_d,sizeof(index_t)*NSubranges*beta));//updated for beta
data_t* vec= new data_t[num_element];
data_t* vec1= new data_t[num_element];
std::random_device rd;
std::mt19937 gen(rd());
float minvalue=100000000;
unsigned int value;
// std::uniform_int_distribution <unsigned int> d(0, 4294967295);
std::normal_distribution<float> d(100000000, 10);//Mean =100 mill , sd=100
// std::uniform_real_distribution<> d(0.0, 4294967295.0);//Generates random uniformly distributed floats within the given range
for (index_t i=0;i<num_element;i++)
{
// vec[i]=rand()%2147483648;//2^31 -1
// value=d(gen);//2^31 -1
float sample=d(gen); //draw a single sample and fold it to its absolute value
value=(sample<0)? (unsigned int)(-sample) : (unsigned int)sample;
if (minvalue > value)
{
minvalue=value;
}
vec[i]=value;//2^32 -1
// if(i<10000)
// cout<<vec[i]<<" ";
// cout<<endl;
vec1[i]=vec[i];
// if (vec[i] > 4294900000)
// {
// cout<<vec[i]<<" ";
// }
}
if (minvalue < 0)
{
cout<<"-ve value detected:"<<minvalue<<endl;
return -1;
}
cout<<vec[0];
cout<<endl;
data_t* TopArray=new data_t[k];
data_t TopKElement=0;
// data_t NNewTopElements;
data_t* vec_d;
H_ERR(hipMalloc((void**) &vec_d,sizeof(data_t)*num_element));
H_ERR(hipMemcpy(vec_d,vec,sizeof(data_t)*num_element,hipMemcpyHostToDevice));
double timeforMaxsample=0;double timeforFirstTopk=0;double timeforSecondTopk=0;double timeforNormalRadixSelect=0;double timeforConcatenation=0;
int NThreadsPerBlock=256;//only shared memory
int NSharedMemoryElements=NThreadsPerBlock<<5;//3 is giving best result in different values of SubWarp size //Each thread responsible for 32 elements and contribute to 8 Subranges from a group of 4 elements
int SizeOfAllocation=NSharedMemoryElements+(NSharedMemoryElements >> 5);
H_ERR(hipDeviceSynchronize());
index_t* SelectedSubrangeId_d;
H_ERR(hipMalloc((void**) &SelectedSubrangeId_d,sizeof(index_t)*(NSubranges-k)*beta));//updated *3 for beta
index_t* CountSelectedSubrange_d;
index_t* CountLonelyElements_d;
H_ERR(hipMalloc((void**) &CountSelectedSubrange_d,sizeof(index_t)));
H_ERR(hipMalloc((void**) &CountLonelyElements_d,sizeof(index_t)));
H_ERR(hipMemset(CountSelectedSubrange_d, 0, sizeof(index_t)));
H_ERR(hipMemset(CountLonelyElements_d, 0, sizeof(index_t)));
index_t* write_pos_d;
// H_ERR(hipMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*k*SubRangesize));
H_ERR(hipMalloc((void**) &write_pos_d,sizeof(index_t)));
data_t* ConcatenatedRange_d;
H_ERR(hipMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*k*SubRangesize));
double start=wtime();
sample_bucket_select<data_t,index_t>(vec_d,num_element,/*num_element-k*/k,num_bucket,TopKElement,NSubranges,SubRangesize,alpha,timeforMaxsample,timeforFirstTopk,timeforSecondTopk,timeforConcatenation,Max_d,SubrangeId_d,beta,defaultContribution,NthreadstoworkInreduction,NThreadsPerBlock,SizeOfAllocation,NSharedMemoryElements, SelectedSubrangeId_d, CountSelectedSubrange_d, CountLonelyElements_d, write_pos_d, ConcatenatedRange_d);
// bucket_select<data_t,index_t>(vec_d,num_element,num_element-k,num_bucket,TopArray,TopKElement);
double totalTime=wtime()-start;
cout<<"Time for selecting the top k element is:"<<totalTime*1000<<" ms"<<endl;
// bucket_select_PhaseII<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,vec);
cout<<"The kth element from top is:"<<TopKElement<<endl;
cout<<endl;
#ifdef Enabletest
sort(vec1, vec1 + num_element);
cout<<endl;
cout<<"kth element"<<vec1[num_element-k]<<endl;
cout<<"k-1 th element"<<vec1[num_element-k+1]<<endl;
cout<<"k+1 th element"<<vec1[num_element-k-1]<<endl;
if (vec1[num_element-k]==TopKElement)
{
cout<<"Success!"<<endl;
}
else
{
cout<<"Not Success!"<<endl;
}
assert(vec1[num_element-k]==TopKElement);
#endif
std::fstream timeLog;
// timeLog.open("Uniform_Unsigned__N_30_SOKBucket.csv",std::fstream::out | std::fstream::app);
//timeLog.open("Normal_float_N_29_SOKBucket.csv",std::fstream::out | std::fstream::app);
// timeLog.open("Normal_UINT_N_30_SOKBucket.csv",std::fstream::out | std::fstream::app);
timeLog.open("NewNormal_distributionN(2^15,5).csv",std::fstream::out | std::fstream::app);
// timeLog.open("Uniform_UINT_N_29_SOKBucket.csv",std::fstream::out | std::fstream::app);
if (defaultContribution)
{
timeLog<<"D"<<";";
}
else
{
timeLog<<"B"<<";";
}
timeLog<<num_pow<<";"<<k<<";"<<alpha<<";"<<timeforMaxsample*1000<<";"<<timeforFirstTopk*1000<<";"<<timeforConcatenation*1000<<";"<<timeforSecondTopk*1000<<";"<<timeforNormalRadixSelect*1000<<";"<<totalTime*1000<<endl;
// timeLog<<num_pow<<"_N_"<<num_element<<"k_"<<k<<"num_bucket_"<<num_bucket<<";"<<totalTime*1000<<endl;
timeLog.close();
return 0;
}
|
73c2dc0765b93fe3da10423de5dff5bd7c6da7c8.cu
|
#include <iostream>
#include <stdlib.h>
#include <algorithm>
#include "bucketselect_combined.cuh"
//#include "bucketselect.cuh"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <fstream>
#include <random>
// #define Enabletest 1
using namespace std;
typedef unsigned int data_t;
//typedef unsigned int data_t;
typedef int index_t;
int compare (const void * a, const void * b)
{
return ( *(int*)a - *(int*)b );//in ascending order
}
template<typename data_t,typename index_t>
index_t power(index_t x,index_t n)
{
index_t number=1;
for (index_t i=0; i<n ;i++)
{
number*=x;
}
return number;
}
void getminmax(data_t* arr,index_t n,data_t& max,data_t& min)
{
//data_t max=arr[0];
for (index_t i=1;i<n;i++)
{
if (arr[i]> max)
{
max=arr[i];
}
if (arr[i]<min)
{
min=arr[i];
}
}
return;
}
int main(int argc,char** argv)
{
cout<<"./exe num_element k NBucket"<<endl;
if (argc != 4) {cout<<"wrong input"<<endl;exit(-1);}
index_t num_pow = atol(argv[1]);
index_t base=2;
index_t num_element = power<data_t,index_t>(base,num_pow);
index_t k= atol(argv[2]);
index_t num_bucket=atol(argv[3]);//atol(argv[3]);
// index_t num_bucket=1<<NBits;
index_t alpha=0.5*(num_pow-log(k)/log(2)+3);
if (alpha<5) alpha++;
bool defaultContribution=true;
int beta=2;
index_t SubRangesize=pow(2,alpha);
index_t NSubranges=num_element/SubRangesize;
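//each subrange spans 2^alpha consecutive elements, so NSubranges = num_element/2^alpha;
//judging by the buffers and timers below, the pipeline samples a max per subrange (beta
//candidates when defaultContribution is false), runs a first top-k over those maxima,
//concatenates the winning subranges and finishes with a second top-k over them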
int NthreadstoworkInreduction=32;
if (SubRangesize<32)
{
NthreadstoworkInreduction=SubRangesize;
}
cout<<"Number of Subranges:"<<NSubranges<<endl;
if (NSubranges<k)
{
cout<<"Small number of subranges!. Decrease the value of alpha!"<<endl;
// exit(-1);
}
if (alpha <=5)
{
defaultContribution=false;
beta=2;//SampleBeta function is designed for Beta=3 only. So we need to update the SampleBetafunction in radix select if we want to change Beta value
}
data_t* Max_d;
H_ERR(cudaMalloc((void**) &Max_d,sizeof(data_t)*NSubranges*beta));// updated for Beta
index_t* SubrangeId_d;
H_ERR(cudaMalloc((void**) &SubrangeId_d,sizeof(index_t)*NSubranges*beta));//updated for beta
data_t* vec= new data_t[num_element];
data_t* vec1= new data_t[num_element];
std::random_device rd;
std::mt19937 gen(rd());
float minvalue=100000000;
unsigned int value;
// std::uniform_int_distribution <unsigned int> d(0, 4294967295);
std::normal_distribution<float> d(100000000, 10);//Mean =100 mill , sd=100
// std::uniform_real_distribution<> d(0.0, 4294967295.0);//Generates random uniformly distributed floats within the given range
for (index_t i=0;i<num_element;i++)
{
// vec[i]=rand()%2147483648;//2^31 -1
// value=d(gen);//2^31 -1
float sample=d(gen); //draw a single sample and fold it to its absolute value
value=(sample<0)? (unsigned int)(-sample) : (unsigned int)sample;
if (minvalue > value)
{
minvalue=value;
}
vec[i]=value;//2^32 -1
// if(i<10000)
// cout<<vec[i]<<" ";
// cout<<endl;
vec1[i]=vec[i];
// if (vec[i] > 4294900000)
// {
// cout<<vec[i]<<" ";
// }
}
if (minvalue < 0)
{
cout<<"-ve value detected:"<<minvalue<<endl;
return -1;
}
cout<<vec[0];
cout<<endl;
data_t* TopArray=new data_t[k];
data_t TopKElement=0;
// data_t NNewTopElements;
data_t* vec_d;
H_ERR(cudaMalloc((void**) &vec_d,sizeof(data_t)*num_element));
H_ERR(cudaMemcpy(vec_d,vec,sizeof(data_t)*num_element,cudaMemcpyHostToDevice));
double timeforMaxsample=0;double timeforFirstTopk=0;double timeforSecondTopk=0;double timeforNormalRadixSelect=0;double timeforConcatenation=0;
int NThreadsPerBlock=256;//only shared memory
int NSharedMemoryElements=NThreadsPerBlock<<5;//3 is giving best result in different values of SubWarp size //Each thread responsible for 32 elements and contribute to 8 Subranges from a group of 4 elements
int SizeOfAllocation=NSharedMemoryElements+(NSharedMemoryElements >> 5);
H_ERR(cudaDeviceSynchronize());
index_t* SelectedSubrangeId_d;
H_ERR(cudaMalloc((void**) &SelectedSubrangeId_d,sizeof(index_t)*(NSubranges-k)*beta));//updated *3 for beta
index_t* CountSelectedSubrange_d;
index_t* CountLonelyElements_d;
H_ERR(cudaMalloc((void**) &CountSelectedSubrange_d,sizeof(index_t)));
H_ERR(cudaMalloc((void**) &CountLonelyElements_d,sizeof(index_t)));
H_ERR(cudaMemset(CountSelectedSubrange_d, 0, sizeof(index_t)));
H_ERR(cudaMemset(CountLonelyElements_d, 0, sizeof(index_t)));
index_t* write_pos_d;
// H_ERR(cudaMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*k*SubRangesize));
H_ERR(cudaMalloc((void**) &write_pos_d,sizeof(index_t)));
data_t* ConcatenatedRange_d;
H_ERR(cudaMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*k*SubRangesize));
double start=wtime();
sample_bucket_select<data_t,index_t>(vec_d,num_element,/*num_element-k*/k,num_bucket,TopKElement,NSubranges,SubRangesize,alpha,timeforMaxsample,timeforFirstTopk,timeforSecondTopk,timeforConcatenation,Max_d,SubrangeId_d,beta,defaultContribution,NthreadstoworkInreduction,NThreadsPerBlock,SizeOfAllocation,NSharedMemoryElements, SelectedSubrangeId_d, CountSelectedSubrange_d, CountLonelyElements_d, write_pos_d, ConcatenatedRange_d);
// bucket_select<data_t,index_t>(vec_d,num_element,num_element-k,num_bucket,TopArray,TopKElement);
double totalTime=wtime()-start;
cout<<"Time for selecting the top k element is:"<<totalTime*1000<<" ms"<<endl;
// bucket_select_PhaseII<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,vec);
cout<<"The kth element from top is:"<<TopKElement<<endl;
cout<<endl;
#ifdef Enabletest
sort(vec1, vec1 + num_element);
cout<<endl;
cout<<"kth element"<<vec1[num_element-k]<<endl;
cout<<"k-1 th element"<<vec1[num_element-k+1]<<endl;
cout<<"k+1 th element"<<vec1[num_element-k-1]<<endl;
if (vec1[num_element-k]==TopKElement)
{
cout<<"Success!"<<endl;
}
else
{
cout<<"Not Success!"<<endl;
}
assert(vec1[num_element-k]==TopKElement);
#endif
std::fstream timeLog;
// timeLog.open("Uniform_Unsigned__N_30_SOKBucket.csv",std::fstream::out | std::fstream::app);
//timeLog.open("Normal_float_N_29_SOKBucket.csv",std::fstream::out | std::fstream::app);
// timeLog.open("Normal_UINT_N_30_SOKBucket.csv",std::fstream::out | std::fstream::app);
timeLog.open("NewNormal_distributionN(2^15,5).csv",std::fstream::out | std::fstream::app);
// timeLog.open("Uniform_UINT_N_29_SOKBucket.csv",std::fstream::out | std::fstream::app);
if (defaultContribution)
{
timeLog<<"D"<<";";
}
else
{
timeLog<<"B"<<";";
}
timeLog<<num_pow<<";"<<k<<";"<<alpha<<";"<<timeforMaxsample*1000<<";"<<timeforFirstTopk*1000<<";"<<timeforConcatenation*1000<<";"<<timeforSecondTopk*1000<<";"<<timeforNormalRadixSelect*1000<<";"<<totalTime*1000<<endl;
// timeLog<<num_pow<<"_N_"<<num_element<<"k_"<<k<<"num_bucket_"<<num_bucket<<";"<<totalTime*1000<<endl;
timeLog.close();
return 0;
}
|
05c3f8e701358f3200855c647009d822dba8dbab.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Libraries
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "benchmark.h"
/*
* Macros
*/
//#define DEBUG
#ifndef mult_tm
#define mult_tm 20
#endif
#ifndef mult_tn
#define mult_tn 16
#endif
#ifndef mult_tp
#define mult_tp 64
#endif
/*
* CUDA kernel bodies
*/
__global__ void full_kern(float *a, unsigned a_r, unsigned a_c,
float *b, unsigned b_r, unsigned b_c,
float *c, unsigned c_r, unsigned c_c)
{
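// register-tiled GEMM with a fused tanh: each block computes a (mult_tm x 2*mult_tp) tile of
// a = tanhf(b*c); every thread accumulates 2*mult_tm partial sums in registers (two output
// columns that sit mult_tp apart), b is staged through shared-memory mult_tm x mult_tn tiles,
// and c is streamed straight from global memory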
// loop counter
unsigned i, j;
// starting positions in respective tiles
float *acur, *bcur, *ccur;
{
const unsigned block_pos_r = blockIdx.y*mult_tm;
const unsigned block_pos_c = blockIdx.x*2*mult_tp;
acur = a + block_pos_c + block_pos_r * a_c +
threadIdx.x + threadIdx.y * mult_tn;
bcur = b + (block_pos_r + threadIdx.y) * b_c + threadIdx.x;
ccur = c + block_pos_c + threadIdx.x + threadIdx.y * mult_tn;
}
// end of last tile in b
const float *bend = bcur + b_c;
// current a values
float aval_v1[mult_tm];
float aval_v2[mult_tm];
// initialize a values to zero
#pragma unroll
for (i = 0; i < mult_tm; ++i)
{
aval_v1[i] = 0.0f;
aval_v2[i] = 0.0f;
}
// for each tile read from b
do
{
// allocate shared space for tile in b
__shared__ float bs[mult_tn][mult_tm+1];
// put tile from b into shared memory
#pragma unroll
for (i = 0; i < mult_tm; i += (mult_tp/mult_tn))
bs[threadIdx.x][threadIdx.y+i] = bcur[i*b_c];
// move b's tile across
bcur += mult_tn;
// synchronize to ensure all elements are read
__syncthreads();
// for each row in tile of c
#pragma unroll
for (i = 0; i < mult_tn; ++i)
{
// do mults and adds
#pragma unroll
for (j = 0; j < mult_tm; ++j)
{
aval_v1[j] += bs[i][j] * ccur[0];
aval_v2[j] += bs[i][j] * ccur[mult_tp];
}
ccur += c_c;
}
__syncthreads();
}
while (bcur < bend); // until last tile in b
// copy results to global memory
#pragma unroll
for (i = 0; i < mult_tm; ++i, acur += a_c)
{
acur[0] = tanhf(aval_v1[i]);
acur[mult_tp] = tanhf(aval_v2[i]);
}
}
/*
* Function bodies
*/
#define tn 64
void figure_gold(float *gold, float *m1, float *m2, unsigned n)
{
unsigned ti, tj, tk;
unsigned i, j, k;
#pragma omp parallel for private(tj,tk,i,j,k)
for (ti = 0; ti < n; ti += tn)
for (tj = 0; tj < n; tj += tn)
for (tk = 0; tk < n; tk += tn)
for (i = ti; i < ti+tn; ++i)
for (j = tj; j < tj+tn; ++j)
{
float dot = gold[i*n+j];
for (k = tk; k < tk+tn; ++k)
dot += m1[i*n+k] * m2[k*n+j];
gold[i*n+j] = dot;
}
#pragma omp parallel for
for (i = 0; i < n*n; ++i)
gold[i] = tanhf(gold[i]);
}
int main(int narg, char **arg)
{
// counter & size of vector
unsigned i, n = 6400;
// timers
benchmark bm;
benchmark_init(&bm);
// grab n from command line if given
if (narg > 1)
n = atoi(arg[1]);
const size_t size = n*n*sizeof(float);
#ifdef DEBUG
printf("n: %d\n", n);
#endif
// allocate space for first matrix
float *m1 = (float*)malloc(size);
if (m1 == NULL) {
fprintf(stderr, "Could not allocate space for m1!\n");
exit(1);
}
// allocate space for second matrix
float *m2 = (float*)malloc(size);
if (m2 == NULL) {
fprintf(stderr, "Could not allocate space for m2!\n");
exit(1);
}
// allocate space for result matrix
float *result = (float*)malloc(size);
if (result == NULL) {
fprintf(stderr, "Could not allocate space for result!\n");
exit(1);
}
// allocate space for gold matrix
float *gold = (float*)calloc(n*n, sizeof(float)); // gold is accumulated into, so it must start zeroed
if (gold == NULL) {
fprintf(stderr, "Could not allocate space for gold!\n");
exit(1);
}
// seed random number generator
srand(7);
// fill m1 and m2 with random numbers from [-7,7]
for (i = 0; i < n*n; ++i)
{
m1[i] = ((((float)rand()) / RAND_MAX) * 14.0f) - 7.0f;
m2[i] = ((((float)rand()) / RAND_MAX) * 14.0f) - 7.0f;
#ifdef DEBUG
printf("m1[%d]: %f\n", i, m1[i]);
#endif
}
// figure gold values
printf("start\n");
figure_gold(gold, m1, m2, n);
printf("end\n");
// allocate space for matrices on gpu
float *d_m1;
hipMalloc((void**)&d_m1, size);
if (hipGetLastError() != hipSuccess) {
fprintf(stderr, "Could not allocate d_m1!\n");
exit(1);
}
float *d_m2;
hipMalloc((void**)&d_m2, size);
if (hipGetLastError() != hipSuccess) {
fprintf(stderr, "Could not allocate d_m2!\n");
exit(1);
}
float *d_result;
hipMalloc((void**)&d_result, size);
if (hipGetLastError() != hipSuccess) {
fprintf(stderr, "Could not allocate d_result!\n");
exit(1);
}
// copy m1 & m2 to device
hipMemcpy(d_m1, m1, size, hipMemcpyHostToDevice);
if (hipGetLastError() != hipSuccess) {
fprintf(stderr, "Could not copy m1 to device!\n");
exit(1);
}
hipMemcpy(d_m2, m2, size, hipMemcpyHostToDevice);
if (hipGetLastError() != hipSuccess) {
fprintf(stderr, "Could not copy m2 to device!\n");
exit(1);
}
// set grid and block dims
const dim3 block_size(mult_tn, mult_tp/mult_tn, 1);
const unsigned num_cblk_r = n / mult_tm;
const unsigned num_cblk_c = n / (2*mult_tp);
const dim3 grid_size(num_cblk_c, num_cblk_r, 1);
// warm up
hipLaunchKernelGGL(( full_kern), dim3(grid_size), dim3(block_size), 0, 0, d_result, n, n,
d_m1, n, n,
d_m2, n, n);
hipDeviceSynchronize();
// gold run
benchmark_start_timer(&bm);
hipLaunchKernelGGL(( full_kern), dim3(grid_size), dim3(block_size), 0, 0, d_result, n, n,
d_m1, n, n,
d_m2, n, n);
hipDeviceSynchronize();
benchmark_stop_timer(&bm);
hipMemcpy(result, d_result, size, hipMemcpyDeviceToHost);
float max_err = 0.0f;
for (i = 0; i < n*n; ++i)
{
float actual_cur = gold[i];
if (fabs(actual_cur) > 1.0e-10f)
{
float rel_err = fabs((actual_cur - result[i]) / actual_cur);
if (rel_err > max_err)
max_err = rel_err;
}
}
// set number of floating point ops
benchmark_add_flop(&bm, n*n*(2ll*n));
printf("Full tanh\n=======\n");
printf("Time: %f GFlopS: %f Error: %f\n\n",
benchmark_check_timer(bm),
benchmark_check_gflops(bm),
max_err);
free(m1);
free(m2);
free(result);
free(gold);
hipFree(d_m1);
hipFree(d_m2);
hipFree(d_result);
return 0;
}
|
05c3f8e701358f3200855c647009d822dba8dbab.cu
|
/*
* Libraries
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include "benchmark.h"
/*
* Macros
*/
//#define DEBUG
#ifndef mult_tm
#define mult_tm 20
#endif
#ifndef mult_tn
#define mult_tn 16
#endif
#ifndef mult_tp
#define mult_tp 64
#endif
/*
* CUDA kernel bodies
*/
__global__ void full_kern(float *a, unsigned a_r, unsigned a_c,
float *b, unsigned b_r, unsigned b_c,
float *c, unsigned c_r, unsigned c_c)
{
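// register-tiled GEMM with a fused tanh: each block computes a (mult_tm x 2*mult_tp) tile of
// a = tanhf(b*c); every thread accumulates 2*mult_tm partial sums in registers (two output
// columns that sit mult_tp apart), b is staged through shared-memory mult_tm x mult_tn tiles,
// and c is streamed straight from global memory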
// loop counter
unsigned i, j;
// starting positions in respective tiles
float *acur, *bcur, *ccur;
{
const unsigned block_pos_r = blockIdx.y*mult_tm;
const unsigned block_pos_c = blockIdx.x*2*mult_tp;
acur = a + block_pos_c + block_pos_r * a_c +
threadIdx.x + threadIdx.y * mult_tn;
bcur = b + (block_pos_r + threadIdx.y) * b_c + threadIdx.x;
ccur = c + block_pos_c + threadIdx.x + threadIdx.y * mult_tn;
}
// end of last tile in b
const float *bend = bcur + b_c;
// current a values
float aval_v1[mult_tm];
float aval_v2[mult_tm];
// initialize a values to zero
#pragma unroll
for (i = 0; i < mult_tm; ++i)
{
aval_v1[i] = 0.0f;
aval_v2[i] = 0.0f;
}
// for each tile read from b
do
{
// allocate shared space for tile in b
__shared__ float bs[mult_tn][mult_tm+1];
// put tile from b into shared memory
#pragma unroll
for (i = 0; i < mult_tm; i += (mult_tp/mult_tn))
bs[threadIdx.x][threadIdx.y+i] = bcur[i*b_c];
// move b's tile across
bcur += mult_tn;
// synchronize to ensure all elements are read
__syncthreads();
// for each row in tile of c
#pragma unroll
for (i = 0; i < mult_tn; ++i)
{
// do mults and adds
#pragma unroll
for (j = 0; j < mult_tm; ++j)
{
aval_v1[j] += bs[i][j] * ccur[0];
aval_v2[j] += bs[i][j] * ccur[mult_tp];
}
ccur += c_c;
}
__syncthreads();
}
while (bcur < bend); // until last tile in b
// copy results to global memory
#pragma unroll
for (i = 0; i < mult_tm; ++i, acur += a_c)
{
acur[0] = tanhf(aval_v1[i]);
acur[mult_tp] = tanhf(aval_v2[i]);
}
}
/*
* Function bodies
*/
#define tn 64
void figure_gold(float *gold, float *m1, float *m2, unsigned n)
{
unsigned ti, tj, tk;
unsigned i, j, k;
#pragma omp parallel for private(tj,tk,i,j,k)
for (ti = 0; ti < n; ti += tn)
for (tj = 0; tj < n; tj += tn)
for (tk = 0; tk < n; tk += tn)
for (i = ti; i < ti+tn; ++i)
for (j = tj; j < tj+tn; ++j)
{
float dot = gold[i*n+j];
for (k = tk; k < tk+tn; ++k)
dot += m1[i*n+k] * m2[k*n+j];
gold[i*n+j] = dot;
}
#pragma omp parallel for
for (i = 0; i < n*n; ++i)
gold[i] = tanhf(gold[i]);
}
int main(int narg, char **arg)
{
// counter & size of vector
unsigned i, n = 6400;
// timers
benchmark bm;
benchmark_init(&bm);
// grab n from command line if given
if (narg > 1)
n = atoi(arg[1]);
const size_t size = n*n*sizeof(float);
#ifdef DEBUG
printf("n: %d\n", n);
#endif
// allocate space for first matrix
float *m1 = (float*)malloc(size);
if (m1 == NULL) {
fprintf(stderr, "Could not allocate space for m1!\n");
exit(1);
}
// allocate space for second matrix
float *m2 = (float*)malloc(size);
if (m2 == NULL) {
fprintf(stderr, "Could not allocate space for m2!\n");
exit(1);
}
// allocate space for result matrix
float *result = (float*)malloc(size);
if (result == NULL) {
fprintf(stderr, "Could not allocate space for result!\n");
exit(1);
}
// allocate space for gold matrix
float *gold = (float*)calloc(n*n, sizeof(float)); // gold is accumulated into, so it must start zeroed
if (gold == NULL) {
fprintf(stderr, "Could not allocate space for gold!\n");
exit(1);
}
// seed random number generator
srand(7);
// fill m1 and m2 with random numbers from [-7,7]
for (i = 0; i < n*n; ++i)
{
m1[i] = ((((float)rand()) / RAND_MAX) * 14.0f) - 7.0f;
m2[i] = ((((float)rand()) / RAND_MAX) * 14.0f) - 7.0f;
#ifdef DEBUG
printf("m1[%d]: %f\n", i, m1[i]);
#endif
}
// figure gold values
printf("start\n");
figure_gold(gold, m1, m2, n);
printf("end\n");
// allocate space for matrices on gpu
float *d_m1;
cudaMalloc((void**)&d_m1, size);
if (cudaGetLastError() != cudaSuccess) {
fprintf(stderr, "Could not allocate d_m1!\n");
exit(1);
}
float *d_m2;
cudaMalloc((void**)&d_m2, size);
if (cudaGetLastError() != cudaSuccess) {
fprintf(stderr, "Could not allocate d_m2!\n");
exit(1);
}
float *d_result;
cudaMalloc((void**)&d_result, size);
if (cudaGetLastError() != cudaSuccess) {
fprintf(stderr, "Could not allocate d_result!\n");
exit(1);
}
// copy m1 & m2 to device
cudaMemcpy(d_m1, m1, size, cudaMemcpyHostToDevice);
if (cudaGetLastError() != cudaSuccess) {
fprintf(stderr, "Could not copy m1 to device!\n");
exit(1);
}
cudaMemcpy(d_m2, m2, size, cudaMemcpyHostToDevice);
if (cudaGetLastError() != cudaSuccess) {
fprintf(stderr, "Could not copy m2 to device!\n");
exit(1);
}
// set grid and block dims
const dim3 block_size(mult_tn, mult_tp/mult_tn, 1);
const unsigned num_cblk_r = n / mult_tm;
const unsigned num_cblk_c = n / (2*mult_tp);
const dim3 grid_size(num_cblk_c, num_cblk_r, 1);
// warm up
full_kern<<<grid_size, block_size>>>(d_result, n, n,
d_m1, n, n,
d_m2, n, n);
cudaDeviceSynchronize();
// gold run
benchmark_start_timer(&bm);
full_kern<<<grid_size, block_size>>>(d_result, n, n,
d_m1, n, n,
d_m2, n, n);
cudaDeviceSynchronize();
benchmark_stop_timer(&bm);
cudaMemcpy(result, d_result, size, cudaMemcpyDeviceToHost);
float max_err = 0.0f;
for (i = 0; i < n*n; ++i)
{
float actual_cur = gold[i];
if (fabs(actual_cur) > 1.0e-10f)
{
float rel_err = fabs((actual_cur - result[i]) / actual_cur);
if (rel_err > max_err)
max_err = rel_err;
}
}
// set number of floating point ops
benchmark_add_flop(&bm, n*n*(2ll*n));
printf("Full tanh\n=======\n");
printf("Time: %f GFlopS: %f Error: %f\n\n",
benchmark_check_timer(bm),
benchmark_check_gflops(bm),
max_err);
free(m1);
free(m2);
free(result);
free(gold);
cudaFree(d_m1);
cudaFree(d_m2);
cudaFree(d_result);
return 0;
}
|
ca1b40b5826b248ba23dbdd7f7226b63879413f9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include <THH/THHApply.cuh>
#include "common.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
// copied from cutorch/lib/THC/THCTensorRandom.cu
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
#define NUM_BLOCKS(n) min((int)THCCeilDiv(n, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS)
template<typename T>
inline T __device__ curand_uniform_type(hiprandStateMtgp32_t *state);
#ifdef CUDA_HALF_TENSOR
template <>
inline half __device__ curand_uniform_type<half>(hiprandStateMtgp32_t *state) {
return ScalarConvert<float, half>::to(hiprand_uniform(state));
}
#endif
template <>
inline float __device__ curand_uniform_type<float>(hiprandStateMtgp32_t *state) {
return hiprand_uniform(state);
}
template <>
inline double __device__ curand_uniform_type<double>(hiprandStateMtgp32_t *state) {
return hiprand_uniform_double(state);
}
template <typename T>
__global__ void rreluUpdateOutputTrain(int n, hiprandStateMtgp32_t *state,
T *input, T* noise, T *output, double a, double b)
{
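// training path: when input <= 0 the activation is scaled by a random slope r drawn
// uniformly from [a,b] using the per-block MTGP32 state; r is saved in noise so the
// backward pass can apply the same slope, and positive inputs pass through with noise = 1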
CUDA_KERNEL_LOOP(i, n)
{
if (input[i] <= 0)
{
T r = curand_uniform_type<T>(&state[blockIdx.x]);
r = ScalarConvert<double, T>::to(r * (b-a) + a);
output[i] = input[i] * r;
noise[i] = r;
}
else
{
output[i] = input[i];
noise[i] = ScalarConvert<int, T>::to(1);
}
}
}
template <typename T>
struct RReLUUpdateOutputEval_functor
{
const T negSlope_;
RReLUUpdateOutputEval_functor(T negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(T *out, T *in)
{
const T x = *in;
const T r = x <= 0 ? negSlope_ : ScalarConvert<int, T>::to(1);
*out = x * r;
}
};
template <typename T>
struct RReLUUpdateOutputEvalIP_functor
{
const T negSlope_;
RReLUUpdateOutputEvalIP_functor(T negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(T *x)
{
if (*x <= 0)
{
*x = *x * negSlope_;
}
}
};
template <typename T>
struct RReLUupdateGradInputEval_functor
{
const T negSlope_;
RReLUupdateGradInputEval_functor(T negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(T *gradIn, T *gradOut, T *in)
{
*gradIn = (*in) <= 0 ? (*gradOut) * negSlope_ : (*gradOut);
}
};
template <typename T>
struct RReLUupdateGradInputEvalIP_functor
{
const T negSlope_;
RReLUupdateGradInputEvalIP_functor(T negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(T *gradOut, T *in)
{
if (*in <= 0)
{
*gradOut = (*gradOut) * negSlope_;
}
}
};
#include "generic/RReLU.cu"
#include "THHGenerateFloatTypes.h"
|
ca1b40b5826b248ba23dbdd7f7226b63879413f9.cu
|
#include "THCUNN.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include <THC/THCApply.cuh>
#include "common.h"
#include <curand.h>
#include <curand_kernel.h>
// copied from cutorch/lib/THC/THCTensorRandom.cu
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
#define NUM_BLOCKS(n) min((int)THCCeilDiv(n, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS)
template<typename T>
inline T __device__ curand_uniform_type(curandStateMtgp32 *state);
#ifdef CUDA_HALF_TENSOR
template <>
inline half __device__ curand_uniform_type<half>(curandStateMtgp32 *state) {
return ScalarConvert<float, half>::to(curand_uniform(state));
}
#endif
template <>
inline float __device__ curand_uniform_type<float>(curandStateMtgp32 *state) {
return curand_uniform(state);
}
template <>
inline double __device__ curand_uniform_type<double>(curandStateMtgp32 *state) {
return curand_uniform_double(state);
}
template <typename T>
__global__ void rreluUpdateOutputTrain(int n, curandStateMtgp32 *state,
T *input, T* noise, T *output, double a, double b)
{
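// training path: when input <= 0 the activation is scaled by a random slope r drawn
// uniformly from [a,b] using the per-block MTGP32 state; r is saved in noise so the
// backward pass can apply the same slope, and positive inputs pass through with noise = 1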
CUDA_KERNEL_LOOP(i, n)
{
if (input[i] <= 0)
{
T r = curand_uniform_type<T>(&state[blockIdx.x]);
r = ScalarConvert<double, T>::to(r * (b-a) + a);
output[i] = input[i] * r;
noise[i] = r;
}
else
{
output[i] = input[i];
noise[i] = ScalarConvert<int, T>::to(1);
}
}
}
template <typename T>
struct RReLUUpdateOutputEval_functor
{
const T negSlope_;
RReLUUpdateOutputEval_functor(T negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(T *out, T *in)
{
const T x = *in;
const T r = x <= 0 ? negSlope_ : ScalarConvert<int, T>::to(1);
*out = x * r;
}
};
template <typename T>
struct RReLUUpdateOutputEvalIP_functor
{
const T negSlope_;
RReLUUpdateOutputEvalIP_functor(T negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(T *x)
{
if (*x <= 0)
{
*x = *x * negSlope_;
}
}
};
template <typename T>
struct RReLUupdateGradInputEval_functor
{
const T negSlope_;
RReLUupdateGradInputEval_functor(T negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(T *gradIn, T *gradOut, T *in)
{
*gradIn = (*in) <= 0 ? (*gradOut) * negSlope_ : (*gradOut);
}
};
template <typename T>
struct RReLUupdateGradInputEvalIP_functor
{
const T negSlope_;
RReLUupdateGradInputEvalIP_functor(T negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(T *gradOut, T *in)
{
if (*in <= 0)
{
*gradOut = (*gradOut) * negSlope_;
}
}
};
#include "generic/RReLU.cu"
#include "THCGenerateFloatTypes.h"
|
a0823748d27512bbe30e4d40c1e4544d136d4a91.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020 HOOMD-TF Developers
#include "TensorflowCompute.cuh"
#include <iostream>
/*! \file TensorflowCompute.cu
\brief CUDA kernels and functions for TensorflowCompute
*/
extern "C" __global__
void htf_gpu_add_scalar4_kernel(Scalar4 *dest, Scalar4 *src, unsigned int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
dest[i].x += src[i].x;
dest[i].y += src[i].y;
dest[i].z += src[i].z;
dest[i].w += src[i].w;
}
}
hipError_t htf_gpu_add_scalar4(Scalar4 *dest, Scalar4 *src, unsigned int m_N, hipStream_t s)
{
// setup the grid to run the kernel
int block_size = 256;
dim3 grid( (int)ceil((double)m_N / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL(( htf_gpu_add_scalar4_kernel), dim3(grid), dim3(threads), 0, s , dest, src, m_N);
// this method always succeeds.
// If you had a cuda* call in this driver, you could return its error code, if not
// hipSuccess
return hipSuccess;
}
extern "C" __global__
void htf_gpu_add_virial_kernel(Scalar *dest, Scalar *src, unsigned int m_N, unsigned int m_pitch)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < m_N)
{
dest[0 * m_pitch + i] += src[i * 9 + 0]; //xx
dest[1 * m_pitch + i] += src[i * 9 + 1]; //xy
dest[2 * m_pitch + i] += src[i * 9 + 2]; //xz
dest[3 * m_pitch + i] += src[i * 9 + 4]; //yy
dest[4 * m_pitch + i] += src[i * 9 + 5]; //yz
dest[5 * m_pitch + i] += src[i * 9 + 8]; //zz
}
}
hipError_t htf_gpu_add_virial(Scalar *dest, Scalar *src, unsigned int m_N, unsigned int m_pitch, hipStream_t s)
{
// setup the grid to run the kernel
int block_size = 256;
dim3 grid( (int)ceil((double)m_N / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL(( htf_gpu_add_virial_kernel), dim3(grid), dim3(threads), 0, s , dest, src, m_N, m_pitch);
// this method always succeeds.
// If you had a cuda* call in this driver, you could return its error code, if not
// hipSuccess
return hipSuccess;
}
#include "hoomd/TextureTools.h"
#include "hoomd/Index1D.h"
#include <assert.h>
//! Texture for reading the neighbor list
texture<unsigned int, 1, hipReadModeElementType> nlist_tex;
__global__ void htf_gpu_reshape_nlist_kernel(Scalar4* dest,
const unsigned int N,
const unsigned int NN,
const unsigned int offset,
const unsigned int batch_size,
const Scalar4 *d_pos,
const BoxDim box,
const unsigned int *d_n_neigh,
const unsigned int *d_nlist,
const unsigned int *d_head_list,
double rmax)
{
// start by identifying which particle we are to handle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x + offset;
if (idx >= N || idx - offset >= batch_size)
return;
// load in the length of the list
unsigned int n_neigh = d_n_neigh[idx];
const unsigned int head_idx = d_head_list[idx];
// read in the position of our particle. Texture reads of Scalar4's are faster than global reads on compute 1.0 hardware
Scalar4 postype = __ldg(d_pos + idx);
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
unsigned int typei = __scalar_as_int(postype.w);
// prefetch neighbor index
unsigned int cur_neigh = 0;
unsigned int next_neigh(0);
next_neigh = __ldg(d_nlist + head_idx);
unsigned int dest_idx = 0;
for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++)
{
// read the current neighbor index
// prefetch the next value and set the current one
cur_neigh = next_neigh;
next_neigh = __ldg(d_nlist + head_idx + neigh_idx+1);
// get the neighbor's position
Scalar4 neigh_postype = __ldg(d_pos + cur_neigh);
Scalar3 neigh_pos = make_scalar3(neigh_postype.x, neigh_postype.y, neigh_postype.z);
// calculate dr (with periodic boundary conditions)
Scalar3 dx = neigh_pos - pos;
// apply periodic boundary conditions
dx = box.minImage(dx);
// access needed parameters
unsigned int typej = __scalar_as_int(neigh_postype.w);
// calculate r
Scalar rsq = dot(dx, dx);
if (rsq < (rmax * rmax))
{
dest[(idx - offset) * NN + dest_idx].x = dx.x;
dest[(idx - offset) * NN + dest_idx].y = dx.y;
dest[(idx - offset) * NN + dest_idx].z = dx.z;
dest[(idx - offset) * NN + dest_idx].w = static_cast<Scalar> (typej);
dest_idx += 1;
// prevent overflow. Note this should not happen
// we check for it later, but this prevents
// illegal mem access
dest_idx %= NN;
}
}
}
hipError_t htf_gpu_reshape_nlist(Scalar4* dest,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int NN,
const unsigned int offset,
const unsigned int batch_size,
const unsigned int n_ghost,
const BoxDim& box,
const unsigned int *d_n_neigh,
const unsigned int *d_nlist,
const unsigned int *d_head_list,
const unsigned int size_nlist,
const unsigned int block_size,
const unsigned int compute_capability,
const unsigned int max_tex1d_width,
double rmax,
hipStream_t stream)
{
assert(d_pos);
assert(dest);
assert(d_n_neigh);
assert(d_nlist);
assert(d_head_list);
//set neighbors to zeros
hipMemset(dest, 1, batch_size * NN * sizeof(Scalar4));
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, htf_gpu_reshape_nlist_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// setup the grid to run the kernel
dim3 grid( batch_size / run_block_size + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
hipLaunchKernelGGL(( htf_gpu_reshape_nlist_kernel), dim3(grid), dim3(threads), 0, stream, dest,
N,
NN,
offset,
batch_size,
d_pos,
box,
d_n_neigh,
d_nlist,
d_head_list,
rmax);
return hipSuccess;
}
|
a0823748d27512bbe30e4d40c1e4544d136d4a91.cu
|
// Copyright (c) 2020 HOOMD-TF Developers
#include "TensorflowCompute.cuh"
#include <iostream>
/*! \file TensorflowCompute.cu
\brief CUDA kernels and functions for TensorflowCompute
*/
extern "C" __global__
void htf_gpu_add_scalar4_kernel(Scalar4 *dest, Scalar4 *src, unsigned int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
dest[i].x += src[i].x;
dest[i].y += src[i].y;
dest[i].z += src[i].z;
dest[i].w += src[i].w;
}
}
cudaError_t htf_gpu_add_scalar4(Scalar4 *dest, Scalar4 *src, unsigned int m_N, cudaStream_t s)
{
// setup the grid to run the kernel
int block_size = 256;
dim3 grid( (int)ceil((double)m_N / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
htf_gpu_add_scalar4_kernel<<< grid, threads, 0, s >>>(dest, src, m_N);
// this method always succeeds.
// If you had a cuda* call in this driver, you could return its error code, if not
// cudaSuccess
return cudaSuccess;
}
extern "C" __global__
void htf_gpu_add_virial_kernel(Scalar *dest, Scalar *src, unsigned int m_N, unsigned int m_pitch)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < m_N)
{
dest[0 * m_pitch + i] += src[i * 9 + 0]; //xx
dest[1 * m_pitch + i] += src[i * 9 + 1]; //xy
dest[2 * m_pitch + i] += src[i * 9 + 2]; //xz
dest[3 * m_pitch + i] += src[i * 9 + 4]; //yy
dest[4 * m_pitch + i] += src[i * 9 + 5]; //yz
dest[5 * m_pitch + i] += src[i * 9 + 8]; //zz
}
}
cudaError_t htf_gpu_add_virial(Scalar *dest, Scalar *src, unsigned int m_N, unsigned int m_pitch, cudaStream_t s)
{
// setup the grid to run the kernel
int block_size = 256;
dim3 grid( (int)ceil((double)m_N / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
htf_gpu_add_virial_kernel<<< grid, threads, 0, s >>>(dest, src, m_N, m_pitch);
// this method always succeeds.
// If you had a cuda* call in this driver, you could return its error code, if not
// cudaSuccess
return cudaSuccess;
}
#include "hoomd/TextureTools.h"
#include "hoomd/Index1D.h"
#include <assert.h>
//! Texture for reading the neighbor list
texture<unsigned int, 1, cudaReadModeElementType> nlist_tex;
__global__ void htf_gpu_reshape_nlist_kernel(Scalar4* dest,
const unsigned int N,
const unsigned int NN,
const unsigned int offset,
const unsigned int batch_size,
const Scalar4 *d_pos,
const BoxDim box,
const unsigned int *d_n_neigh,
const unsigned int *d_nlist,
const unsigned int *d_head_list,
double rmax)
{
// start by identifying which particle we are to handle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x + offset;
if (idx >= N || idx - offset >= batch_size)
return;
// load in the length of the list
unsigned int n_neigh = d_n_neigh[idx];
const unsigned int head_idx = d_head_list[idx];
    // read in the position of our particle via a read-only cache (__ldg) load
Scalar4 postype = __ldg(d_pos + idx);
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
unsigned int typei = __scalar_as_int(postype.w);
// prefetch neighbor index
unsigned int cur_neigh = 0;
unsigned int next_neigh(0);
next_neigh = __ldg(d_nlist + head_idx);
unsigned int dest_idx = 0;
for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++)
{
// read the current neighbor index
// prefetch the next value and set the current one
cur_neigh = next_neigh;
next_neigh = __ldg(d_nlist + head_idx + neigh_idx+1);
// get the neighbor's position
Scalar4 neigh_postype = __ldg(d_pos + cur_neigh);
Scalar3 neigh_pos = make_scalar3(neigh_postype.x, neigh_postype.y, neigh_postype.z);
// calculate dr (with periodic boundary conditions)
Scalar3 dx = neigh_pos - pos;
// apply periodic boundary conditions
dx = box.minImage(dx);
// access needed parameters
unsigned int typej = __scalar_as_int(neigh_postype.w);
// calculate r
Scalar rsq = dot(dx, dx);
if (rsq < (rmax * rmax))
{
dest[(idx - offset) * NN + dest_idx].x = dx.x;
dest[(idx - offset) * NN + dest_idx].y = dx.y;
dest[(idx - offset) * NN + dest_idx].z = dx.z;
dest[(idx - offset) * NN + dest_idx].w = static_cast<Scalar> (typej);
dest_idx += 1;
// prevent overflow. Note this should not happen
// we check for it later, but this prevents
            // illegal memory access
dest_idx %= NN;
}
}
}
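// Output layout sketch (derived from the kernel above): dest is a dense
// [batch_size x NN] array of Scalar4 entries covering particles in
// [offset, offset + batch_size); entry j of row (idx - offset) holds the j-th
// neighbor within rmax as (dx.x, dx.y, dx.z, type_j), with dx already wrapped by
// box.minImage. Slots beyond the last accepted neighbor keep whatever the caller
// initialized them to (the driver below memsets the buffer before the launch).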
cudaError_t htf_gpu_reshape_nlist(Scalar4* dest,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int NN,
const unsigned int offset,
const unsigned int batch_size,
const unsigned int n_ghost,
const BoxDim& box,
const unsigned int *d_n_neigh,
const unsigned int *d_nlist,
const unsigned int *d_head_list,
const unsigned int size_nlist,
const unsigned int block_size,
const unsigned int compute_capability,
const unsigned int max_tex1d_width,
double rmax,
cudaStream_t stream)
{
assert(d_pos);
assert(dest);
assert(d_n_neigh);
assert(d_nlist);
assert(d_head_list);
    // initialize the neighbor buffer (cudaMemset fills every byte with 1, so padding entries are not floating-point zeros)
cudaMemset(dest, 1, batch_size * NN * sizeof(Scalar4));
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, htf_gpu_reshape_nlist_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// setup the grid to run the kernel
dim3 grid( batch_size / run_block_size + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
htf_gpu_reshape_nlist_kernel<<< grid, threads, 0, stream>>>(dest,
N,
NN,
offset,
batch_size,
d_pos,
box,
d_n_neigh,
d_nlist,
d_head_list,
rmax);
return cudaSuccess;
}
|
5aee422566b0e85eafc981bcd28c24fb5b1b6c90.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 65536
__global__ void add( int *a, int *b, int *c )
{
	int tid = blockIdx.x; // one of the CUDA runtime's built-in variables
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int main()
{
int a[N], b[N], c[N];
	int *dev_a, *dev_b, *dev_c; // declare device pointer variables
	// allocate GPU memory.
	hipMalloc((void**)&dev_a, N * sizeof(int)); // one way to allocate memory in CUDA; cudaMallocManaged also exists
hipMalloc((void**)&dev_b, N * sizeof(int));
hipMalloc((void**)&dev_c, N * sizeof(int));
	for (int i = 0; i < N; i++) // initialize the matrix values on the CPU
{
a[i] = -i;
b[i] = i * i;
}
	// copy arrays 'a' and 'b' from the CPU to the GPU.
hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
	// perform the addition on the GPU
	add << <N, 1 >> > (dev_a, dev_b, dev_c); // <<<number of parallel blocks, ? >>>
	// copy array 'c' from the GPU back to the CPU.
hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost);
	// print the results.
for (int i = 0; i < N; i++)
{
printf("%d + %d = %d \n", a[i], b[i], c[i]);
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
|
5aee422566b0e85eafc981bcd28c24fb5b1b6c90.cu
|
#include <stdio.h>
#define N 65536
__global__ void add( int *a, int *b, int *c )
{
	int tid = blockIdx.x; // one of the CUDA runtime's built-in variables
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int main()
{
int a[N], b[N], c[N];
	int *dev_a, *dev_b, *dev_c; // declare pointer variables
	// allocate GPU memory.
	cudaMalloc((void**)&dev_a, N * sizeof(int)); // one way to allocate memory in CUDA; cudaMallocManaged also exists
cudaMalloc((void**)&dev_b, N * sizeof(int));
cudaMalloc((void**)&dev_c, N * sizeof(int));
	for (int i = 0; i < N; i++) // initialize the matrix values on the CPU
{
a[i] = -i;
b[i] = i * i;
}
	// copy arrays 'a' and 'b' from the CPU to the GPU.
cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
	// perform the addition on the GPU
	add << <N, 1 >> > (dev_a, dev_b, dev_c); // <<<number of parallel blocks, ? >>>
	// copy array 'c' from the GPU back to the CPU.
cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
	// print the results.
for (int i = 0; i < N; i++)
{
printf("%d + %d = %d \n", a[i], b[i], c[i]);
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
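// Launch-configuration note (an illustrative alternative, not what the code above
// uses): add<<<N, 1>>> runs one thread per block, which wastes most of each warp.
// A more typical configuration groups threads into blocks, e.g.
//     add<<<(N + 255) / 256, 256>>>(dev_a, dev_b, dev_c);
// with the kernel index computed as
//     int tid = blockIdx.x * blockDim.x + threadIdx.x;
// and the existing `if (tid < N)` guard handling the final partial block.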
|
9756983aeeca5ce62b6645fa45837fb0d9b08947.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "BlockRenderer.cuh"
BlockRenderer::BlockConfiguration::BlockConfiguration(int blockCutPerCpuThread, int blockCutPerGpuSM, bool forceDeviceInstanceUpdate) {
blocksPerCpuThread = blockCutPerCpuThread;
blocksPerGpuSM = blockCutPerGpuSM;
forceUpdateDeviceInstance = forceDeviceInstanceUpdate;
}
int BlockRenderer::BlockConfiguration::blockCutPerCpuThread()const { return blocksPerCpuThread; }
int BlockRenderer::BlockConfiguration::blockCutPerGpuSM()const { return blocksPerGpuSM; }
bool BlockRenderer::BlockConfiguration::forceDeviceInstanceUpdate()const { return forceUpdateDeviceInstance; }
BlockRenderer::BlockRenderer(
const ThreadConfiguration &configuration,
const BlockConfiguration &blockSettings,
FrameBufferManager *buffer) : BufferedRenderer(configuration, buffer) {
blockConfiguration = blockSettings;
threadData.flush(deviceThreadCount() + hostThreadCount());
hostBlockSynchNeeded = (
(threadConfiguration().numActiveDevices() > 1)
|| (threadConfiguration().numHostThreads() > 0)
|| blockConfiguration.forceDeviceInstanceUpdate());
}
BlockRenderer::~BlockRenderer() { killRenderThreads(); }
const BlockRenderer::BlockConfiguration &BlockRenderer::blockRendererConfiguration() const { return blockConfiguration; }
bool BlockRenderer::automaticallySynchesHostBlocks()const { return hostBlockSynchNeeded; }
bool BlockRenderer::setupSharedData(const Info &, void *&) {
/*
size_t stackSize;
if (hipDeviceGetLimit(&stackSize, hipLimitStackSize) != hipSuccess) return false;
const int neededStackSize = 8192;
if (stackSize < neededStackSize) if (hipDeviceSetLimit(hipLimitStackSize, neededStackSize) != hipSuccess) return false;
//*/
return true;
}
bool BlockRenderer::setupData(const Info &info, void *&) {
// __TODO__: (maybe) record the errors somehow...
if (info.isGPU()) {
FrameBuffer::DeviceBlockManager *manager = new FrameBuffer::DeviceBlockManager(
info.device, (hostBlockSynchNeeded ?
(FrameBuffer::DeviceBlockManager::Settings)FrameBuffer::DeviceBlockManager::CUDA_RENDER_STREAM_AUTO_SYNCH_ON_GET :
(FrameBuffer::DeviceBlockManager::Settings)FrameBuffer::DeviceBlockManager::CUDA_MANUALLY_SYNCH_HOST_BLOCKS),
blockConfiguration.blockCutPerGpuSM());
if (manager == NULL) return false; // ALLOCATION FAILURE...
else if (manager->errors() != 0) { delete manager; return false; } // INTERNAL ERRORS...
threadData[info.globalThreadId].blockManager = manager;
}
return true;
}
bool BlockRenderer::prepareIteration() {
// __TODO__: (maybe) record the errors somehow...
FrameBufferManager *manager = getFrameBuffer();
if (manager == NULL) return false;
FrameBuffer *cpuHandle = manager->cpuHandle();
if (cpuHandle == NULL) return false;
else if (cpuHandle->object() == NULL) return false;
blockBank.reset(*cpuHandle);
return true;
}
void BlockRenderer::iterateCPU(const Info &info) {
// __TODO__: (maybe) record the errors somehow...
FrameBuffer *buffer = getFrameBuffer()->cpuHandle();
if (buffer == NULL) return;
int start, end;
while ((!renderInterrupted()) && blockBank.getBlocks(4, &start, &end))
if (!renderBlocksCPU(info, buffer, start, end)) return;
}
void BlockRenderer::iterateGPU(const Info &info) {
// __TODO__: (maybe) record the errors somehow...
FrameBuffer *host = getFrameBuffer()->cpuHandle();
if (host == NULL) return;
FrameBuffer *device = getFrameBuffer()->gpuHandle(info.device);
if (device == NULL) return;
FrameBuffer::DeviceBlockManager *blockManager = threadData[info.globalThreadId].blockManager;
if (blockManager == NULL) return; // NORMALLY, THIS SHOULD NOT HAPPEN AT ALL...
if (!blockManager->setBuffers(host, device, &blockBank)) return;
bool synchNeeded = ((iteration() > 1) && hostBlockSynchNeeded);
int start = 0, end = 0;
hipStream_t &renderStream = blockManager->getRenderStream();
while ((!renderInterrupted()) && blockManager->getBlocks(start, end, synchNeeded))
if (!renderBlocksGPU(info, host, device, start, end, renderStream)) {
if (hipGetLastError() != hipSuccess) threadConfiguration().configureGPU(info.device, 0);
return;
}
// __TODO__: record errors if (blockManager->errors() != 0)
if (hostBlockSynchNeeded) { blockManager->synchBlockSynchStream(); /* THIS MAY FAIL AS WELL.. */ }
else blockManager->synchRenderStream(); /* THIS MAY FAIL AS WELL.. */
}
bool BlockRenderer::completeIteration() {
// __TODO__: (maybe) record the errors somehow...
// __TODO__: return false if any error was detected...
return true;
}
bool BlockRenderer::clearData(const Info &info, void *&) {
// __TODO__: (maybe) record the errors somehow...
if (info.isGPU()) {
if (threadData[info.globalThreadId].blockManager == NULL) return false;
delete threadData[info.globalThreadId].blockManager;
threadData[info.globalThreadId].blockManager = NULL;
}
return true;
}
bool BlockRenderer::clearSharedData(const Info &, void *&) { return true; }
|
9756983aeeca5ce62b6645fa45837fb0d9b08947.cu
|
#include "BlockRenderer.cuh"
BlockRenderer::BlockConfiguration::BlockConfiguration(int blockCutPerCpuThread, int blockCutPerGpuSM, bool forceDeviceInstanceUpdate) {
blocksPerCpuThread = blockCutPerCpuThread;
blocksPerGpuSM = blockCutPerGpuSM;
forceUpdateDeviceInstance = forceDeviceInstanceUpdate;
}
int BlockRenderer::BlockConfiguration::blockCutPerCpuThread()const { return blocksPerCpuThread; }
int BlockRenderer::BlockConfiguration::blockCutPerGpuSM()const { return blocksPerGpuSM; }
bool BlockRenderer::BlockConfiguration::forceDeviceInstanceUpdate()const { return forceUpdateDeviceInstance; }
BlockRenderer::BlockRenderer(
const ThreadConfiguration &configuration,
const BlockConfiguration &blockSettings,
FrameBufferManager *buffer) : BufferedRenderer(configuration, buffer) {
blockConfiguration = blockSettings;
threadData.flush(deviceThreadCount() + hostThreadCount());
hostBlockSynchNeeded = (
(threadConfiguration().numActiveDevices() > 1)
|| (threadConfiguration().numHostThreads() > 0)
|| blockConfiguration.forceDeviceInstanceUpdate());
}
BlockRenderer::~BlockRenderer() { killRenderThreads(); }
const BlockRenderer::BlockConfiguration &BlockRenderer::blockRendererConfiguration() const { return blockConfiguration; }
bool BlockRenderer::automaticallySynchesHostBlocks()const { return hostBlockSynchNeeded; }
bool BlockRenderer::setupSharedData(const Info &, void *&) {
/*
size_t stackSize;
if (cudaDeviceGetLimit(&stackSize, cudaLimitStackSize) != cudaSuccess) return false;
const int neededStackSize = 8192;
if (stackSize < neededStackSize) if (cudaDeviceSetLimit(cudaLimitStackSize, neededStackSize) != cudaSuccess) return false;
//*/
return true;
}
bool BlockRenderer::setupData(const Info &info, void *&) {
// __TODO__: (maybe) record the errors somehow...
if (info.isGPU()) {
FrameBuffer::DeviceBlockManager *manager = new FrameBuffer::DeviceBlockManager(
info.device, (hostBlockSynchNeeded ?
(FrameBuffer::DeviceBlockManager::Settings)FrameBuffer::DeviceBlockManager::CUDA_RENDER_STREAM_AUTO_SYNCH_ON_GET :
(FrameBuffer::DeviceBlockManager::Settings)FrameBuffer::DeviceBlockManager::CUDA_MANUALLY_SYNCH_HOST_BLOCKS),
blockConfiguration.blockCutPerGpuSM());
if (manager == NULL) return false; // ALLOCATION FAILURE...
else if (manager->errors() != 0) { delete manager; return false; } // INTERNAL ERRORS...
threadData[info.globalThreadId].blockManager = manager;
}
return true;
}
bool BlockRenderer::prepareIteration() {
// __TODO__: (maybe) record the errors somehow...
FrameBufferManager *manager = getFrameBuffer();
if (manager == NULL) return false;
FrameBuffer *cpuHandle = manager->cpuHandle();
if (cpuHandle == NULL) return false;
else if (cpuHandle->object() == NULL) return false;
blockBank.reset(*cpuHandle);
return true;
}
void BlockRenderer::iterateCPU(const Info &info) {
// __TODO__: (maybe) record the errors somehow...
FrameBuffer *buffer = getFrameBuffer()->cpuHandle();
if (buffer == NULL) return;
int start, end;
while ((!renderInterrupted()) && blockBank.getBlocks(4, &start, &end))
if (!renderBlocksCPU(info, buffer, start, end)) return;
}
void BlockRenderer::iterateGPU(const Info &info) {
// __TODO__: (maybe) record the errors somehow...
FrameBuffer *host = getFrameBuffer()->cpuHandle();
if (host == NULL) return;
FrameBuffer *device = getFrameBuffer()->gpuHandle(info.device);
if (device == NULL) return;
FrameBuffer::DeviceBlockManager *blockManager = threadData[info.globalThreadId].blockManager;
if (blockManager == NULL) return; // NORMALLY, THIS SHOULD NOT HAPPEN AT ALL...
if (!blockManager->setBuffers(host, device, &blockBank)) return;
bool synchNeeded = ((iteration() > 1) && hostBlockSynchNeeded);
int start = 0, end = 0;
cudaStream_t &renderStream = blockManager->getRenderStream();
while ((!renderInterrupted()) && blockManager->getBlocks(start, end, synchNeeded))
if (!renderBlocksGPU(info, host, device, start, end, renderStream)) {
if (cudaGetLastError() != cudaSuccess) threadConfiguration().configureGPU(info.device, 0);
return;
}
// __TODO__: record errors if (blockManager->errors() != 0)
if (hostBlockSynchNeeded) { blockManager->synchBlockSynchStream(); /* THIS MAY FAIL AS WELL.. */ }
else blockManager->synchRenderStream(); /* THIS MAY FAIL AS WELL.. */
}
bool BlockRenderer::completeIteration() {
// __TODO__: (maybe) record the errors somehow...
// __TODO__: return false if any error was detected...
return true;
}
bool BlockRenderer::clearData(const Info &info, void *&) {
// __TODO__: (maybe) record the errors somehow...
if (info.isGPU()) {
if (threadData[info.globalThreadId].blockManager == NULL) return false;
delete threadData[info.globalThreadId].blockManager;
threadData[info.globalThreadId].blockManager = NULL;
}
return true;
}
bool BlockRenderer::clearSharedData(const Info &, void *&) { return true; }
|
b17a374bc7572187939c87f2a06ca3600eeaf8cb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/nested/NestedTensorBinaryOps.h>
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/MemoryAccess.cuh>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#define BLOCK_DIM 256
namespace at {
namespace native {
// only for nested [B, *, D], dense [B, 1, D]
template <typename T, typename func_t>
__global__ void op_dense_esuhm(
const T* input,
const T* dense,
T* output,
int64_t embedding_dim,
const int64_t* offsets,
const func_t& f)
{
// each batch is handled by a block
const int64_t batch_idx = blockIdx.x;
const int64_t grain_size = blockDim.x;
const int64_t tid = threadIdx.x;
const int64_t range = offsets[batch_idx + 1] - offsets[batch_idx];
// each thread handles (embedding_dim // grain_size + (embedding_dim % grain_size <= tid)) elems
// of the dense embedding
for (int64_t idx = tid; idx < embedding_dim; idx += grain_size) {
const T dense_elem = dense[batch_idx * embedding_dim + idx];
for (int64_t nested_idx = idx; nested_idx < range; nested_idx += embedding_dim) {
output[offsets[batch_idx] + nested_idx] = f(input[offsets[batch_idx] + nested_idx], dense_elem);
}
}
}
template <typename T, typename func_t>
void nested_op_dense_kernelLauncher(
const T* input, // [sum(*) x embedding_dim]
const T* dense, // [batch_size x embedding_dim]
T* output, // [sum(*) x embedding_dim]
int64_t batch_size,
int64_t embedding_dim,
const int64_t* input_offsets, // [batch_size]
func_t f)
{
dim3 grid;
grid.x = batch_size;
const auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( op_dense_esuhm), dim3(grid), dim3(BLOCK_DIM), 0, stream,
input,
dense,
output,
embedding_dim,
input_offsets,
f);
}
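// Launch-shape note (describing what the launcher above sets up): grid.x = batch_size
// gives one block per nested batch entry, and BLOCK_DIM (256) threads in that block
// stride across the embedding_dim columns of the dense operand, so no bounds check
// beyond the kernel's `idx < embedding_dim` loop condition is needed.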
template <typename scalar_t, typename func_t>
void _nested_op_dense_esuhm_kernel(Tensor& result, const Tensor& self, const Tensor& other, func_t f) {
auto self_ptr = get_nested_tensor_impl(self);
auto result_ptr = get_nested_tensor_impl(result);
const auto self_buffer = self_ptr->get_buffer();
const auto offsets = self_ptr->get_storage_offsets();
const auto batch_size = other.size(0);
const auto embedding_size = other.size(2);
auto result_buffer = result_ptr->get_buffer();
auto result_offsets = at::cat({offsets, at::tensor(self_ptr->numel())});
result_offsets = result_offsets.to(kCUDA);
const scalar_t* self_data_ptr = self_buffer.data_ptr<scalar_t>();
const scalar_t* other_data_ptr = other.data_ptr<scalar_t>();
scalar_t* result_data_ptr = result_buffer.data_ptr<scalar_t>();
int64_t* result_offsets_ptr = result_offsets.data_ptr<int64_t>();
nested_op_dense_kernelLauncher(
self_data_ptr,
other_data_ptr,
result_data_ptr,
batch_size,
embedding_size,
result_offsets_ptr,
f);
}
void _nested_op_dense_esuhm_cuda(Tensor& result, const Tensor& self, const Tensor& other, const NESTED_DENSE_OP& op) {
AT_DISPATCH_ALL_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "_nested_op_dense_esuhm", [&]() {
switch (op) {
case NESTED_DENSE_OP::ADD :
_nested_op_dense_esuhm_kernel<scalar_t>(result, self, other, [] __host__ __device__ (scalar_t a, scalar_t b) -> scalar_t { return a + b; });
break;
case NESTED_DENSE_OP::MUL :
_nested_op_dense_esuhm_kernel<scalar_t>(result, self, other, [] __host__ __device__ (scalar_t a, scalar_t b) -> scalar_t { return a * b; });
break;
}
});
}
REGISTER_CUDA_DISPATCH(nested_dense_elementwise_stub, &_nested_op_dense_esuhm_cuda);
} // namespace native
} // namespace at
|
b17a374bc7572187939c87f2a06ca3600eeaf8cb.cu
|
#include <ATen/native/nested/NestedTensorBinaryOps.h>
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/MemoryAccess.cuh>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/cuda/CUDAStream.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#define BLOCK_DIM 256
namespace at {
namespace native {
// only for nested [B, *, D], dense [B, 1, D]
template <typename T, typename func_t>
__global__ void op_dense_esuhm(
const T* input,
const T* dense,
T* output,
int64_t embedding_dim,
const int64_t* offsets,
const func_t& f)
{
// each batch is handled by a block
const int64_t batch_idx = blockIdx.x;
const int64_t grain_size = blockDim.x;
const int64_t tid = threadIdx.x;
const int64_t range = offsets[batch_idx + 1] - offsets[batch_idx];
// each thread handles (embedding_dim // grain_size + (embedding_dim % grain_size <= tid)) elems
// of the dense embedding
for (int64_t idx = tid; idx < embedding_dim; idx += grain_size) {
const T dense_elem = dense[batch_idx * embedding_dim + idx];
for (int64_t nested_idx = idx; nested_idx < range; nested_idx += embedding_dim) {
output[offsets[batch_idx] + nested_idx] = f(input[offsets[batch_idx] + nested_idx], dense_elem);
}
}
}
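// Worked indexing example (illustrative numbers, not from the original source): with
// embedding_dim = 512, blockDim.x = 256 and a nested row count of 3 for this batch
// entry (range = 1536), thread 0 handles dense columns 0 and 256; for column 0 its
// inner loop visits nested offsets 0, 512 and 1024 of this batch's slice, pairing
// each nested element with dense[batch_idx * 512 + 0].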
template <typename T, typename func_t>
void nested_op_dense_kernelLauncher(
const T* input, // [sum(*) x embedding_dim]
const T* dense, // [batch_size x embedding_dim]
T* output, // [sum(*) x embedding_dim]
int64_t batch_size,
int64_t embedding_dim,
const int64_t* input_offsets, // [batch_size]
func_t f)
{
dim3 grid;
grid.x = batch_size;
const auto stream = at::cuda::getCurrentCUDAStream();
op_dense_esuhm<<<grid, BLOCK_DIM, 0, stream>>>(
input,
dense,
output,
embedding_dim,
input_offsets,
f);
}
template <typename scalar_t, typename func_t>
void _nested_op_dense_esuhm_kernel(Tensor& result, const Tensor& self, const Tensor& other, func_t f) {
auto self_ptr = get_nested_tensor_impl(self);
auto result_ptr = get_nested_tensor_impl(result);
const auto self_buffer = self_ptr->get_buffer();
const auto offsets = self_ptr->get_storage_offsets();
const auto batch_size = other.size(0);
const auto embedding_size = other.size(2);
auto result_buffer = result_ptr->get_buffer();
auto result_offsets = at::cat({offsets, at::tensor(self_ptr->numel())});
result_offsets = result_offsets.to(kCUDA);
const scalar_t* self_data_ptr = self_buffer.data_ptr<scalar_t>();
const scalar_t* other_data_ptr = other.data_ptr<scalar_t>();
scalar_t* result_data_ptr = result_buffer.data_ptr<scalar_t>();
int64_t* result_offsets_ptr = result_offsets.data_ptr<int64_t>();
nested_op_dense_kernelLauncher(
self_data_ptr,
other_data_ptr,
result_data_ptr,
batch_size,
embedding_size,
result_offsets_ptr,
f);
}
void _nested_op_dense_esuhm_cuda(Tensor& result, const Tensor& self, const Tensor& other, const NESTED_DENSE_OP& op) {
AT_DISPATCH_ALL_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "_nested_op_dense_esuhm", [&]() {
switch (op) {
case NESTED_DENSE_OP::ADD :
_nested_op_dense_esuhm_kernel<scalar_t>(result, self, other, [] __host__ __device__ (scalar_t a, scalar_t b) -> scalar_t { return a + b; });
break;
case NESTED_DENSE_OP::MUL :
_nested_op_dense_esuhm_kernel<scalar_t>(result, self, other, [] __host__ __device__ (scalar_t a, scalar_t b) -> scalar_t { return a * b; });
break;
}
});
}
REGISTER_CUDA_DISPATCH(nested_dense_elementwise_stub, &_nested_op_dense_esuhm_cuda);
} // namespace native
} // namespace at
|