hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
b043fe798f9f1a72b6704e86cb54bb114d2e08c7.hip | // !!! This is a file automatically generated by hipify!!!
//#include<iostream>
//#include<fstream>
//#include<math.h>
//#include<string>
//#include<stdio.h>
//#include<cuda_runtime.h>
//#include<device_launch_parameters.h>
//using namespace std;
#include <iostream>
#include <fstream>
#include <iomanip>
#include <string>
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <time.h>
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/gpu/gpu.hpp"
#include "bmpio.h"
//using namespace std;
using std::ifstream;
using std::string;
using std::cout;
using std::endl;
using std::ios;
using std::setiosflags;
using std::setprecision;
using namespace cv;
//#define length 8
#define PI 3.14159265
#define length 256
#define block_len 16
#define IMG_WIDTH 480
#define IMG_HEIGHT 270
hipError_t dctWithCuda_1(const float *d, float *D);
hipError_t dctWithCuda_2(const float *f, float *F, int nwidth, int nheight);
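// Reference note: dct() below computes a 2-D DCT-II as two 1-D passes (rows,
// then columns). The 1-D DCT-II of x[0..N-1] with N == length is
//   X[0] = sqrt(1/N) * sum_j x[j]
//   X[k] = sqrt(2/N) * sum_j x[j] * cos((2*j+1)*k*PI/(2*N)),  k = 1..N-1
// dct_1() is the same computation with one thread block per row (and per
// column in the second pass).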
void dct(float *f, float *F){
int i,j,t;
//float data[length]={0.0};
float tmp;
float data[length] = {0.0};
for(t=0; t<length; t++)
{
for (i=0; i<length; i++)
data[i] = f[t*length+i];//load row data from f.
for(i=0; i<length; i++)
{
if(i==0)
{
tmp = (float)(1.0/sqrt(1.0*length));
F[t*length+i] = 0.0;//why use F[bid]? Do transpose at the same time.
for(j=0; j<length; j++)
F[t*length+i] +=data[j] ;
F[t*length] *= tmp;
}
else
{
tmp = (float)(sqrt(2.0/(1.0*length)));
for(i=1; i<length; i++){
F[t*length+i] = 0;
for(j=0; j<length; j++)
F[t*length+i] += (float)(data[j]*cos((2*j+1)*i*PI/(2.0*length)));
F[t*length+i] *= tmp;
}
}
}
}
for(t=0; t<length; t++)
{
for(i=0; i<length; i++)
data[i] = F[i*length+t];
for(i=0; i<length; i++)
{
if(i==0)
{
tmp=(float)(1.0/sqrt(1.0*length));
F[t]=0;
for(j=0; j<length; j++)
F[t] += data[j];
F[t] *= tmp;
}
else
{
tmp = (float)(sqrt(2.0/(1.0*length)));
for(i=1; i<length; i++)
{
F[i*length+t] = 0;
for(j=0; j<length; j++)
F[i*length+t] += (float)(data[j]*cos((2*j+1)*i*PI/(2*length)));
F[i*length+t] *= tmp;
}
}
}
}
}
__global__ void dct_1(const float *f,float *F){
int bid = blockIdx.x;
//int tid = threadIdx.x;
int i,j;
//float data[length]={0.0};
float tmp;
//printf("");
if(bid<length){
float data[length];
for (i=0; i<length; i++)
data[i] = f[bid*length+i];//load row data from f.
__syncthreads();
for(i=0; i<length; i++){
if(i==0){
tmp = (float)(1.0/sqrt(1.0*length));
F[bid * length + i] = 0.0;
for(j=0; j<length; j++)
F[bid*length+i] +=data[j] ;
F[bid*length] *= tmp;
}
else{
tmp = (float)(sqrt(2.0/(1.0*length)));
for(i=1; i<length; i++){
F[bid*length+i] = 0;
for(j=0; j<length; j++)
F[bid*length+i] += (float)(data[j]*cos((2*j+1)*i*PI/(2*length)));
F[bid*length+i] *= tmp;
}
}
}
__syncthreads();
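// Note: __syncthreads() only synchronizes threads within this block (a single
// thread per block in this launch). The column pass below reads rows produced
// by other blocks, which this barrier cannot order; a grid-wide
// synchronization (e.g. a second kernel launch) would be needed for that.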
for(i=0; i<length; i++)
data[i] = F[i*length+bid];
__syncthreads();
for(i=0; i<length; i++){
if(i==0){
tmp=(float)(1.0/sqrt(1.0*length));
F[bid]=0;
for(j=0; j<length; j++)
F[bid] += data[j];
F[bid] *= tmp;
}
else{
tmp = (float)(sqrt(2.0/(1.0*length)));
for(i=1; i<length; i++){
F[i*length+bid] = 0;
for(j=0; j<length; j++)
F[i*length+bid] += (float)(data[j]*cos((2*j+1)*i*PI/(2*length)));
F[i*length+bid] *= tmp;
}
}
}
__syncthreads();
}
}
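// dct_2: direct 2-D DCT-II. Each thread computes a single coefficient
//   F[v][u] = alpha(v) * beta(u) * sum_{y,x} f[y][x]
//             * cos((2*x+1)*u*PI/(2*W)) * cos((2*y+1)*v*PI/(2*H))
// with beta = sqrt(1/W) or sqrt(2/W) and alpha = sqrt(1/H) or sqrt(2/H),
// so every thread scans the whole W*H input.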
__global__ void dct_2(const float *f, float *F, int nwidth, int nheight){
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
int index = tidy * nwidth + tidx;
int i;
float tmp = 0.0f; // accumulator; must start at zero
float beta ,alfa;
if(tidx == 0)
beta = sqrt(1.0/ nwidth);
else
beta = sqrt(2.0/ nwidth);
if(tidy == 0)
alfa = sqrt(1.0/ nheight);
else
alfa = sqrt(2.0/ nheight);
if(tidx< nwidth && tidy< nheight)
{
for(i=0; i< nwidth * nheight; i++)
{
int x = i % nwidth;
int y = i / nwidth;
tmp += f[i]*::cos((2*x+1)*tidx*PI/(2.0*nwidth))*
::cos((2*y+1)*tidy*PI/(2.0*nheight));
}
F[index]=(float)alfa * beta * tmp;
}
}
int main(){
ifstream infile("gradient.txt");
int i=0;
string line;
//float f[length*length] = {0,0};
//float F0[length*length] = {0.0};
//float F1[length*length] = {0.0};
//float F2[length*length] = {0.0};
float *f = (float*)malloc(length*length * sizeof(float));
float *F0 = (float*)malloc(length*length * sizeof(float));
float *F1 = (float*)malloc(length*length * sizeof(float));
float *F2 = (float*)malloc(IMG_WIDTH *IMG_HEIGHT * sizeof(float));
while(i<length*length){
if(getline(infile, line))
{
f[i] = atof(line.c_str());
}
i++;
}
Mat reference_frame, rendition_frame, next_reference_frame, next_rendition_frame;
Mat reference_frame_v, rendition_frame_v, next_reference_frame_v, next_rendition_frame_v;
Mat reference_frame_float, rendition_frame_float, reference_dct, rendition_dct;
reference_frame = imread("d:/tmp/bmptest/reference_frame.bmp");
rendition_frame = imread("d:/tmp/bmptest/rendition_frame.bmp");
next_reference_frame = imread("d:/tmp/bmptest/next_reference_frame.bmp");
next_rendition_frame = imread("d:/tmp/bmptest/next_rendition_frame.bmp");
cvtColor(reference_frame, reference_frame_v, COLOR_BGR2HSV);
cvtColor(rendition_frame, rendition_frame_v, COLOR_BGR2HSV);
cvtColor(next_reference_frame, next_reference_frame_v, COLOR_BGR2HSV);
//cvtColor(next_rendition_frame, next_rendition_frame_v, COLOR_BGR2HSV);
extractChannel(reference_frame_v, reference_frame_v, 2);
extractChannel(rendition_frame_v, rendition_frame_v, 2);
extractChannel(next_reference_frame_v, next_reference_frame_v, 2);
//extractChannel(next_rendition_frame_v, next_rendition_frame_v, 2);
reference_frame_v.convertTo(reference_frame_v, CV_32FC1, 1.0 / 255.0);
rendition_frame_v.convertTo(rendition_frame_v, CV_32FC1, 1.0 / 255.0);
// cout<<"before"<<endl;
// for(i=0; i<length*length; i++){
// cout<<f[i]<<" ";
// if ((i+1)%length==0)
// cout<<endl;
// }
// cout<<endl;
// for(i=0; i<length*length; i++){
// cout<<F1[i]<<" ";
// if ((i+1)%length==0)
// cout<<endl;
// }
clock_t star0, end0;
clock_t star1, end1;
clock_t star2, end2;
//use event to record time
//float time0 = 0;
float time1 = 0;
float time2 = 0;
// hipEvent_t start0, stop0;
// hipEventCreate(&start0);
// hipEventCreate(&stop0);
hipEvent_t start1, stop1;
hipEventCreate(&start1);
hipEventCreate(&stop1);
hipEvent_t start2, stop2;
hipEventCreate(&start2);
hipEventCreate(&stop2);
/*
* execute dct()
*/
star0 = clock();
dct(f,F0);
end0 = clock();
// cout<<"----------------dct()-----------"<<endl;
// for (i = 0; i < length * length; i++)
// {
// /* GPU can't calculate floating number precisely
// * 0 will be a very small floating number.
// * so print this numbers with 7 digits after decimal point
// */
// cout << setiosflags(ios::right)
// << setiosflags(ios::fixed) << setprecision(7)
// << F0[i] << "\t";
// if ((i + 1) % length == 0)
// cout << endl;
// }
#if 0
/*
* execute dct_1()
*/
star1 = clock();
hipEventRecord(start1, 0 );
hipError_t cudaStatus = dctWithCuda_1(f,F1);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "dctWithCuda_1 failed!");
return 1;
}
end1 = clock();
hipEventRecord(stop1, 0 );
hipEventSynchronize(start1);
hipEventSynchronize(stop1);
hipEventElapsedTime(&time1,start1,stop1);
printf("excute1 time: %f (ms)\n",time1);
hipEventDestroy(start1); //destory the event
hipEventDestroy(stop1);
#endif
// cout<<"----------------dct_1()-----------"<<endl;
// for (i = 0; i < length * length; i++)
// {
// /* GPU can't calculate floating number precisely
// * 0 will be a very small floating number.
// * so print this numbers with 7 digits after decimal point
// */
// cout << setiosflags(ios::right)
// << setiosflags(ios::fixed) << setprecision(7)
// << F1[i] << "\t";
// if ((i + 1) % length == 0)
// cout << endl;
// }
/*
* execute dct_2()
*/
star2 = clock();
hipEventRecord(start2, 0 );
hipError_t cudaStatus_ = dctWithCuda_2((float*)reference_frame_v.data,F2, IMG_WIDTH, IMG_HEIGHT);
if (cudaStatus_ != hipSuccess)
{
fprintf(stderr, "dctWithCuda_1 failed!");
return 1;
}
hipEventRecord(stop2, 0 );
end2 = clock();
hipEventSynchronize(start2);
hipEventSynchronize(stop2);
hipEventElapsedTime(&time2,start2,stop2);
printf("excute2 time: %f (ms)\n",time2);
hipEventDestroy(start2); //destory the event
hipEventDestroy(stop2);
WriteFloatBmp("d:/tmp/bmptest/2ddct_frame.bmp", 480, 270, (float*)F2);
cv::gpu::GpuMat gmatreference_frame_v, gmatreference_dct, gmatrendition_dct, gmatdiff_dct;
gmatreference_frame_v.upload(reference_frame_v);
cv::gpu::dct2d(gmatreference_frame_v, gmatreference_dct);
Mat tmp_frame;
gmatreference_dct.download(tmp_frame);
WriteFloatBmp("d:/tmp/bmptest/2gpu_dct_reference_frame.bmp", 480, 270, (float*)tmp_frame.data);
// for (i = 0; i < length * length; i++)
// {
// /* GPU can't calculate floating number precisely
// * 0 will be a very small floating number.
// * so print this numbers with 7 digits after decimal point
// */
// cout << setiosflags(ios::right)
// << setiosflags(ios::fixed) << setprecision(7)
// << F2[i] << "\t";
// if ((i + 1) % length == 0)
// cout << endl;
// }
//time
cout<<"----------------clock()-----------"<<endl;
cout<< "dct() timeused="<<end0-star0<<"ms"<<endl;
cout<< "dct_1() timeused="<<end1-star1<<"ms"<<endl;
cout<< "dct_2() timeused="<<end2-star2<<"ms"<<endl;
// cout<<"after"<<endl;
// for(i=0; i<length*length; i++){
// cout<<f[i]<<" ";
// if ((i+1)%length==0)
// cout<<endl;
// }
// cout<<endl;
// for(i=0; i<length*length; i++){
// cout<<F[i]<<" ";
// if ((i+1)%length==0)
// cout<<endl;
// }
return 0;
}
hipError_t dctWithCuda_1(const float *d, float *D){
float *dev_d = 0;
float *dev_D = 0;
float time=0;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0);
if(cudaStatus != hipSuccess){
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_d,length *length* sizeof(float));
if(cudaStatus != hipSuccess){
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_D,length *length* sizeof(float));
if(cudaStatus != hipSuccess){
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
//copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_d, d,length *length*sizeof(float),hipMemcpyHostToDevice);
if(cudaStatus != hipSuccess){
fprintf(stderr, "hipMemcpy-- failed");
goto Error;
}
//launch a kernel on the GPU
hipLaunchKernelGGL(( dct_1), dim3(length),dim3(1), 0, 0, dev_d, dev_D);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0 );
cudaStatus = hipMemcpy(D, dev_D, length*length* sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
hipEventRecord(stop, 0 );
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&time,start,stop);
printf("copy1 time: %f (ms)\n",time);
Error:
hipFree(dev_d);
hipFree(dev_D);
hipEventDestroy(start); //destroy the event
hipEventDestroy(stop);
return cudaStatus;
}
hipError_t dctWithCuda_2(const float *d, float *D, int nwidth, int nheight){
float *dev_d = 0;
float *dev_D = 0;
float time=0;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0);
if(cudaStatus != hipSuccess){
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_d, nwidth * nheight * sizeof(float));
if(cudaStatus != hipSuccess){
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_D, nwidth * nheight * sizeof(float));
if(cudaStatus != hipSuccess){
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
//copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_d, d, nwidth * nheight * sizeof(float),hipMemcpyHostToDevice);
if(cudaStatus != hipSuccess){
fprintf(stderr, "hipMalloc failed");
goto Error;
}
dim3 grid(1, 1, 1);
dim3 dimblock(16, 16);
grid.x = (nwidth + 15) / 16;
grid.y = (nheight + 15) / 16;
hipLaunchKernelGGL(( dct_2) , dim3(grid), dim3(dimblock) , 0, 0, dev_d, dev_D, nwidth, nheight);
//launch a kernel on the GPU
//dct_2<<<1, (nwidth /block_len)*(nheight /block_len), block_len*block_len>>>(dev_d, dev_D, nwidth, nheight);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0 );
cudaStatus = hipMemcpy(D, dev_D, nwidth*nheight * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
hipEventRecord(stop, 0 );
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&time,start,stop);
printf("copy2 time: %f (ms)\n",time);
Error:
hipFree(dev_d);
hipFree(dev_D);
hipEventDestroy(start); //destroy the event
hipEventDestroy(stop);
return cudaStatus;
}
| b043fe798f9f1a72b6704e86cb54bb114d2e08c7.cu | //#include<iostream>
//#include<fstream>
//#include<math.h>
//#include<string>
//#include<stdio.h>
//#include<cuda_runtime.h>
//#include<device_launch_parameters.h>
//using namespace std;
#include <iostream>
#include <fstream>
#include <iomanip>
#include <string>
#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <time.h>
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/gpu/gpu.hpp"
#include "bmpio.h"
//using namespace std;
using std::ifstream;
using std::string;
using std::cout;
using std::endl;
using std::ios;
using std::setiosflags;
using std::setprecision;
using namespace cv;
//#define length 8
#define PI 3.14159265
#define length 256
#define block_len 16
#define IMG_WIDTH 480
#define IMG_HEIGHT 270
cudaError_t dctWithCuda_1(const float *d, float *D);
cudaError_t dctWithCuda_2(const float *f, float *F, int nwidth, int nheight);
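// Reference note: dct() below computes a 2-D DCT-II as two 1-D passes (rows,
// then columns). The 1-D DCT-II of x[0..N-1] with N == length is
//   X[0] = sqrt(1/N) * sum_j x[j]
//   X[k] = sqrt(2/N) * sum_j x[j] * cos((2*j+1)*k*PI/(2*N)),  k = 1..N-1
// dct_1() is the same computation with one thread block per row (and per
// column in the second pass).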
void dct(float *f, float *F){
int i,j,t;
//float data[length]={0.0};
float tmp;
float data[length] = {0.0};
for(t=0; t<length; t++)
{
for (i=0; i<length; i++)
data[i] = f[t*length+i];//load row data from f.
for(i=0; i<length; i++)
{
if(i==0)
{
tmp = (float)(1.0/sqrt(1.0*length));
F[t*length+i] = 0.0;//why use F[bid]? Do transpose at the same time.
for(j=0; j<length; j++)
F[t*length+i] +=data[j] ;
F[t*length] *= tmp;
}
else
{
tmp = (float)(sqrt(2.0/(1.0*length)));
for(i=1; i<length; i++){
F[t*length+i] = 0;
for(j=0; j<length; j++)
F[t*length+i] += (float)(data[j]*cos((2*j+1)*i*PI/(2.0*length)));
F[t*length+i] *= tmp;
}
}
}
}
for(t=0; t<length; t++)
{
for(i=0; i<length; i++)
data[i] = F[i*length+t];
for(i=0; i<length; i++)
{
if(i==0)
{
tmp=(float)(1.0/sqrt(1.0*length));
F[t]=0;
for(j=0; j<length; j++)
F[t] += data[j];
F[t] *= tmp;
}
else
{
tmp = (float)(sqrt(2.0/(1.0*length)));
for(i=1; i<length; i++)
{
F[i*length+t] = 0;
for(j=0; j<length; j++)
F[i*length+t] += (float)(data[j]*cos((2*j+1)*i*PI/(2*length)));
F[i*length+t] *= tmp;
}
}
}
}
}
__global__ void dct_1(const float *f,float *F){
int bid = blockIdx.x;
//int tid = threadIdx.x;
int i,j;
//float data[length]={0.0};
float tmp;
//printf("");
if(bid<length){
float data[length];
for (i=0; i<length; i++)
data[i] = f[bid*length+i];//load row data from f.
__syncthreads();
for(i=0; i<length; i++){
if(i==0){
tmp = (float)(1.0/sqrt(1.0*length));
F[bid * length + i] = 0.0;
for(j=0; j<length; j++)
F[bid*length+i] +=data[j] ;
F[bid*length] *= tmp;
}
else{
tmp = (float)(sqrt(2.0/(1.0*length)));
for(i=1; i<length; i++){
F[bid*length+i] = 0;
for(j=0; j<length; j++)
F[bid*length+i] += (float)(data[j]*cos((2*j+1)*i*PI/(2*length)));
F[bid*length+i] *= tmp;
}
}
}
__syncthreads();
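// Note: __syncthreads() only synchronizes threads within this block (a single
// thread per block in this launch). The column pass below reads rows produced
// by other blocks, which this barrier cannot order; a grid-wide
// synchronization (e.g. a second kernel launch) would be needed for that.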
for(i=0; i<length; i++)
data[i] = F[i*length+bid];
__syncthreads();
for(i=0; i<length; i++){
if(i==0){
tmp=(float)(1.0/sqrt(1.0*length));
F[bid]=0;
for(j=0; j<length; j++)
F[bid] += data[j];
F[bid] *= tmp;
}
else{
tmp = (float)(sqrt(2.0/(1.0*length)));
for(i=1; i<length; i++){
F[i*length+bid] = 0;
for(j=0; j<length; j++)
F[i*length+bid] += (float)(data[j]*cos((2*j+1)*i*PI/(2*length)));
F[i*length+bid] *= tmp;
}
}
}
__syncthreads();
}
}
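// dct_2: direct 2-D DCT-II. Each thread computes a single coefficient
//   F[v][u] = alpha(v) * beta(u) * sum_{y,x} f[y][x]
//             * cos((2*x+1)*u*PI/(2*W)) * cos((2*y+1)*v*PI/(2*H))
// with beta = sqrt(1/W) or sqrt(2/W) and alpha = sqrt(1/H) or sqrt(2/H),
// so every thread scans the whole W*H input.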
__global__ void dct_2(const float *f, float *F, int nwidth, int nheight){
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
int index = tidy * nwidth + tidx;
int i;
float tmp = 0.0f; // accumulator; must start at zero
float beta ,alfa;
if(tidx == 0)
beta = sqrt(1.0/ nwidth);
else
beta = sqrt(2.0/ nwidth);
if(tidy == 0)
alfa = sqrt(1.0/ nheight);
else
alfa = sqrt(2.0/ nheight);
if(tidx< nwidth && tidy< nheight)
{
for(i=0; i< nwidth * nheight; i++)
{
int x = i % nwidth;
int y = i / nwidth;
tmp += f[i]*::cos((2*x+1)*tidx*PI/(2.0*nwidth))*
::cos((2*y+1)*tidy*PI/(2.0*nheight));
}
F[index]=(float)alfa * beta * tmp;
}
}
int main(){
ifstream infile("gradient.txt");
int i=0;
string line;
//float f[length*length] = {0,0};
//float F0[length*length] = {0.0};
//float F1[length*length] = {0.0};
//float F2[length*length] = {0.0};
float *f = (float*)malloc(length*length * sizeof(float));
float *F0 = (float*)malloc(length*length * sizeof(float));
float *F1 = (float*)malloc(length*length * sizeof(float));
float *F2 = (float*)malloc(IMG_WIDTH *IMG_HEIGHT * sizeof(float));
while(i<length*length){
if(getline(infile, line))
{
f[i] = atof(line.c_str());
}
i++;
}
Mat reference_frame, rendition_frame, next_reference_frame, next_rendition_frame;
Mat reference_frame_v, rendition_frame_v, next_reference_frame_v, next_rendition_frame_v;
Mat reference_frame_float, rendition_frame_float, reference_dct, rendition_dct;
reference_frame = imread("d:/tmp/bmptest/reference_frame.bmp");
rendition_frame = imread("d:/tmp/bmptest/rendition_frame.bmp");
next_reference_frame = imread("d:/tmp/bmptest/next_reference_frame.bmp");
next_rendition_frame = imread("d:/tmp/bmptest/next_rendition_frame.bmp");
cvtColor(reference_frame, reference_frame_v, COLOR_BGR2HSV);
cvtColor(rendition_frame, rendition_frame_v, COLOR_BGR2HSV);
cvtColor(next_reference_frame, next_reference_frame_v, COLOR_BGR2HSV);
//cvtColor(next_rendition_frame, next_rendition_frame_v, COLOR_BGR2HSV);
extractChannel(reference_frame_v, reference_frame_v, 2);
extractChannel(rendition_frame_v, rendition_frame_v, 2);
extractChannel(next_reference_frame_v, next_reference_frame_v, 2);
//extractChannel(next_rendition_frame_v, next_rendition_frame_v, 2);
reference_frame_v.convertTo(reference_frame_v, CV_32FC1, 1.0 / 255.0);
rendition_frame_v.convertTo(rendition_frame_v, CV_32FC1, 1.0 / 255.0);
// cout<<"before"<<endl;
// for(i=0; i<length*length; i++){
// cout<<f[i]<<" ";
// if ((i+1)%length==0)
// cout<<endl;
// }
// cout<<endl;
// for(i=0; i<length*length; i++){
// cout<<F1[i]<<" ";
// if ((i+1)%length==0)
// cout<<endl;
// }
clock_t star0, end0;
clock_t star1, end1;
clock_t star2, end2;
//use event to record time
//float time0 = 0;
float time1 = 0;
float time2 = 0;
// cudaEvent_t start0, stop0;
// cudaEventCreate(&start0);
// cudaEventCreate(&stop0);
cudaEvent_t start1, stop1;
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaEvent_t start2, stop2;
cudaEventCreate(&start2);
cudaEventCreate(&stop2);
/*
* execute dct()
*/
star0 = clock();
dct(f,F0);
end0 = clock();
// cout<<"----------------dct()-----------"<<endl;
// for (i = 0; i < length * length; i++)
// {
// /* GPU can't calculate floating number precisely
// * 0 will be a very small floating number.
// * so print this numbers with 7 digits after decimal point
// */
// cout << setiosflags(ios::right)
// << setiosflags(ios::fixed) << setprecision(7)
// << F0[i] << "\t";
// if ((i + 1) % length == 0)
// cout << endl;
// }
#if 0
/*
* execute dct_1()
*/
star1 = clock();
cudaEventRecord(start1, 0 );
cudaError_t cudaStatus = dctWithCuda_1(f,F1);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "dctWithCuda_1 failed!");
return 1;
}
end1 = clock();
cudaEventRecord(stop1, 0 );
cudaEventSynchronize(start1);
cudaEventSynchronize(stop1);
cudaEventElapsedTime(&time1,start1,stop1);
printf("excute1 time: %f (ms)\n",time1);
cudaEventDestroy(start1); //destory the event
cudaEventDestroy(stop1);
#endif
// cout<<"----------------dct_1()-----------"<<endl;
// for (i = 0; i < length * length; i++)
// {
// /* GPU can't calculate floating number precisely
// * 0 will be a very small floating number.
// * so print this numbers with 7 digits after decimal point
// */
// cout << setiosflags(ios::right)
// << setiosflags(ios::fixed) << setprecision(7)
// << F1[i] << "\t";
// if ((i + 1) % length == 0)
// cout << endl;
// }
/*
* excute dct_2()
*/
star2 = clock();
cudaEventRecord(start2, 0 );
cudaError_t cudaStatus_ = dctWithCuda_2((float*)reference_frame_v.data,F2, IMG_WIDTH, IMG_HEIGHT);
if (cudaStatus_ != cudaSuccess)
{
fprintf(stderr, "dctWithCuda_1 failed!");
return 1;
}
cudaEventRecord(stop2, 0 );
end2 = clock();
cudaEventSynchronize(start2);
cudaEventSynchronize(stop2);
cudaEventElapsedTime(&time2,start2,stop2);
printf("excute2 time: %f (ms)\n",time2);
cudaEventDestroy(start2); //destory the event
cudaEventDestroy(stop2);
WriteFloatBmp("d:/tmp/bmptest/2ddct_frame.bmp", 480, 270, (float*)F2);
cv::gpu::GpuMat gmatreference_frame_v, gmatreference_dct, gmatrendition_dct, gmatdiff_dct;
gmatreference_frame_v.upload(reference_frame_v);
cv::gpu::dct2d(gmatreference_frame_v, gmatreference_dct);
Mat tmp_frame;
gmatreference_dct.download(tmp_frame);
WriteFloatBmp("d:/tmp/bmptest/2gpu_dct_reference_frame.bmp", 480, 270, (float*)tmp_frame.data);
// for (i = 0; i < length * length; i++)
// {
// /* GPU can't calculate floating number precisely
// * 0 will be a very small floating number.
// * so print this numbers with 7 digits after decimal point
// */
// cout << setiosflags(ios::right)
// << setiosflags(ios::fixed) << setprecision(7)
// << F2[i] << "\t";
// if ((i + 1) % length == 0)
// cout << endl;
// }
//time
cout<<"----------------clock()-----------"<<endl;
cout<< "dct() timeused="<<end0-star0<<"ms"<<endl;
cout<< "dct_1() timeused="<<end1-star1<<"ms"<<endl;
cout<< "dct_2() timeused="<<end2-star2<<"ms"<<endl;
// cout<<"after"<<endl;
// for(i=0; i<length*length; i++){
// cout<<f[i]<<" ";
// if ((i+1)%length==0)
// cout<<endl;
// }
// cout<<endl;
// for(i=0; i<length*length; i++){
// cout<<F[i]<<" ";
// if ((i+1)%length==0)
// cout<<endl;
// }
return 0;
}
cudaError_t dctWithCuda_1(const float *d, float *D){
float *dev_d = 0;
float *dev_D = 0;
float time=0;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_d,length *length* sizeof(float));
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_D,length *length* sizeof(float));
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
//copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_d, d,length *length*sizeof(float),cudaMemcpyHostToDevice);
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaMemcpy-- failed");
goto Error;
}
//launch a kernel on the GPU
dct_1<<<length,1>>>(dev_d, dev_D);
cudaStatus = cudaThreadSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaThreadSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0 );
cudaStatus = cudaMemcpy(D, dev_D, length*length* sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaEventRecord(stop, 0 );
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time,start,stop);
printf("copy1 time: %f (ms)\n",time);
Error:
cudaFree(dev_d);
cudaFree(dev_D);
cudaEventDestroy(start); //destroy the event
cudaEventDestroy(stop);
return cudaStatus;
}
cudaError_t dctWithCuda_2(const float *d, float *D, int nwidth, int nheight){
float *dev_d = 0;
float *dev_D = 0;
float time=0;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_d, nwidth * nheight * sizeof(float));
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_D, nwidth * nheight * sizeof(float));
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
//copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_d, d, nwidth * nheight * sizeof(float),cudaMemcpyHostToDevice);
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaMalloc failed");
goto Error;
}
dim3 grid(1, 1, 1);
dim3 dimblock(16, 16);
grid.x = (nwidth + 15) / 16;
grid.y = (nheight + 15) / 16;
dct_2 <<<grid, dimblock >>> (dev_d, dev_D, nwidth, nheight);
//launch a kernel on the GPU
//dct_2<<<1, (nwidth /block_len)*(nheight /block_len), block_len*block_len>>>(dev_d, dev_D, nwidth, nheight);
cudaStatus = cudaThreadSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaThreadSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0 );
cudaStatus = cudaMemcpy(D, dev_D, nwidth*nheight * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaEventRecord(stop, 0 );
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time,start,stop);
printf("copy2 time: %f (ms)\n",time);
Error:
cudaFree(dev_d);
cudaFree(dev_D);
cudaEventDestroy(start); //destroy the event
cudaEventDestroy(stop);
return cudaStatus;
}
|
b958c7a33c2aa940185c352f0deb52aee99446ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <c10/core/GeneratorImpl.h>
#include <algorithm>
#include <hipcub/hipcub.hpp>
#include "caffe2/sgd/adagrad_fused_op_gpu.cuh"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
void inclusive_scan_wrapper(
const int* length_data,
int num_lengths,
Tensor* temp_buffer,
Tensor* prefix_sum_out,
CUDAContext* context_) {
// Retrieve buffer size
size_t temp_storage_bytes = 0;
hipcub::DeviceScan::InclusiveSum(
NULL,
temp_storage_bytes,
length_data,
prefix_sum_out->template mutable_data<int>(),
num_lengths,
context_->cuda_stream());
// Allocate temporary storage
auto buffer_size = (temp_storage_bytes + sizeof(int)) / sizeof(int);
temp_buffer->Resize(buffer_size);
void* d_temp_storage =
static_cast<void*>(temp_buffer->template mutable_data<int>());
// Run inclusive prefix sum
hipcub::DeviceScan::InclusiveSum(
d_temp_storage,
temp_storage_bytes,
length_data,
prefix_sum_out->template mutable_data<int>(),
num_lengths,
context_->cuda_stream());
}
template <typename SIndex>
void sort_pairs_wrapper(
int num_indices,
int num_rows,
Tensor* temp_buffer,
const Tensor* linear_ind_buffer_,
Tensor* sorted_linear_ind_buffer_,
const Tensor* seg_id_buffer_,
Tensor* sorted_seg_id_buffer_,
CUDAContext* context_) {
// Retrieve buffer size
size_t temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs(
nullptr,
temp_storage_bytes,
linear_ind_buffer_->template data<SIndex>(),
sorted_linear_ind_buffer_->template mutable_data<SIndex>(),
seg_id_buffer_->template data<int>(),
sorted_seg_id_buffer_->template mutable_data<int>(),
num_indices,
0,
int(log2(float(num_rows)) + 1),
context_->cuda_stream(),
false);
// Allocate temporary storage
auto buffer_size = (temp_storage_bytes + sizeof(int)) / sizeof(int);
temp_buffer->Resize(buffer_size);
void* d_temp_storage =
static_cast<void*>(temp_buffer->template mutable_data<int>());
hipcub::DeviceRadixSort::SortPairs(
d_temp_storage,
temp_storage_bytes,
linear_ind_buffer_->template data<SIndex>(),
sorted_linear_ind_buffer_->template mutable_data<SIndex>(),
seg_id_buffer_->template data<int>(),
sorted_seg_id_buffer_->template mutable_data<int>(),
num_indices,
0,
int(log2(float(num_rows)) + 1),
context_->cuda_stream(),
false);
}
template <typename T>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS)
#endif
__global__ void gradient_mean_kernel(
const T* __restrict__ grad_in,
const int* __restrict__ lengths,
T* __restrict__ grad_out,
int block_size) {
int group = blockIdx.x;
for (int i = threadIdx.x; i < block_size; i += blockDim.x) {
grad_out[group * block_size + i] = lengths[group] > 0
? grad_in[group * block_size + i] / lengths[group]
: grad_in[group * block_size + i];
}
}
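// The fused kernels below apply the plain AdaGrad update to each embedding
// row referenced by `indices`:
//   g      = grad_segment + weight_decay * param
//   moment = moment + g * g
//   param  = param + lr * g / (sqrt(moment) + epsilon)
// fused with the SparseLengthsSum gradient, i.e. each segment's gradient is
// scattered directly into the rows it gathered from.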
template <typename SIndex, typename TParam, typename T, bool ExactBlock = false>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS)
#endif
__global__ void sparse_adagrad_fused_length_sum_gradient_kernel(
const int* __restrict__ prefix_sum_length_data, // prefix of lengths
// (offsets for the
// segments)
int N, // number of rows (hash size) of embedding table
int block_size, // embedding dimension size
int num_lengths, // number of segments
const float epsilon,
TParam* param,
TParam* param_mom,
const SIndex* indices,
const T* __restrict__ grad,
const float* lr,
float weight_decay = 0.f) {
const float LR = lr[0];
// num_lengths blocks, each block process one segment
int group = blockIdx.x; // the group-th segment
int start = group == 0
? 0
: prefix_sum_length_data[group - 1]; // start offset of the segment
int end = prefix_sum_length_data[group]; // end offset of the segment
CUDA_KERNEL_ASSERT(start <= N);
CUDA_KERNEL_ASSERT(end <= N);
if (ExactBlock) {
const size_t gradIdx = group * block_size + threadIdx.x; // index for grad
for (int line = start + threadIdx.y; line < end; line += blockDim.y) {
// line: the idx in the indices
// threadIdx.x: index in the embedding dimension
const SIndex index =
indices[line]; // the index-th row in the embedding table
const size_t paramIdx =
index * block_size + threadIdx.x; // index for param
float gi = grad[gradIdx] + weight_decay * param[paramIdx];
float mom_new = gi * gi + param_mom[paramIdx];
param_mom[paramIdx] = mom_new;
float param_new = LR * gi / (sqrtf(mom_new) + epsilon) + param[paramIdx];
param[paramIdx] = param_new;
}
} else {
for (int i = threadIdx.x; i < block_size; i += blockDim.x) {
// i: index in the embedding dimension
const size_t gradIdx = group * block_size + i; // index for grad
for (int line = start; line < end; ++line) {
// line: the idx in the indices
const SIndex index =
indices[line]; // the index row in the embedding table
const size_t paramIdx = index * block_size + i; // index for param
float gi = grad[gradIdx] + weight_decay * param[paramIdx];
float mom_new = gi * gi + param_mom[paramIdx];
param_mom[paramIdx] = mom_new;
float param_new =
LR * gi / (sqrtf(mom_new) + epsilon) + param[paramIdx];
param[paramIdx] = param_new;
}
}
}
}
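// Weighted variant: the per-lookup weight scales the incoming gradient before
// the AdaGrad update, and each block additionally reduces
//   weights_grad_out[line] = sum_i grad[group][i] * param[index][i]
// (the gradient with respect to that lookup's weight) with hipcub::BlockReduce.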
template <typename SIndex, typename TParam, typename T, int NumThreads>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS)
#endif
__global__ void sparse_adagrad_fused_length_weighted_sum_gradient_kernel(
const int* __restrict__ prefix_sum_length_data,
int N, // number of rows (hash size) of embedding table
int block_size, // embedding dimension size
int num_lengths, // number of segments
const float epsilon,
TParam* param,
TParam* param_mom,
const SIndex* indices,
const T* __restrict__ grad,
const T* __restrict__ weights,
T* __restrict__ weights_grad_out,
const float* lr,
float weight_decay = 0.f) {
const float LR = lr[0];
// num_lengths blocks, each block process one segment
int group = blockIdx.x; // the group-th segment
int start = group == 0
? 0
: prefix_sum_length_data[group - 1]; // start offset of the segment
int end = prefix_sum_length_data[group]; // end offset of the segment
CUDA_KERNEL_ASSERT(start <= N);
CUDA_KERNEL_ASSERT(end <= N);
// TODO: Tuning NumThreads for w_grad
typedef hipcub::BlockReduce<float, NumThreads> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
// TODO(jianyuhuang): parallelize this outer loop
for (int line = start; line < end; ++line) {
T w_grad = 0;
// line: the idx in the indices
const SIndex index =
indices[line]; // the index-th row in the embedding table
// SparseAdagradFusedWithSparseLengthsWeightedSumGradientOp also fuses
// LengthsRangeFill + Gather operator. In the normal SLWS operator weight
// is accessed via weights[line] but in most cases the weights are
// generated by LengthsRangeFill and Gather operator.
// For example, if lengths is [2, 3, 1] LengthsRangeFill will generate [0,
// 1; 0, 1, 2; 0] and they are used as indices of Gather.
// So if we fuse all of these, weights[line] just becomes
// weights[line - start].
auto in_weight_temp = weights[line - start];
for (int i = threadIdx.x; i < block_size; i += blockDim.x) {
// i: index in the embedding dimension
const size_t gradIdx = group * block_size + i; // index for in_grad
const size_t paramIdx = index * block_size + i; // index for param
// TODO: trying to reduce the variable number (common subexpression
// elimination).
auto in_grad_temp = grad[gradIdx];
w_grad += in_grad_temp * param[paramIdx];
auto out_grad_temp =
in_weight_temp * in_grad_temp + weight_decay * param[paramIdx];
// TODO: split it into two kernels to make it more similar to exact
// fusion kernel (not Approx on CPUs).
float mom_new = out_grad_temp * out_grad_temp + param_mom[paramIdx];
param_mom[paramIdx] = mom_new;
float param_new =
LR * out_grad_temp / (sqrtf(mom_new) + epsilon) + param[paramIdx];
param[paramIdx] = param_new;
}
w_grad = BlockReduce(temp_storage).Reduce(w_grad, hipcub::Sum());
if (threadIdx.x == 0) {
weights_grad_out[line] = w_grad;
}
__syncthreads();
}
}
// Construct a reverse map of offset_of_idx -> segment_id.
template <typename SIndex>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS)
#endif
__global__ void linear_index_weight_offsets_dedup_kernel(
const SIndex* indices,
const int* __restrict__ prefix_sum_length_data, // prefix of lengths
int* __restrict__ seg_id_data // segment id
) {
// num_lengths blocks, each block process one segment
int group = blockIdx.x; // the group-th segment
int start = group == 0
? 0
: prefix_sum_length_data[group - 1]; // start offset of the segment
int end = prefix_sum_length_data[group]; // end offset of the segment
for (int line = start + threadIdx.x; line < end; line += blockDim.x) {
// line: the idx in the indices
seg_id_data[line] = group;
}
}
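// Row-wise AdaGrad keeps one moment value per embedding row: the squared
// gradient is averaged over the embedding dimension (row_sum_squares_avg),
// accumulated into param_mom[index], and a single step size
// LR / (sqrt(param_mom[index]) + epsilon) is applied to the whole row.
// Indices are sorted on the host (sort_pairs_wrapper) so the dedup kernel
// below performs exactly one update per distinct row, first summing the
// gradients of all duplicated occurrences.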
template <
typename SIndex,
typename TParam,
typename T,
bool ExactBlock = false,
roundOption roundOpt = NEAREST>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS)
#endif
__global__ void rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel(
const int* __restrict__ prefix_sum_length_data, // prefix of lengths
// (offsets for the
// segments)
int N, // number of rows (hash size) of embedding table
int block_size, // embedding dimension size
int num_lengths, // number of segments
int num_indices, // number of indices
const float epsilon,
TParam* param,
T* param_mom,
const SIndex* indices,
const T* __restrict__ grad,
const SIndex* sorted_linear_ind_data, // sorted linear indices
const int* __restrict__ sorted_seg_id_data, // sorted segment id
const float* lr,
ulong2 seed,
float weight_decay = 0.f) {
class randFactor<TParam, T, roundOpt> rand_factor(
seed,
blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
threadIdx.x);
const float LR = lr[0];
// num_indices blocks, each block process one index
int sorted_linear_indice_id = blockIdx.x; // the index of sorted_linear_ind
if (sorted_linear_indice_id >= num_indices) {
// don't have warp divergence when embedding dim is multiple of 32
return;
}
// check if this thread block is responsible for this whole linear index
bool linear_index_start =
(sorted_linear_indice_id == 0 ||
sorted_linear_ind_data[sorted_linear_indice_id - 1] !=
sorted_linear_ind_data[sorted_linear_indice_id]);
if (!linear_index_start) {
// don't have warp divergence when embedding dim is multiple of 32
return;
}
// the index row in the embedding table
SIndex index = sorted_linear_ind_data[sorted_linear_indice_id];
// find the num of duplicated indices.
int num_dup = 1;
while (sorted_linear_indice_id + num_dup < num_indices &&
sorted_linear_ind_data[sorted_linear_indice_id + num_dup] == index) {
num_dup += 1;
}
// TODO: Tuning NumThreads for sum_squares
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ BlockReduce::TempStorage temp_storage;
int valid = min(block_size, blockDim.x);
float sum_squares = 0.0;
__shared__ float row_sum_squares_avg;
for (int i = threadIdx.x; i < block_size; i += blockDim.x) {
// i: index in the embedding dimension
float x_ij = 0.0;
for (int dup_id = 0; dup_id < num_dup; dup_id++) {
int group = sorted_seg_id_data[sorted_linear_indice_id + dup_id];
x_ij += grad[group * block_size + i];
}
x_ij += weight_decay *
rand_factor.convertTypeFromParamToTarget(param[index * block_size + i]);
sum_squares += x_ij * x_ij;
}
float reduce_result = BlockReduce(temp_storage).Sum(sum_squares, valid);
if (threadIdx.x == 0) {
row_sum_squares_avg = reduce_result / static_cast<float>(block_size);
float mom_new = param_mom[index] + static_cast<T>(row_sum_squares_avg);
param_mom[index] = mom_new;
}
__syncthreads();
// update param
float step = LR / (sqrtf(param_mom[index]) + epsilon);
for (int i = threadIdx.x; i < block_size; i += blockDim.x) {
float x_ij = 0.0;
for (int dup_id = 0; dup_id < num_dup; dup_id++) {
int group = sorted_seg_id_data[sorted_linear_indice_id + dup_id];
x_ij += grad[group * block_size + i];
}
const size_t paramIdx = index * block_size + i; // index for param
x_ij += weight_decay * rand_factor.convertTypeFromParamToTarget(param[paramIdx]);
param[paramIdx] =
rand_factor.convertTypeFromTargetToParam(param[paramIdx] + x_ij * step);
}
}
template <typename SIndex, typename TParam, typename T, int NumThreads>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS)
#endif
__global__
void rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel(
const int* __restrict__ prefix_sum_length_data, // prefix of lengths
// (offsets for the
// segments)
int N, // number of rows (hash size) of embedding table
int block_size, // embedding dimension size
int num_lengths, // number of segments
const float epsilon,
TParam* param,
T* param_mom,
const SIndex* indices,
const T* __restrict__ grad,
const T* __restrict__ weights,
T* __restrict__ weights_grad_out,
const float* lr,
float weight_decay = 0.f) {
const float LR = lr[0];
// num_lengths blocks, each block process one segment
int group = blockIdx.x; // the group-th segment
int start = group == 0
? 0
: prefix_sum_length_data[group - 1]; // start offset of the segment
int end = prefix_sum_length_data[group]; // end offset of the segment
CUDA_KERNEL_ASSERT(start <= N);
CUDA_KERNEL_ASSERT(end <= N);
// TODO: Tuning NumThreads for w_grad
typedef hipcub::BlockReduce<float, NumThreads> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int valid = min(block_size, blockDim.x);
// for avg_square_weight. Can we reuse temp_storage
__shared__ typename BlockReduce::TempStorage temp_storage2;
// TODO(jianyuhuang): parallelize this outer loop
for (int line = start; line < end; ++line) {
T w_grad = 0;
// i: index in the embedding dimension
const SIndex index = indices[line];
auto in_weight_temp = weights[line - start];
float sum_squares = 0.0;
__shared__ float row_sum_squares_avg;
for (int i = threadIdx.x; i < block_size; i += blockDim.x) {
const float x_ij = grad[group * block_size + i] +
weight_decay * param[index * block_size + i];
sum_squares += x_ij * x_ij;
}
float reduce_result = BlockReduce(temp_storage2).Sum(sum_squares, valid);
if (threadIdx.x == 0) {
row_sum_squares_avg = reduce_result / static_cast<float>(block_size);
param_mom[index] +=
static_cast<T>(row_sum_squares_avg * in_weight_temp * in_weight_temp);
}
__syncthreads();
// update param
float step = LR / (sqrtf(param_mom[index]) + epsilon);
for (int i = threadIdx.x; i < block_size; i += blockDim.x) {
const size_t gradIdx = group * block_size + i; // index for in_grad
const size_t paramIdx = index * block_size + i; // index for param
// TODO: trying to reduce the variable number (common subexpression
// elimination).
auto in_grad_temp = grad[gradIdx];
w_grad += in_grad_temp * param[paramIdx];
auto out_grad_temp =
in_weight_temp * in_grad_temp + weight_decay * param[paramIdx];
// TODO: split it into two kernels to make it more similar to exact
// fusion kernel (not Approx on CPUs).
param[paramIdx] = out_grad_temp * step + param[paramIdx];
}
w_grad = BlockReduce(temp_storage).Reduce(w_grad, hipcub::Sum());
if (threadIdx.x == 0) {
weights_grad_out[line] = w_grad;
}
__syncthreads();
}
}
} // namespace
template <typename T, typename TLengths, bool is_mean, class Context>
class CUDASparseAdagradFusedWithSparseLengthsSumGradientOp final
: public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CUDASparseAdagradFusedWithSparseLengthsSumGradientOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1) << "gradient optimization operator in use: "
<< "CUDASparseAdagradFusedWithSparseLengthSumGradientOp"
<< " weight_decay_=" << weight_decay_;
const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_1).size());
CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
auto n = Input(INDICES).size();
if (n == 0) {
return true;
}
return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call(
this, Input(PARAM));
}
template <typename IndexType, typename TParam>
bool DoRunWithType2() {
auto& segmentGradsInput = Input(GRAD);
auto& lengthsInput = Input(LENGTHS);
auto& indicesInput = Input(INDICES);
CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector");
CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0);
// Enforce:
// input(embedding/momentum) == outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).numel(),
Input(MOMENT_1).numel(),
"Input Param size: ",
Input(PARAM).numel(),
" Input Moment size: ",
Input(MOMENT_1).numel());
const int num_lengths = lengthsInput.dim(0);
CAFFE_ENFORCE(segmentGradsInput.dim() > 0);
CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0));
int output_0dim = indicesInput.dim(0);
if (num_lengths <= 0) {
// return early to avoid invalid empty kernel
return true;
}
inclusive_scan_length_buffer_.ResizeLike(lengthsInput);
inclusive_scan_wrapper(
lengthsInput.template data<int>(),
num_lengths,
&inclusive_scan_buffer_,
&inclusive_scan_length_buffer_,
&context_);
// compute output size using length
auto* prefix_sum_length_data =
inclusive_scan_length_buffer_.template data<int>();
const auto* lengths = lengthsInput.template data<int>();
const auto* lr = Input(LR).template data<T>();
const auto* indices = Input(INDICES).template data<IndexType>();
const T* grad = Input(GRAD).template data<T>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<TParam>();
int N = output_0dim;
int block_size = segmentGradsInput.size_from_dim(1);
auto maxThreads =
GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock;
if (is_mean) {
grad_buffer_.ResizeLike(segmentGradsInput);
}
auto* grad_buffer_data =
is_mean ? grad_buffer_.template mutable_data<T>() : NULL;
if (is_mean) {
hipLaunchKernelGGL(( gradient_mean_kernel<T>)
, dim3(num_lengths), dim3(::min(maxThreads, block_size)), 0, context_.cuda_stream(),
grad, lengths, grad_buffer_data, block_size);
}
if (block_size <= maxThreads) {
int multiple = ::min(maxThreads / block_size, SEGREDUCE_MINBLOCKS);
dim3 block(block_size, multiple);
// calling cuda kernel with ExactBlock = true
hipLaunchKernelGGL(( sparse_adagrad_fused_length_sum_gradient_kernel<
IndexType,
TParam,
T,
true>), dim3(num_lengths), dim3(block), 0, context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
lr,
weight_decay_);
} else {
// calling cuda kernel with ExactBlock = false
hipLaunchKernelGGL(( sparse_adagrad_fused_length_sum_gradient_kernel<
IndexType,
TParam,
T,
false>), dim3(num_lengths), dim3(maxThreads), 0, context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
lr,
weight_decay_);
}
return true;
}
private:
// member field to manage memory
Tensor inclusive_scan_buffer_{CUDA};
Tensor inclusive_scan_length_buffer_{CUDA};
Tensor grad_buffer_{CUDA};
protected:
T epsilon_;
T weight_decay_;
INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR, LENGTHS);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1);
};
template <typename T, typename TLengths, class Context>
class CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp final
: public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1) << "gradient optimization operator in use: "
<< "CUDASparseAdagradFusedWithSparseLengthWeightedSumGradientOp"
<< " weight_decay_=" << weight_decay_;
const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_1).size());
CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
auto n = Input(INDICES).size();
if (n == 0) {
// Allocate output to an empty tensor
Output(AUX_GRAD, n, at::dtype<T>());
return true;
}
return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call(
this, Input(PARAM));
}
template <typename IndexType, typename TParam>
bool DoRunWithType2() {
auto& segmentGradsInput = Input(GRAD);
auto& lengthsInput = Input(LENGTHS);
auto& indicesInput = Input(INDICES);
auto& weightsInput = Input(AUX_PARAM);
CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector");
CAFFE_ENFORCE_EQ(1, weightsInput.dim(), "WEIGHTS must be a vector");
CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0);
// Enforce:
// input(embedding/momentum) == outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).numel(),
Input(MOMENT_1).numel(),
"Input Param size: ",
Input(PARAM).numel(),
" Input Moment size: ",
Input(MOMENT_1).numel());
const int num_lengths = lengthsInput.dim(0);
CAFFE_ENFORCE(segmentGradsInput.dim() > 0);
CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0));
int output_0dim = indicesInput.dim(0);
auto* weightGradsOutput =
Output(AUX_GRAD, indicesInput.sizes(), at::dtype<T>());
T* out_weight_grads = weightGradsOutput->template mutable_data<T>();
if (num_lengths <= 0) {
// return early to avoid invalid empty kernel
return true;
}
inclusive_scan_length_buffer_.ResizeLike(lengthsInput);
inclusive_scan_wrapper(
lengthsInput.template data<int>(),
num_lengths,
&inclusive_scan_buffer_,
&inclusive_scan_length_buffer_,
&context_);
// compute output size using length
auto* prefix_sum_length_data =
inclusive_scan_length_buffer_.template data<int>();
const auto* lr = Input(LR).template data<T>();
const auto* indices = Input(INDICES).template data<IndexType>();
const T* grad = Input(GRAD).template data<T>();
const T* weights = weightsInput.template data<T>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<TParam>();
int N = output_0dim;
int block_size = segmentGradsInput.size_from_dim(1);
auto maxThreads =
GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock;
if (block_size > 128) {
hipLaunchKernelGGL(( sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
512>), dim3(num_lengths), dim3(512), 0, context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
} else if (block_size > 64) {
hipLaunchKernelGGL(( sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
128>), dim3(num_lengths), dim3(128), 0, context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
} else if (block_size > 32) {
hipLaunchKernelGGL(( sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
64>), dim3(num_lengths), dim3(64), 0, context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
} else {
hipLaunchKernelGGL(( sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
32>), dim3(num_lengths), dim3(32), 0, context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
}
return true;
}
private:
// member field to manage memory
Tensor inclusive_scan_buffer_{CUDA};
Tensor inclusive_scan_length_buffer_{CUDA};
protected:
T epsilon_;
T weight_decay_;
INPUT_TAGS(PARAM, MOMENT_1, AUX_PARAM, INDICES, GRAD, LR, LENGTHS);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1, AUX_GRAD);
};
template <typename T, typename TLengths, bool is_mean, class Context>
class CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp final
: public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
round_option_((roundOption)this->template GetSingleArgument<int>(
"round_option",
NEAREST)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1) << "gradient optimization operator in use: "
<< "CUDARowWiseSparseAdagradFusedWithSparseLengthSumGradientOp"
<< " weight_decay_=" << weight_decay_;
const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
auto n = Input(INDICES).size();
if (n == 0) {
return true;
}
return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call(
this, Input(PARAM));
}
template <typename IndexType, typename TParam>
bool DoRunWithType2() {
auto& segmentGradsInput = Input(GRAD);
auto& lengthsInput = Input(LENGTHS);
auto& indicesInput = Input(INDICES);
CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector");
CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0);
// Enforce:
// number of rows: input(embedding/momentum) ==
// outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).dim(0),
Input(MOMENT_1).dim(0),
"Input Param number of rows: ",
Input(PARAM).dim(0),
" Input Moment size: ",
Input(MOMENT_1).dim(0));
const int num_lengths = lengthsInput.dim(0);
CAFFE_ENFORCE(segmentGradsInput.dim() > 0);
CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0));
int output_0dim = indicesInput.dim(0);
if (num_lengths <= 0) {
// return early to avoid invalid empty kernel
return true;
}
inclusive_scan_length_buffer_.ResizeLike(lengthsInput);
inclusive_scan_wrapper(
lengthsInput.template data<int>(),
num_lengths,
&inclusive_scan_buffer_,
&inclusive_scan_length_buffer_,
&context_);
// compute output size using length
auto* prefix_sum_length_data =
inclusive_scan_length_buffer_.template data<int>();
const auto* lengths = lengthsInput.template data<int>();
const auto* lr = Input(LR).template data<T>();
const auto* indices = Input(INDICES).template data<IndexType>();
const T* grad = Input(GRAD).template data<T>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>();
int N = output_0dim;
int block_size = segmentGradsInput.size_from_dim(1);
auto maxThreads =
GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock;
ulong2 seed;
if (is_mean) {
grad_buffer_.ResizeLike(segmentGradsInput);
}
auto* grad_buffer_data =
is_mean ? grad_buffer_.template mutable_data<T>() : NULL;
if (is_mean) {
hipLaunchKernelGGL(( gradient_mean_kernel<T>)
, dim3(num_lengths), dim3(::min(maxThreads, block_size)), 0, context_.cuda_stream(),
grad, lengths, grad_buffer_data, block_size);
}
// 0: nearest rounding
// 1: stochastic rounding
if (round_option_ == STOCHASTIC) {
seed.x = default_rng_seed_val;
seed.y = maxThreads * block_size;
}
if (block_size <= maxThreads / 2 && block_size % 32 == 0) {
// Fast path when the embedding dimension is a multiple of 32, using
// WarpReduce.
int multiple = ::min(maxThreads / block_size, SEGREDUCE_MINBLOCKS);
dim3 block(block_size, multiple);
if (round_option_ == STOCHASTIC) {
hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_sum_gradient_kernel<
IndexType,
TParam,
T,
true,
STOCHASTIC>), dim3(num_lengths), dim3(block), 0, context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
lr,
seed,
weight_decay_);
} else {
hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_sum_gradient_kernel<
IndexType,
TParam,
T,
true,
NEAREST>), dim3(num_lengths), dim3(block), 0, context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
lr,
seed,
weight_decay_);
}
} else {
if (round_option_ == STOCHASTIC) {
hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_sum_gradient_kernel<
IndexType,
TParam,
T,
false,
STOCHASTIC>)
, dim3(num_lengths),
dim3(::min(maxThreads, block_size)),
0,
context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
lr,
seed,
weight_decay_);
} else {
hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_sum_gradient_kernel<
IndexType,
TParam,
T,
false,
NEAREST>)
, dim3(num_lengths),
dim3(::min(maxThreads, block_size)),
0,
context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
lr,
seed,
weight_decay_);
}
}
return true;
}
private:
// member field to manage memory
Tensor inclusive_scan_buffer_{CUDA};
Tensor inclusive_scan_length_buffer_{CUDA};
Tensor grad_buffer_{CUDA};
protected:
T epsilon_;
roundOption round_option_;
T weight_decay_;
INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR, LENGTHS);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1);
};
template <typename T, typename TLengths, bool is_mean, class Context>
class CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp final
: public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
round_option_((roundOption)this->template GetSingleArgument<int>(
"round_option",
NEAREST)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1) << "gradient optimization operator in use: "
<< "CUDARowWiseSparseAdagradFusedWithSparseLengthSumGradientOp"
<< " weight_decay_=" << weight_decay_;
CAFFE_ENFORCE(
round_option_ == STOCHASTIC || round_option_ == NEAREST,
"round_option_ should be either NEAREST or STOCHATIC");
const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
auto n = Input(INDICES).size();
if (n == 0) {
return true;
}
return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call(
this, Input(PARAM));
}
template <typename IndexType, typename TParam>
bool DoRunWithType2() {
auto& segmentGradsInput = Input(GRAD);
auto& lengthsInput = Input(LENGTHS);
auto& indicesInput = Input(INDICES);
CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector");
CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0);
// Enforce:
// number of rows: input(embedding/momentum) == outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).dim(0),
Input(MOMENT_1).dim(0),
"Input Param number of rows: ",
Input(PARAM).dim(0),
" Input Moment size: ",
Input(MOMENT_1).dim(0));
const int num_lengths = lengthsInput.dim(0);
const int num_indices = indicesInput.dim(0);
const int num_rows = Input(PARAM).dim(0);
CAFFE_ENFORCE(segmentGradsInput.dim() > 0);
CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0));
int output_0dim = indicesInput.dim(0);
if (num_lengths <= 0) {
// return early to avoid invalid empty kernel
return true;
}
inclusive_scan_length_buffer_.ResizeLike(lengthsInput);
inclusive_scan_wrapper(
lengthsInput.template data<int>(),
num_lengths,
&inclusive_scan_buffer_,
&inclusive_scan_length_buffer_,
&context_);
// compute output size using length
auto* prefix_sum_length_data =
inclusive_scan_length_buffer_.template data<int>();
const auto* lengths = lengthsInput.template data<int>();
const auto* lr = Input(LR).template data<T>();
const auto* indices = Input(INDICES).template data<IndexType>();
const T* grad = Input(GRAD).template data<T>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>();
int N = output_0dim;
int block_size = segmentGradsInput.size_from_dim(1);
auto maxThreads =
GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock;
if (is_mean) {
grad_buffer_.ResizeLike(segmentGradsInput);
}
auto* grad_buffer_data =
is_mean ? grad_buffer_.template mutable_data<T>() : NULL;
if (is_mean) {
hipLaunchKernelGGL(( gradient_mean_kernel<T>)
, dim3(num_lengths),
dim3(::min(maxThreads, block_size)),
0,
context_.cuda_stream(),
grad, lengths, grad_buffer_data, block_size);
}
sorted_linear_ind_buffer_.ResizeLike(indicesInput);
seg_id_buffer_.ResizeLike(indicesInput);
sorted_seg_id_buffer_.ResizeLike(indicesInput);
hipLaunchKernelGGL(( linear_index_weight_offsets_dedup_kernel<IndexType>)
, dim3(num_lengths),
dim3(32),
0,
context_.cuda_stream(),
indices,
prefix_sum_length_data,
seg_id_buffer_.template mutable_data<int>());
sort_pairs_wrapper<IndexType>(
num_indices,
num_rows,
&sort_buffer_,
&indicesInput,
&sorted_linear_ind_buffer_,
&seg_id_buffer_,
&sorted_seg_id_buffer_,
&context_);
ulong2 seed;
// 0: nearest rounding
// 1: stochastic rounding
if (round_option_ == STOCHASTIC) {
seed.x = default_rng_seed_val;
seed.y = maxThreads * block_size;
}
if (round_option_ == STOCHASTIC) {
hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel<
IndexType,
TParam,
T,
false,
STOCHASTIC>)
, dim3(num_indices),
dim3(::min(maxThreads, block_size)),
0,
context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
num_indices,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
sorted_linear_ind_buffer_.template data<IndexType>(),
sorted_seg_id_buffer_.template data<int>(),
lr,
seed,
weight_decay_);
} else {
hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel<
IndexType,
TParam,
T,
false,
NEAREST>)
, dim3(num_indices),
dim3(::min(maxThreads, block_size)),
0,
context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
num_indices,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
sorted_linear_ind_buffer_.template data<IndexType>(),
sorted_seg_id_buffer_.template data<int>(),
lr,
seed,
weight_decay_);
}
return true;
}
private:
// member field to manage memory
Tensor inclusive_scan_buffer_{CUDA};
Tensor inclusive_scan_length_buffer_{CUDA};
Tensor sort_buffer_{CUDA};
Tensor sorted_linear_ind_buffer_{CUDA};
Tensor seg_id_buffer_{CUDA};
Tensor sorted_seg_id_buffer_{CUDA};
Tensor grad_buffer_{CUDA};
protected:
T epsilon_;
roundOption round_option_;
T weight_decay_;
INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR, LENGTHS);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1);
};
template <typename T, typename TLengths, class Context>
class CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp final
: public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1)
<< "gradient optimization operator in use: "
<< "CUDARowWiseSparseAdagradFusedWithSparseLengthWeightedSumGradientOp"
<< " weight_decay_=" << weight_decay_;
const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
auto n = Input(INDICES).size();
if (n == 0) {
Output(AUX_GRAD, n, at::dtype<T>());
return true;
}
return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call(
this, Input(PARAM));
}
template <typename IndexType, typename TParam>
bool DoRunWithType2() {
auto& segmentGradsInput = Input(GRAD);
auto& lengthsInput = Input(LENGTHS);
auto& indicesInput = Input(INDICES);
auto& weightsInput = Input(AUX_PARAM);
CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector");
CAFFE_ENFORCE_EQ(1, weightsInput.dim(), "WEIGHTS must be a vector");
CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0);
// Enforce:
// number of rows: input(embedding/momentum) ==
// outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).dim(0),
Input(MOMENT_1).dim(0),
"Input Param number of rows: ",
Input(PARAM).dim(0),
" Input Moment size: ",
Input(MOMENT_1).dim(0));
const int num_lengths = lengthsInput.dim(0);
CAFFE_ENFORCE(segmentGradsInput.dim() > 0);
CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0));
int output_0dim = indicesInput.dim(0);
auto* weightGradsOutput =
Output(AUX_GRAD, indicesInput.sizes(), at::dtype<T>());
T* out_weight_grads = weightGradsOutput->template mutable_data<T>();
if (num_lengths <= 0) {
// return early to avoid invalid empty kernel
return true;
}
inclusive_scan_length_buffer_.ResizeLike(lengthsInput);
inclusive_scan_wrapper(
lengthsInput.template data<int>(),
num_lengths,
&inclusive_scan_buffer_,
&inclusive_scan_length_buffer_,
&context_);
// compute output size using length
auto* prefix_sum_length_data =
inclusive_scan_length_buffer_.template data<int>();
const auto* lr = Input(LR).template data<T>();
const auto* indices = Input(INDICES).template data<IndexType>();
const T* grad = Input(GRAD).template data<T>();
const T* weights = weightsInput.template data<T>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>();
int N = output_0dim;
int block_size = segmentGradsInput.size_from_dim(1);
auto maxThreads =
GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock;
if (block_size > 128) {
hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
512>), dim3(num_lengths), dim3(512), 0, context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
} else if (block_size > 64) {
hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
128>), dim3(num_lengths), dim3(128), 0, context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
} else if (block_size > 32) {
hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
64>), dim3(num_lengths), dim3(64), 0, context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
} else {
hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
32>), dim3(num_lengths), dim3(32), 0, context_.cuda_stream(),
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
}
return true;
}
private:
// member field to manage memory
Tensor inclusive_scan_buffer_{CUDA};
Tensor inclusive_scan_length_buffer_{CUDA};
protected:
T epsilon_;
T weight_decay_;
INPUT_TAGS(PARAM, MOMENT_1, AUX_PARAM, INDICES, GRAD, LR, LENGTHS);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1, AUX_GRAD);
};
// For GPU, the implementation of the exact and approx (RowWise)SparseAdagrad
// fusion are both approximate implementations.
// When we don't have the duplicated indices, the outputs are the same as the
// CPU implementation.
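// Added sketch (commentary, not from the original source): the approximation
// only shows up when an index is duplicated within a minibatch. E.g. with
// indices = [5, 5], two thread blocks of the non-dedup kernels can both read
// param[5]/moment[5] before either write lands, so part of one update is
// lost. The RowWise "Exact" operator above removes that race by sorting the
// indices and applying one accumulated update per unique row, which is
// deterministic but can still differ from the CPU reference that applies the
// duplicate updates sequentially.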
REGISTER_CUDA_OPERATOR(
SparseAdagradFusedWithSparseLengthsSumGradient,
CUDASparseAdagradFusedWithSparseLengthsSumGradientOp<
float,
int,
false,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
SparseAdagradFusedWithSparseLengthsSumGradientApprox,
CUDASparseAdagradFusedWithSparseLengthsSumGradientOp<
float,
int,
false,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
SparseAdagradFusedWithSparseLengthsMeanGradient,
CUDASparseAdagradFusedWithSparseLengthsSumGradientOp<
float,
int,
true,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
SparseAdagradFusedWithSparseLengthsMeanGradientApprox,
CUDASparseAdagradFusedWithSparseLengthsSumGradientOp<
float,
int,
true,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
SparseAdagradFusedWithSparseLengthsWeightedSumGradient,
CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp<
float,
int,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
SparseAdagradFusedWithSparseLengthsWeightedSumGradientApprox,
CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp<
float,
int,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdagradFusedWithSparseLengthsSumGradient,
CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp<
float,
int,
false,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdagradFusedWithSparseLengthsSumGradientApprox,
CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp<
float,
int,
false,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdagradFusedWithSparseLengthsMeanGradient,
CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp<
float,
int,
true,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdagradFusedWithSparseLengthsMeanGradientApprox,
CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp<
float,
int,
true,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradient,
CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp<
float,
int,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientApprox,
CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp<
float,
int,
CUDAContext>);
} // namespace caffe2
#undef SEGREDUCE_MINBLOCKS
| b958c7a33c2aa940185c352f0deb52aee99446ea.cu | #include <ATen/ATen.h>
#include <c10/core/GeneratorImpl.h>
#include <algorithm>
#include <cub/device/device_radix_sort.cuh>
#include "caffe2/sgd/adagrad_fused_op_gpu.cuh"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
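// Added doc comment: the wrapper below follows the usual two-phase CUB
// pattern -- the first InclusiveSum call with a NULL workspace only reports
// temp_storage_bytes, the workspace is then allocated from temp_buffer, and
// the second call performs the actual scan. E.g. lengths [2, 3, 1] produce
// the prefix sums [2, 5, 6] used as segment end offsets by the kernels below.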
void inclusive_scan_wrapper(
const int* length_data,
int num_lengths,
Tensor* temp_buffer,
Tensor* prefix_sum_out,
CUDAContext* context_) {
// Retrieve buffer size
size_t temp_storage_bytes = 0;
cub::DeviceScan::InclusiveSum(
NULL,
temp_storage_bytes,
length_data,
prefix_sum_out->template mutable_data<int>(),
num_lengths,
context_->cuda_stream());
// Allocate temporary storage
auto buffer_size = (temp_storage_bytes + sizeof(int)) / sizeof(int);
temp_buffer->Resize(buffer_size);
void* d_temp_storage =
static_cast<void*>(temp_buffer->template mutable_data<int>());
// Run inclusive prefix sum
cub::DeviceScan::InclusiveSum(
d_temp_storage,
temp_storage_bytes,
length_data,
prefix_sum_out->template mutable_data<int>(),
num_lengths,
context_->cuda_stream());
}
template <typename SIndex>
void sort_pairs_wrapper(
int num_indices,
int num_rows,
Tensor* temp_buffer,
const Tensor* linear_ind_buffer_,
Tensor* sorted_linear_ind_buffer_,
const Tensor* seg_id_buffer_,
Tensor* sorted_seg_id_buffer_,
CUDAContext* context_) {
// Retrieve buffer size
size_t temp_storage_bytes = 0;
cub::DeviceRadixSort::SortPairs(
nullptr,
temp_storage_bytes,
linear_ind_buffer_->template data<SIndex>(),
sorted_linear_ind_buffer_->template mutable_data<SIndex>(),
seg_id_buffer_->template data<int>(),
sorted_seg_id_buffer_->template mutable_data<int>(),
num_indices,
0,
int(log2(float(num_rows)) + 1),
context_->cuda_stream(),
false);
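// Added note: the [begin_bit, end_bit) range above (and in the second call
// below) is [0, int(log2(num_rows)) + 1), i.e. only the bits a row index can
// actually occupy are sorted; e.g. num_rows == 1000 gives end_bit == 10
// instead of the full 8 * sizeof(SIndex) bits.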
// Allocate temporary storage
auto buffer_size = (temp_storage_bytes + sizeof(int)) / sizeof(int);
temp_buffer->Resize(buffer_size);
void* d_temp_storage =
static_cast<void*>(temp_buffer->template mutable_data<int>());
cub::DeviceRadixSort::SortPairs(
d_temp_storage,
temp_storage_bytes,
linear_ind_buffer_->template data<SIndex>(),
sorted_linear_ind_buffer_->template mutable_data<SIndex>(),
seg_id_buffer_->template data<int>(),
sorted_seg_id_buffer_->template mutable_data<int>(),
num_indices,
0,
int(log2(float(num_rows)) + 1),
context_->cuda_stream(),
false);
}
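// Added example: gradient_mean_kernel below turns a summed segment gradient
// into a mean for the *MeanGradient operators -- a segment of length 2 with
// grad row [2, 4, 6] becomes [1, 2, 3], while a zero-length segment is passed
// through unchanged to avoid dividing by zero.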
template <typename T>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS)
#endif
__global__ void gradient_mean_kernel(
const T* __restrict__ grad_in,
const int* __restrict__ lengths,
T* __restrict__ grad_out,
int block_size) {
int group = blockIdx.x;
for (int i = threadIdx.x; i < block_size; i += blockDim.x) {
grad_out[group * block_size + i] = lengths[group] > 0
? grad_in[group * block_size + i] / lengths[group]
: grad_in[group * block_size + i];
}
}
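// Added overview (commentary): the kernel below runs one block per segment.
// With ExactBlock == true (the host picks this when block_size fits within
// maxThreads), threadIdx.x indexes the embedding column and threadIdx.y
// strides over the segment's lines; with ExactBlock == false, threadIdx.x
// strides over the embedding dimension and the lines are walked sequentially.
// Neither path uses atomics, which is what makes duplicated indices
// approximate.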
template <typename SIndex, typename TParam, typename T, bool ExactBlock = false>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS)
#endif
__global__ void sparse_adagrad_fused_length_sum_gradient_kernel(
const int* __restrict__ prefix_sum_length_data, // prefix of lengths
// (offsets for the
// segments)
int N, // number of rows (hash size) of embedding table
int block_size, // embedding dimension size
int num_lengths, // number of segments
const float epsilon,
TParam* param,
TParam* param_mom,
const SIndex* indices,
const T* __restrict__ grad,
const float* lr,
float weight_decay = 0.f) {
const float LR = lr[0];
// num_lengths blocks, each block process one segment
int group = blockIdx.x; // the group-th segment
int start = group == 0
? 0
: prefix_sum_length_data[group - 1]; // start offset of the segment
int end = prefix_sum_length_data[group]; // end offset of the segment
CUDA_KERNEL_ASSERT(start <= N);
CUDA_KERNEL_ASSERT(end <= N);
if (ExactBlock) {
const size_t gradIdx = group * block_size + threadIdx.x; // index for grad
for (int line = start + threadIdx.y; line < end; line += blockDim.y) {
// line: the idx in the indices
// threadIdx.x: index in the embedding dimension
const SIndex index =
indices[line]; // the index-th row in the embedding table
const size_t paramIdx =
index * block_size + threadIdx.x; // index for param
float gi = grad[gradIdx] + weight_decay * param[paramIdx];
float mom_new = gi * gi + param_mom[paramIdx];
param_mom[paramIdx] = mom_new;
float param_new = LR * gi / (sqrtf(mom_new) + epsilon) + param[paramIdx];
param[paramIdx] = param_new;
}
} else {
for (int i = threadIdx.x; i < block_size; i += blockDim.x) {
// i: index in the embedding dimension
const size_t gradIdx = group * block_size + i; // index for grad
for (int line = start; line < end; ++line) {
// line: the idx in the indices
const SIndex index =
indices[line]; // the index row in the embedding table
const size_t paramIdx = index * block_size + i; // index for param
float gi = grad[gradIdx] + weight_decay * param[paramIdx];
float mom_new = gi * gi + param_mom[paramIdx];
param_mom[paramIdx] = mom_new;
float param_new =
LR * gi / (sqrtf(mom_new) + epsilon) + param[paramIdx];
param[paramIdx] = param_new;
}
}
}
}
template <typename SIndex, typename TParam, typename T, int NumThreads>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS)
#endif
__global__ void sparse_adagrad_fused_length_weighted_sum_gradient_kernel(
const int* __restrict__ prefix_sum_length_data,
int N, // number of rows (hash size) of embedding table
int block_size, // embedding dimension size
int num_lengths, // number of segments
const float epsilon,
TParam* param,
TParam* param_mom,
const SIndex* indices,
const T* __restrict__ grad,
const T* __restrict__ weights,
T* __restrict__ weights_grad_out,
const float* lr,
float weight_decay = 0.f) {
const float LR = lr[0];
// num_lengths blocks, each block process one segment
int group = blockIdx.x; // the group-th segment
int start = group == 0
? 0
: prefix_sum_length_data[group - 1]; // start offset of the segment
int end = prefix_sum_length_data[group]; // end offset of the segment
CUDA_KERNEL_ASSERT(start <= N);
CUDA_KERNEL_ASSERT(end <= N);
// TODO: Tuning NumThreads for w_grad
typedef cub::BlockReduce<float, NumThreads> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
// TODO(jianyuhuang): parallelize this outer loop
for (int line = start; line < end; ++line) {
T w_grad = 0;
// line: the idx in the indices
const SIndex index =
indices[line]; // the index-th row in the embedding table
// SparseAdagradFusedWithSparseLengthsWeightedSumGradientOp also fuses
// LengthsRangeFill + Gather operators. In the normal SLWS operator the
// weight is accessed via weights[line], but in most cases the weights are
// generated by the LengthsRangeFill and Gather operators.
// For example, if lengths is [2, 3, 1] LengthsRangeFill will generate [0,
// 1; 0, 1, 2; 0] and they are used as indices of Gather.
// So if we fuse all of these, weights[line] just becomes
// weights[line - start].
auto in_weight_temp = weights[line - start];
for (int i = threadIdx.x; i < block_size; i += blockDim.x) {
// i: index in the embedding dimension
const size_t gradIdx = group * block_size + i; // index for in_grad
const size_t paramIdx = index * block_size + i; // index for param
// TODO: trying to reduce the variable number (common subexpression
// elimination).
auto in_grad_temp = grad[gradIdx];
w_grad += in_grad_temp * param[paramIdx];
auto out_grad_temp =
in_weight_temp * in_grad_temp + weight_decay * param[paramIdx];
// TODO: split it into two kernels to make it more similar to exact
// fusion kernel (not Approx on CPUs).
float mom_new = out_grad_temp * out_grad_temp + param_mom[paramIdx];
param_mom[paramIdx] = mom_new;
float param_new =
LR * out_grad_temp / (sqrtf(mom_new) + epsilon) + param[paramIdx];
param[paramIdx] = param_new;
}
w_grad = BlockReduce(temp_storage).Reduce(w_grad, cub::Sum());
if (threadIdx.x == 0) {
weights_grad_out[line] = w_grad;
}
__syncthreads();
}
}
// Construct a reverse map of offset_of_idx -> segment_id.
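// Worked example (added): with lengths = [2, 3, 1] the inclusive prefix sum
// is [2, 5, 6], so block 0 fills offsets [0, 2), block 1 fills [2, 5) and
// block 2 fills [5, 6), giving seg_id_data = [0, 0, 1, 1, 1, 2].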
template <typename SIndex>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS)
#endif
__global__ void linear_index_weight_offsets_dedup_kernel(
const SIndex* indices,
const int* __restrict__ prefix_sum_length_data, // prefix of lengths
int* __restrict__ seg_id_data // segment id
) {
// num_lengths blocks, each block process one segment
int group = blockIdx.x; // the group-th segment
int start = group == 0
? 0
: prefix_sum_length_data[group - 1]; // start offset of the segment
int end = prefix_sum_length_data[group]; // end offset of the segment
for (int line = start + threadIdx.x; line < end; line += blockDim.x) {
// line: the idx in the indices
seg_id_data[line] = group;
}
}
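// Added overview (commentary): the dedup kernel below launches one block per
// entry of the *sorted* index list. Only the block that sees the first
// occurrence of a row continues; it walks its num_dup duplicates, sums their
// segment gradients, updates the row-wise moment once, and applies a single
// AdaGrad step to the whole row. E.g. for sorted indices [2, 2, 7], block 0
// handles both gradients for row 2, block 1 returns early, and block 2
// handles row 7.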
template <
typename SIndex,
typename TParam,
typename T,
bool ExactBlock = false,
roundOption roundOpt = NEAREST>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS)
#endif
__global__ void rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel(
const int* __restrict__ prefix_sum_length_data, // prefix of lengths
// (offsets for the
// segments)
int N, // number of rows (hash size) of embedding table
int block_size, // embedding dimension size
int num_lengths, // number of segments
int num_indices, // number of indices
const float epsilon,
TParam* param,
T* param_mom,
const SIndex* indices,
const T* __restrict__ grad,
const SIndex* sorted_linear_ind_data, // sorted linear indices
const int* __restrict__ sorted_seg_id_data, // sorted segment id
const float* lr,
ulong2 seed,
float weight_decay = 0.f) {
class randFactor<TParam, T, roundOpt> rand_factor(
seed,
blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
threadIdx.x);
const float LR = lr[0];
// num_indices blocks, each block process one index
int sorted_linear_indice_id = blockIdx.x; // the index of sorted_linear_ind
if (sorted_linear_indice_id >= num_indices) {
// don't have warp divergence when embedding dim is multiple of 32
return;
}
// check if this thread block is responsible for this whole linear index
bool linear_index_start =
(sorted_linear_indice_id == 0 ||
sorted_linear_ind_data[sorted_linear_indice_id - 1] !=
sorted_linear_ind_data[sorted_linear_indice_id]);
if (!linear_index_start) {
// don't have warp divergence when embedding dim is multiple of 32
return;
}
// the index row in the embedding table
SIndex index = sorted_linear_ind_data[sorted_linear_indice_id];
// find the num of duplicated indices.
int num_dup = 1;
while (sorted_linear_indice_id + num_dup < num_indices &&
sorted_linear_ind_data[sorted_linear_indice_id + num_dup] == index) {
num_dup += 1;
}
// TODO: Tuning NumThreads for sum_squares
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ BlockReduce::TempStorage temp_storage;
int valid = min(block_size, blockDim.x);
float sum_squares = 0.0;
__shared__ float row_sum_squares_avg;
for (int i = threadIdx.x; i < block_size; i += blockDim.x) {
// i: index in the embedding dimension
float x_ij = 0.0;
for (int dup_id = 0; dup_id < num_dup; dup_id++) {
int group = sorted_seg_id_data[sorted_linear_indice_id + dup_id];
x_ij += grad[group * block_size + i];
}
x_ij += weight_decay *
rand_factor.convertTypeFromParamToTarget(param[index * block_size + i]);
sum_squares += x_ij * x_ij;
}
float reduce_result = BlockReduce(temp_storage).Sum(sum_squares, valid);
if (threadIdx.x == 0) {
row_sum_squares_avg = reduce_result / static_cast<float>(block_size);
float mom_new = param_mom[index] + static_cast<T>(row_sum_squares_avg);
param_mom[index] = mom_new;
}
__syncthreads();
// update param
float step = LR / (sqrtf(param_mom[index]) + epsilon);
for (int i = threadIdx.x; i < block_size; i += blockDim.x) {
float x_ij = 0.0;
for (int dup_id = 0; dup_id < num_dup; dup_id++) {
int group = sorted_seg_id_data[sorted_linear_indice_id + dup_id];
x_ij += grad[group * block_size + i];
}
const size_t paramIdx = index * block_size + i; // index for param
x_ij += weight_decay * rand_factor.convertTypeFromParamToTarget(param[paramIdx]);
param[paramIdx] =
rand_factor.convertTypeFromTargetToParam(param[paramIdx] + x_ij * step);
}
}
template <typename SIndex, typename TParam, typename T, int NumThreads>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS)
#endif
__global__
void rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel(
const int* __restrict__ prefix_sum_length_data, // prefix of lengths
// (offsets for the
// segments)
int N, // number of rows (hash size) of embedding table
int block_size, // embedding dimension size
int num_lengths, // number of segments
const float epsilon,
TParam* param,
T* param_mom,
const SIndex* indices,
const T* __restrict__ grad,
const T* __restrict__ weights,
T* __restrict__ weights_grad_out,
const float* lr,
float weight_decay = 0.f) {
const float LR = lr[0];
// num_lengths blocks, each block process one segment
int group = blockIdx.x; // the group-th segment
int start = group == 0
? 0
: prefix_sum_length_data[group - 1]; // start offset of the segment
int end = prefix_sum_length_data[group]; // end offset of the segment
CUDA_KERNEL_ASSERT(start <= N);
CUDA_KERNEL_ASSERT(end <= N);
// TODO: Tuning NumThreads for w_grad
typedef cub::BlockReduce<float, NumThreads> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int valid = min(block_size, blockDim.x);
// for avg_square_weight. Can we reuse temp_storage
__shared__ typename BlockReduce::TempStorage temp_storage2;
// TODO(jianyuhuang): parallelize this outer loop
for (int line = start; line < end; ++line) {
T w_grad = 0;
// i: index in the embedding dimension
const SIndex index = indices[line];
auto in_weight_temp = weights[line - start];
float sum_squares = 0.0;
__shared__ float row_sum_squares_avg;
for (int i = threadIdx.x; i < block_size; i += blockDim.x) {
const float x_ij = grad[group * block_size + i] +
weight_decay * param[index * block_size + i];
sum_squares += x_ij * x_ij;
}
float reduce_result = BlockReduce(temp_storage2).Sum(sum_squares, valid);
if (threadIdx.x == 0) {
row_sum_squares_avg = reduce_result / static_cast<float>(block_size);
param_mom[index] +=
static_cast<T>(row_sum_squares_avg * in_weight_temp * in_weight_temp);
}
__syncthreads();
// update param
float step = LR / (sqrtf(param_mom[index]) + epsilon);
for (int i = threadIdx.x; i < block_size; i += blockDim.x) {
const size_t gradIdx = group * block_size + i; // index for in_grad
const size_t paramIdx = index * block_size + i; // index for param
// TODO: trying to reduce the variable number (common subexpression
// elimination).
auto in_grad_temp = grad[gradIdx];
w_grad += in_grad_temp * param[paramIdx];
auto out_grad_temp =
in_weight_temp * in_grad_temp + weight_decay * param[paramIdx];
// TODO: split it into two kernels to make it more similar to exact
// fusion kernel (not Approx on CPUs).
param[paramIdx] = out_grad_temp * step + param[paramIdx];
}
w_grad = BlockReduce(temp_storage).Reduce(w_grad, cub::Sum());
if (threadIdx.x == 0) {
weights_grad_out[line] = w_grad;
}
__syncthreads();
}
}
} // namespace
template <typename T, typename TLengths, bool is_mean, class Context>
class CUDASparseAdagradFusedWithSparseLengthsSumGradientOp final
: public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CUDASparseAdagradFusedWithSparseLengthsSumGradientOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1) << "gradient optimization operator in use: "
<< "CUDASparseAdagradFusedWithSparseLengthSumGradientOp"
<< " weight_decay_=" << weight_decay_;
const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_1).size());
CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
auto n = Input(INDICES).size();
if (n == 0) {
return true;
}
return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call(
this, Input(PARAM));
}
template <typename IndexType, typename TParam>
bool DoRunWithType2() {
auto& segmentGradsInput = Input(GRAD);
auto& lengthsInput = Input(LENGTHS);
auto& indicesInput = Input(INDICES);
CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector");
CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0);
// Enforce:
// input(embedding/momentum) == outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).numel(),
Input(MOMENT_1).numel(),
"Input Param size: ",
Input(PARAM).numel(),
" Input Moment size: ",
Input(MOMENT_1).numel());
const int num_lengths = lengthsInput.dim(0);
CAFFE_ENFORCE(segmentGradsInput.dim() > 0);
CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0));
int output_0dim = indicesInput.dim(0);
if (num_lengths <= 0) {
// return early to avoid invalid empty kernel
return true;
}
inclusive_scan_length_buffer_.ResizeLike(lengthsInput);
inclusive_scan_wrapper(
lengthsInput.template data<int>(),
num_lengths,
&inclusive_scan_buffer_,
&inclusive_scan_length_buffer_,
&context_);
// compute output size using length
auto* prefix_sum_length_data =
inclusive_scan_length_buffer_.template data<int>();
const auto* lengths = lengthsInput.template data<int>();
const auto* lr = Input(LR).template data<T>();
const auto* indices = Input(INDICES).template data<IndexType>();
const T* grad = Input(GRAD).template data<T>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<TParam>();
int N = output_0dim;
int block_size = segmentGradsInput.size_from_dim(1);
auto maxThreads =
GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock;
if (is_mean) {
grad_buffer_.ResizeLike(segmentGradsInput);
}
auto* grad_buffer_data =
is_mean ? grad_buffer_.template mutable_data<T>() : NULL;
if (is_mean) {
gradient_mean_kernel<T>
<<<num_lengths, std::min(maxThreads, block_size), 0, context_.cuda_stream()>>>(
grad, lengths, grad_buffer_data, block_size);
}
if (block_size <= maxThreads) {
int multiple = std::min(maxThreads / block_size, SEGREDUCE_MINBLOCKS);
dim3 block(block_size, multiple);
// calling cuda kernel with ExactBlock = true
sparse_adagrad_fused_length_sum_gradient_kernel<
IndexType,
TParam,
T,
true><<<num_lengths, block, 0, context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
lr,
weight_decay_);
} else {
// calling cuda kernel with ExactBlock = false
sparse_adagrad_fused_length_sum_gradient_kernel<
IndexType,
TParam,
T,
false><<<num_lengths, maxThreads, 0, context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
lr,
weight_decay_);
}
return true;
}
private:
// member field to manage memory
Tensor inclusive_scan_buffer_{CUDA};
Tensor inclusive_scan_length_buffer_{CUDA};
Tensor grad_buffer_{CUDA};
protected:
T epsilon_;
T weight_decay_;
INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR, LENGTHS);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1);
};
template <typename T, typename TLengths, class Context>
class CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp final
: public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1) << "gradient optimization operator in use: "
<< "CUDASparseAdagradFusedWithSparseLengthWeightedSumGradientOp"
<< " weight_decay_=" << weight_decay_;
const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_1).size());
CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
auto n = Input(INDICES).size();
if (n == 0) {
// Allocate output to an empty tensor
Output(AUX_GRAD, n, at::dtype<T>());
return true;
}
return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call(
this, Input(PARAM));
}
template <typename IndexType, typename TParam>
bool DoRunWithType2() {
auto& segmentGradsInput = Input(GRAD);
auto& lengthsInput = Input(LENGTHS);
auto& indicesInput = Input(INDICES);
auto& weightsInput = Input(AUX_PARAM);
CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector");
CAFFE_ENFORCE_EQ(1, weightsInput.dim(), "WEIGHTS must be a vector");
CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0);
// Enforce:
// input(embedding/momentum) == outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).numel(),
Input(MOMENT_1).numel(),
"Input Param size: ",
Input(PARAM).numel(),
" Input Moment size: ",
Input(MOMENT_1).numel());
const int num_lengths = lengthsInput.dim(0);
CAFFE_ENFORCE(segmentGradsInput.dim() > 0);
CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0));
int output_0dim = indicesInput.dim(0);
auto* weightGradsOutput =
Output(AUX_GRAD, indicesInput.sizes(), at::dtype<T>());
T* out_weight_grads = weightGradsOutput->template mutable_data<T>();
if (num_lengths <= 0) {
// return early to avoid invalid empty kernel
return true;
}
inclusive_scan_length_buffer_.ResizeLike(lengthsInput);
inclusive_scan_wrapper(
lengthsInput.template data<int>(),
num_lengths,
&inclusive_scan_buffer_,
&inclusive_scan_length_buffer_,
&context_);
// compute output size using length
auto* prefix_sum_length_data =
inclusive_scan_length_buffer_.template data<int>();
const auto* lr = Input(LR).template data<T>();
const auto* indices = Input(INDICES).template data<IndexType>();
const T* grad = Input(GRAD).template data<T>();
const T* weights = weightsInput.template data<T>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<TParam>();
int N = output_0dim;
int block_size = segmentGradsInput.size_from_dim(1);
auto maxThreads =
GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock;
if (block_size > 128) {
sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
512><<<num_lengths, 512, 0, context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
} else if (block_size > 64) {
sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
128><<<num_lengths, 128, 0, context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
} else if (block_size > 32) {
sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
64><<<num_lengths, 64, 0, context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
} else {
sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
32><<<num_lengths, 32, 0, context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
}
return true;
}
private:
// member field to manage memory
Tensor inclusive_scan_buffer_{CUDA};
Tensor inclusive_scan_length_buffer_{CUDA};
protected:
T epsilon_;
T weight_decay_;
INPUT_TAGS(PARAM, MOMENT_1, AUX_PARAM, INDICES, GRAD, LR, LENGTHS);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1, AUX_GRAD);
};
template <typename T, typename TLengths, bool is_mean, class Context>
class CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp final
: public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
round_option_((roundOption)this->template GetSingleArgument<int>(
"round_option",
NEAREST)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1) << "gradient optimization operator in use: "
<< "CUDARowWiseSparseAdagradFusedWithSparseLengthSumGradientOp"
<< " weight_decay_=" << weight_decay_;
const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
auto n = Input(INDICES).size();
if (n == 0) {
return true;
}
return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call(
this, Input(PARAM));
}
template <typename IndexType, typename TParam>
bool DoRunWithType2() {
auto& segmentGradsInput = Input(GRAD);
auto& lengthsInput = Input(LENGTHS);
auto& indicesInput = Input(INDICES);
CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector");
CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0);
// Enforce:
// number of rows: input(embedding/momentum) ==
// outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).dim(0),
Input(MOMENT_1).dim(0),
"Input Param number of rows: ",
Input(PARAM).dim(0),
" Input Moment size: ",
Input(MOMENT_1).dim(0));
const int num_lengths = lengthsInput.dim(0);
CAFFE_ENFORCE(segmentGradsInput.dim() > 0);
CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0));
int output_0dim = indicesInput.dim(0);
if (num_lengths <= 0) {
// return early to avoid invalid empty kernel
return true;
}
inclusive_scan_length_buffer_.ResizeLike(lengthsInput);
inclusive_scan_wrapper(
lengthsInput.template data<int>(),
num_lengths,
&inclusive_scan_buffer_,
&inclusive_scan_length_buffer_,
&context_);
// compute output size using length
auto* prefix_sum_length_data =
inclusive_scan_length_buffer_.template data<int>();
const auto* lengths = lengthsInput.template data<int>();
const auto* lr = Input(LR).template data<T>();
const auto* indices = Input(INDICES).template data<IndexType>();
const T* grad = Input(GRAD).template data<T>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>();
int N = output_0dim;
int block_size = segmentGradsInput.size_from_dim(1);
auto maxThreads =
GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock;
ulong2 seed;
if (is_mean) {
grad_buffer_.ResizeLike(segmentGradsInput);
}
auto* grad_buffer_data =
is_mean ? grad_buffer_.template mutable_data<T>() : NULL;
if (is_mean) {
gradient_mean_kernel<T>
<<<num_lengths, std::min(maxThreads, block_size), 0, context_.cuda_stream()>>>(
grad, lengths, grad_buffer_data, block_size);
}
// 0: nearest rounding
// 1: stochastic rounding
if (round_option_ == STOCHASTIC) {
seed.x = default_rng_seed_val;
seed.y = maxThreads * block_size;
}
if (block_size <= maxThreads / 2 && block_size % 32 == 0) {
// Fast path when the embedding dimension is a multiple of 32, using
// WarpReduce.
int multiple = std::min(maxThreads / block_size, SEGREDUCE_MINBLOCKS);
dim3 block(block_size, multiple);
if (round_option_ == STOCHASTIC) {
rowwise_sparse_adagrad_fused_length_sum_gradient_kernel<
IndexType,
TParam,
T,
true,
STOCHASTIC><<<num_lengths, block, 0, context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
lr,
seed,
weight_decay_);
} else {
rowwise_sparse_adagrad_fused_length_sum_gradient_kernel<
IndexType,
TParam,
T,
true,
NEAREST><<<num_lengths, block, 0, context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
lr,
seed,
weight_decay_);
}
} else {
if (round_option_ == STOCHASTIC) {
rowwise_sparse_adagrad_fused_length_sum_gradient_kernel<
IndexType,
TParam,
T,
false,
STOCHASTIC>
<<<num_lengths,
std::min(maxThreads, block_size),
0,
context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
lr,
seed,
weight_decay_);
} else {
rowwise_sparse_adagrad_fused_length_sum_gradient_kernel<
IndexType,
TParam,
T,
false,
NEAREST>
<<<num_lengths,
std::min(maxThreads, block_size),
0,
context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
lr,
seed,
weight_decay_);
}
}
return true;
}
private:
// member field to manage memory
Tensor inclusive_scan_buffer_{CUDA};
Tensor inclusive_scan_length_buffer_{CUDA};
Tensor grad_buffer_{CUDA};
protected:
T epsilon_;
roundOption round_option_;
T weight_decay_;
INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR, LENGTHS);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1);
};
template <typename T, typename TLengths, bool is_mean, class Context>
class CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp final
: public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
round_option_((roundOption)this->template GetSingleArgument<int>(
"round_option",
NEAREST)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1) << "gradient optimization operator in use: "
<< "CUDARowWiseSparseAdagradFusedWithSparseLengthSumGradientOp"
<< " weight_decay_=" << weight_decay_;
CAFFE_ENFORCE(
round_option_ == STOCHASTIC || round_option_ == NEAREST,
"round_option_ should be either NEAREST or STOCHATIC");
const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
auto n = Input(INDICES).size();
if (n == 0) {
return true;
}
return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call(
this, Input(PARAM));
}
template <typename IndexType, typename TParam>
bool DoRunWithType2() {
auto& segmentGradsInput = Input(GRAD);
auto& lengthsInput = Input(LENGTHS);
auto& indicesInput = Input(INDICES);
CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector");
CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0);
// Enforce:
// number of rows: input(embedding/momentum) == outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).dim(0),
Input(MOMENT_1).dim(0),
"Input Param number of rows: ",
Input(PARAM).dim(0),
" Input Moment size: ",
Input(MOMENT_1).dim(0));
const int num_lengths = lengthsInput.dim(0);
const int num_indices = indicesInput.dim(0);
const int num_rows = Input(PARAM).dim(0);
CAFFE_ENFORCE(segmentGradsInput.dim() > 0);
CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0));
int output_0dim = indicesInput.dim(0);
if (num_lengths <= 0) {
// return early to avoid invalid empty kernel
return true;
}
inclusive_scan_length_buffer_.ResizeLike(lengthsInput);
inclusive_scan_wrapper(
lengthsInput.template data<int>(),
num_lengths,
&inclusive_scan_buffer_,
&inclusive_scan_length_buffer_,
&context_);
// compute output size using length
auto* prefix_sum_length_data =
inclusive_scan_length_buffer_.template data<int>();
const auto* lengths = lengthsInput.template data<int>();
const auto* lr = Input(LR).template data<T>();
const auto* indices = Input(INDICES).template data<IndexType>();
const T* grad = Input(GRAD).template data<T>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>();
int N = output_0dim;
int block_size = segmentGradsInput.size_from_dim(1);
auto maxThreads =
GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock;
if (is_mean) {
grad_buffer_.ResizeLike(segmentGradsInput);
}
auto* grad_buffer_data =
is_mean ? grad_buffer_.template mutable_data<T>() : NULL;
if (is_mean) {
gradient_mean_kernel<T>
<<<num_lengths,
std::min(maxThreads, block_size),
0,
context_.cuda_stream()>>>(
grad, lengths, grad_buffer_data, block_size);
}
sorted_linear_ind_buffer_.ResizeLike(indicesInput);
seg_id_buffer_.ResizeLike(indicesInput);
sorted_seg_id_buffer_.ResizeLike(indicesInput);
linear_index_weight_offsets_dedup_kernel<IndexType>
<<<num_lengths,
32,
0,
context_.cuda_stream()>>>(
indices,
prefix_sum_length_data,
seg_id_buffer_.template mutable_data<int>());
sort_pairs_wrapper<IndexType>(
num_indices,
num_rows,
&sort_buffer_,
&indicesInput,
&sorted_linear_ind_buffer_,
&seg_id_buffer_,
&sorted_seg_id_buffer_,
&context_);
ulong2 seed;
// 0: nearest rounding
// 1: stochastic rounding
if (round_option_ == STOCHASTIC) {
seed.x = default_rng_seed_val;
seed.y = maxThreads * block_size;
}
if (round_option_ == STOCHASTIC) {
rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel<
IndexType,
TParam,
T,
false,
STOCHASTIC>
<<<num_indices,
std::min(maxThreads, block_size),
0,
context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
num_indices,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
sorted_linear_ind_buffer_.template data<IndexType>(),
sorted_seg_id_buffer_.template data<int>(),
lr,
seed,
weight_decay_);
} else {
rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel<
IndexType,
TParam,
T,
false,
NEAREST>
<<<num_indices,
std::min(maxThreads, block_size),
0,
context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
num_indices,
epsilon_,
paramOut,
momentOut,
indices,
is_mean ? grad_buffer_data : grad,
sorted_linear_ind_buffer_.template data<IndexType>(),
sorted_seg_id_buffer_.template data<int>(),
lr,
seed,
weight_decay_);
}
return true;
}
private:
// member field to manage memory
Tensor inclusive_scan_buffer_{CUDA};
Tensor inclusive_scan_length_buffer_{CUDA};
Tensor sort_buffer_{CUDA};
Tensor sorted_linear_ind_buffer_{CUDA};
Tensor seg_id_buffer_{CUDA};
Tensor sorted_seg_id_buffer_{CUDA};
Tensor grad_buffer_{CUDA};
protected:
T epsilon_;
roundOption round_option_;
T weight_decay_;
INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR, LENGTHS);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1);
};
template <typename T, typename TLengths, class Context>
class CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp final
: public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1)
<< "gradient optimization operator in use: "
<< "CUDARowWiseSparseAdagradFusedWithSparseLengthWeightedSumGradientOp"
<< " weight_decay_=" << weight_decay_;
const T decay = this->template GetSingleArgument<T>("decay", 1.0f);
CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp");
}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(LR).size(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).ndim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
auto n = Input(INDICES).size();
if (n == 0) {
Output(AUX_GRAD, n, at::dtype<T>());
return true;
}
return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call(
this, Input(PARAM));
}
template <typename IndexType, typename TParam>
bool DoRunWithType2() {
auto& segmentGradsInput = Input(GRAD);
auto& lengthsInput = Input(LENGTHS);
auto& indicesInput = Input(INDICES);
auto& weightsInput = Input(AUX_PARAM);
CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector");
CAFFE_ENFORCE_EQ(1, weightsInput.dim(), "WEIGHTS must be a vector");
CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0);
// Enforce:
// number of rows: input(embedding/momentum) ==
// outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).dim(0),
Input(MOMENT_1).dim(0),
"Input Param number of rows: ",
Input(PARAM).dim(0),
" Input Moment size: ",
Input(MOMENT_1).dim(0));
const int num_lengths = lengthsInput.dim(0);
CAFFE_ENFORCE(segmentGradsInput.dim() > 0);
CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0));
int output_0dim = indicesInput.dim(0);
auto* weightGradsOutput =
Output(AUX_GRAD, indicesInput.sizes(), at::dtype<T>());
T* out_weight_grads = weightGradsOutput->template mutable_data<T>();
if (num_lengths <= 0) {
// return early to avoid invalid empty kernel
return true;
}
inclusive_scan_length_buffer_.ResizeLike(lengthsInput);
inclusive_scan_wrapper(
lengthsInput.template data<int>(),
num_lengths,
&inclusive_scan_buffer_,
&inclusive_scan_length_buffer_,
&context_);
// compute output size using length
auto* prefix_sum_length_data =
inclusive_scan_length_buffer_.template data<int>();
const auto* lr = Input(LR).template data<T>();
const auto* indices = Input(INDICES).template data<IndexType>();
const T* grad = Input(GRAD).template data<T>();
const T* weights = weightsInput.template data<T>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>();
int N = output_0dim;
int block_size = segmentGradsInput.size_from_dim(1);
auto maxThreads =
GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock;
if (block_size > 128) {
rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
512><<<num_lengths, 512, 0, context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
} else if (block_size > 64) {
rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
128><<<num_lengths, 128, 0, context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
} else if (block_size > 32) {
rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
64><<<num_lengths, 64, 0, context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
} else {
rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel<
IndexType,
TParam,
T,
32><<<num_lengths, 32, 0, context_.cuda_stream()>>>(
prefix_sum_length_data,
N,
block_size,
num_lengths,
epsilon_,
paramOut,
momentOut,
indices,
grad,
weights,
out_weight_grads,
lr,
weight_decay_);
}
return true;
}
private:
// member field to manage memory
Tensor inclusive_scan_buffer_{CUDA};
Tensor inclusive_scan_length_buffer_{CUDA};
protected:
T epsilon_;
T weight_decay_;
INPUT_TAGS(PARAM, MOMENT_1, AUX_PARAM, INDICES, GRAD, LR, LENGTHS);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1, AUX_GRAD);
};
// For GPU, the implementation of the exact and approx (RowWise)SparseAdagrad
// fusion are both approximate implementations.
// When we don't have the duplicated indices, the outputs are the same as the
// CPU implementation.
REGISTER_CUDA_OPERATOR(
SparseAdagradFusedWithSparseLengthsSumGradient,
CUDASparseAdagradFusedWithSparseLengthsSumGradientOp<
float,
int,
false,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
SparseAdagradFusedWithSparseLengthsSumGradientApprox,
CUDASparseAdagradFusedWithSparseLengthsSumGradientOp<
float,
int,
false,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
SparseAdagradFusedWithSparseLengthsMeanGradient,
CUDASparseAdagradFusedWithSparseLengthsSumGradientOp<
float,
int,
true,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
SparseAdagradFusedWithSparseLengthsMeanGradientApprox,
CUDASparseAdagradFusedWithSparseLengthsSumGradientOp<
float,
int,
true,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
SparseAdagradFusedWithSparseLengthsWeightedSumGradient,
CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp<
float,
int,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
SparseAdagradFusedWithSparseLengthsWeightedSumGradientApprox,
CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp<
float,
int,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdagradFusedWithSparseLengthsSumGradient,
CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp<
float,
int,
false,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdagradFusedWithSparseLengthsSumGradientApprox,
CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp<
float,
int,
false,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdagradFusedWithSparseLengthsMeanGradient,
CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp<
float,
int,
true,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdagradFusedWithSparseLengthsMeanGradientApprox,
CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp<
float,
int,
true,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradient,
CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp<
float,
int,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientApprox,
CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp<
float,
int,
CUDAContext>);
} // namespace caffe2
#undef SEGREDUCE_MINBLOCKS
|
e1e2d20a6a63a71ff371fe57042808b622a568b6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sparse_fully_connected_1x1_layer_tester_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "neural_network_cusparse_exception.h"
#include "neural_network_cudnn_exception.h"
#include "../sparse_convolution_layer.h"
namespace nnforge
{
namespace cuda
{
sparse_fully_connected_1x1_layer_tester_cuda::sparse_fully_connected_1x1_layer_tester_cuda()
: output_data_desc(0)
, bias_desc(0)
{
cudnn_safe_call(cudnnCreateTensorDescriptor(&output_data_desc));
cudnn_safe_call(cudnnCreateTensorDescriptor(&bias_desc));
}
sparse_fully_connected_1x1_layer_tester_cuda::~sparse_fully_connected_1x1_layer_tester_cuda()
{
cudnnDestroyTensorDescriptor(output_data_desc);
cudnnDestroyTensorDescriptor(bias_desc);
}
void sparse_fully_connected_1x1_layer_tester_cuda::enqueue_test(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
{
cusparse_safe_call(hipsparseSetStream(cuda_config->get_cusparse_handle(), stream_id));
float alpha = 1.0F;
float beta = 0.0F;
hipsparseMatDescr_t mat_descr;
cusparse_safe_call(hipsparseCreateMatDescr(&mat_descr));
cusparse_safe_call(hipsparseScsrmm(
cuda_config->get_cusparse_handle(),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
output_elem_count_per_entry,
entry_count,
input_elem_count_per_entry,
feature_map_connection_count,
&alpha,
mat_descr,
*data[0],
*data_custom[1],
*data_custom[0],
*input_buffer,
input_elem_count_per_entry,
&beta,
*additional_buffers[0],
output_elem_count_per_entry));
}
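// Added note (buffer roles inferred from the csrmm argument order): the call
// above computes output = weights * input, where the weight matrix is stored
// in CSR form (values in data[0], row offsets in data_custom[1], column
// indices in data_custom[0], nnz == feature_map_connection_count), the input
// batch is a dense input_elem_count_per_entry x entry_count matrix, and the
// result lands in additional_buffers[0] as
// output_elem_count_per_entry x entry_count; the bias is added below.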
// Add bias
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_safe_call(cudnnSetTensor4dDescriptor(
output_data_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
entry_count,
output_configuration_specific.feature_map_count,
1,
1));
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnAddTensor(
cuda_config->get_cudnn_handle(),
CUDNN_ADD_SAME_C,
&alpha,
bias_desc,
*data[1],
&beta,
output_data_desc,
*additional_buffers[0]));
}
}
std::vector<size_t> sparse_fully_connected_1x1_layer_tester_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(output_elem_count_per_entry * sizeof(float));
return res;
}
cuda_linear_buffer_device_smart_ptr sparse_fully_connected_1x1_layer_tester_cuda::get_output_buffer(
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers)
{
return additional_buffers[0];
}
void sparse_fully_connected_1x1_layer_tester_cuda::tester_configured()
{
nnforge_shared_ptr<const sparse_convolution_layer> layer_derived = nnforge_dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);
feature_map_connection_count = layer_derived->feature_map_connection_count;
cudnn_safe_call(cudnnSetTensor4dDescriptor(
bias_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1,
output_configuration_specific.feature_map_count,
1,
1));
}
}
}
| e1e2d20a6a63a71ff371fe57042808b622a568b6.cu | /*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sparse_fully_connected_1x1_layer_tester_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "neural_network_cusparse_exception.h"
#include "neural_network_cudnn_exception.h"
#include "../sparse_convolution_layer.h"
namespace nnforge
{
namespace cuda
{
sparse_fully_connected_1x1_layer_tester_cuda::sparse_fully_connected_1x1_layer_tester_cuda()
: output_data_desc(0)
, bias_desc(0)
{
cudnn_safe_call(cudnnCreateTensorDescriptor(&output_data_desc));
cudnn_safe_call(cudnnCreateTensorDescriptor(&bias_desc));
}
sparse_fully_connected_1x1_layer_tester_cuda::~sparse_fully_connected_1x1_layer_tester_cuda()
{
cudnnDestroyTensorDescriptor(output_data_desc);
cudnnDestroyTensorDescriptor(bias_desc);
}
void sparse_fully_connected_1x1_layer_tester_cuda::enqueue_test(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
{
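// Sparse GEMM via csrmm: additional_buffers[0] = W * input, where W is the CSR weight matrix
// (output_elem_count_per_entry x input_elem_count_per_entry, feature_map_connection_count non-zeros)
// and the dense input holds one column per entry; beta = 0, so the output buffer is overwritten.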
cusparse_safe_call(cusparseSetStream(cuda_config->get_cusparse_handle(), stream_id));
float alpha = 1.0F;
float beta = 0.0F;
cusparseMatDescr_t mat_descr;
cusparse_safe_call(cusparseCreateMatDescr(&mat_descr));
cusparse_safe_call(cusparseScsrmm(
cuda_config->get_cusparse_handle(),
CUSPARSE_OPERATION_NON_TRANSPOSE,
output_elem_count_per_entry,
entry_count,
input_elem_count_per_entry,
feature_map_connection_count,
&alpha,
mat_descr,
*data[0],
*data_custom[1],
*data_custom[0],
*input_buffer,
input_elem_count_per_entry,
&beta,
*additional_buffers[0],
output_elem_count_per_entry));
}
// Add bias
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_safe_call(cudnnSetTensor4dDescriptor(
output_data_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
entry_count,
output_configuration_specific.feature_map_count,
1,
1));
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnAddTensor(
cuda_config->get_cudnn_handle(),
CUDNN_ADD_SAME_C,
&alpha,
bias_desc,
*data[1],
&beta,
output_data_desc,
*additional_buffers[0]));
}
}
std::vector<size_t> sparse_fully_connected_1x1_layer_tester_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(output_elem_count_per_entry * sizeof(float));
return res;
}
cuda_linear_buffer_device_smart_ptr sparse_fully_connected_1x1_layer_tester_cuda::get_output_buffer(
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers)
{
return additional_buffers[0];
}
void sparse_fully_connected_1x1_layer_tester_cuda::tester_configured()
{
nnforge_shared_ptr<const sparse_convolution_layer> layer_derived = nnforge_dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);
feature_map_connection_count = layer_derived->feature_map_connection_count;
cudnn_safe_call(cudnnSetTensor4dDescriptor(
bias_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1,
output_configuration_specific.feature_map_count,
1,
1));
}
}
}
|
edb35a4ef1dca804923587a75d47cac0d0152d3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <Windows.h>
#include "cpu_bitmap.h"
using namespace std;
#define DIM 800
struct hipComplex
{
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void )
{
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a)
{
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex& a)
{
return hipComplex(r+a.r, i+a.i);
}
};
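// Returns 1 if the pixel (x, y), mapped to the complex plane, keeps a squared magnitude
// below 1000 under z -> z*z + c for 200 iterations (i.e. lies in the Julia set), 0 otherwise.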
__device__ int julia( int x, int y )
{
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
int i = 0;
for (i=0; i<200; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel( unsigned char *ptr )
{
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
// now calculate the value at that position
int juliaValue = julia( x, y );
ptr[offset*4 + 0] = 255 * juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock
{
unsigned char *dev_bitmap;
};
int main( void )
{
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
hipError_t cudaStatus;
cudaStatus = hipMalloc( (void**)&dev_bitmap, bitmap.image_size() );
if (cudaStatus != hipSuccess)
{
cout << "hipMalloc failed!" << endl;
}
data.dev_bitmap = dev_bitmap;
dim3 grid(DIM,DIM);
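// launch one block per pixel, each block running a single thread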
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(1), 0, 0, dev_bitmap );
cudaStatus = hipMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost );
hipFree( dev_bitmap );
bitmap.display_and_exit();
}
| edb35a4ef1dca804923587a75d47cac0d0152d3d.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <Windows.h>
#include "cpu_bitmap.h"
using namespace std;
#define DIM 800
struct cuComplex
{
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void )
{
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a)
{
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator+(const cuComplex& a)
{
return cuComplex(r+a.r, i+a.i);
}
};
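// Returns 1 if the pixel (x, y), mapped to the complex plane, keeps a squared magnitude
// below 1000 under z -> z*z + c for 200 iterations (i.e. lies in the Julia set), 0 otherwise.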
__device__ int julia( int x, int y )
{
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
int i = 0;
for (i=0; i<200; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel( unsigned char *ptr )
{
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
// now calculate the value at that position
int juliaValue = julia( x, y );
ptr[offset*4 + 0] = 255 * juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock
{
unsigned char *dev_bitmap;
};
int main( void )
{
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
cudaError_t cudaStatus;
cudaStatus = cudaMalloc( (void**)&dev_bitmap, bitmap.image_size() );
if (cudaStatus != cudaSuccess)
{
cout << "cudaMalloc failed!" << endl;
}
data.dev_bitmap = dev_bitmap;
dim3 grid(DIM,DIM);
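// launch one block per pixel, each block running a single thread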
kernel<<<grid,1>>>( dev_bitmap );
cudaStatus = cudaMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost );
cudaFree( dev_bitmap );
bitmap.display_and_exit();
}
|
ffe90496e1652b313de37d8901a7664e02a28c49.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h> // Do we need this now?
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <inline_ptx.h>
#include <uint_to_char.h>
namespace quda {
namespace pack {
#include <dslash_constants.h>
#include <dslash_textures.h>
} // end namespace pack
using namespace pack;
#ifdef MULTI_GPU
static int commDim[QUDA_MAX_DIM]; // Whether to do comms or not
void setPackComms(const int *comm_dim) {
for (int i=0; i<4; i++) commDim[i] = comm_dim[i];
for (int i=4; i<QUDA_MAX_DIM; i++) commDim[i] = 0;
}
#else
void setPackComms(const int *comm_dim) { ; }
#endif
#define STRIPED
#ifdef STRIPED
#else
#define SWIZZLE
#endif
#include <dslash_index.cuh>
// routines for packing the ghost zones (multi-GPU only)
#ifdef MULTI_GPU
template <typename FloatN>
struct PackParam {
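// ghost-face buffers, two per dimension: out[2*dim+0] receives the face at the start
// of the lattice (face_num == 0), out[2*dim+1] the face at the end (face_num == 1);
// outNorm holds the corresponding norm buffers for fixed-point precisions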
FloatN *out[2*4];
float *outNorm[2*4];
FloatN *in;
float *inNorm;
int_fastdiv threads; // total number of threads
// offsets which determine thread mapping to dimension
int threadDimMapLower[4]; // lowest thread which maps to dim
int threadDimMapUpper[4]; // greatest thread + 1 which maps to dim
int parity;
#ifdef USE_TEXTURE_OBJECTS
hipTextureObject_t inTex;
hipTextureObject_t inTexNorm;
#endif
int dim;
int face_num;
DslashConstant dc;
int sp_stride;
int_fastdiv swizzle;
int sites_per_block;
};
template<typename FloatN>
std::ostream& operator<<(std::ostream& output, const PackParam<FloatN>& param) {
output << "threads = " << param.threads << std::endl;
output << "threadDimMapLower = {" << param.threadDimMapLower[0] << "," <<
param.threadDimMapLower[1] << "," << param.threadDimMapLower[2] << "," << param.threadDimMapLower[3] << "}" << std::endl;
output << "threadDimMapUpper = {" << param.threadDimMapUpper[0] << "," <<
param.threadDimMapUpper[1] << "," << param.threadDimMapUpper[2] << "," << param.threadDimMapUpper[3] << "}" << std::endl;
output << "parity = " << param.parity << std::endl;
output << "dim = " << param.dim << std::endl;
output << "face_num = " << param.face_num << std::endl;
output << "X = {" << param.dc.X[0] << ","<< param.dc.X[1] << "," << param.dc.X[2] << "," << param.dc.X[3] << "," << param.dc.X[4] << "}" << std::endl;
output << "ghostFace = {" << param.dc.ghostFace[0] << ","<< param.dc.ghostFace[1] << ","
<< param.dc.ghostFace[2] << "," << param.dc.ghostFace[3] << "}" << std::endl;
output << "sp_stride = " << param.sp_stride << std::endl;
return output;
}
// Extend the PackParam class to PackExtendedParam
template<typename Float>
struct PackExtendedParam : public PackParam<Float>
{
PackExtendedParam(){}
PackExtendedParam(const PackParam<Float>& base) : PackParam<Float>(base) {}
int R[QUDA_MAX_DIM]; // boundary dimensions
};
#if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC)
// double precision
#if (defined DIRECT_ACCESS_WILSON_PACK_SPINOR) || (defined FERMI_NO_DBLE_TEX)
#define READ_SPINOR READ_SPINOR_DOUBLE
#define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP
#define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_DOUBLE_TEX
#define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexDouble
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_DOUBLE2
#define SPINOR_DOUBLE
template <int dim, int dagger, int face_num>
static inline __device__ void packFaceWilsonCore(double2 *out, float *outNorm, const double2 *in,
const float *inNorm, const int &idx,
const int &face_idx, const int &face_volume,
PackParam<double2> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
template <int dim, int dagger, int face_num>
static inline __device__ void unpackFaceWilsonCore(double2 *out, float *outNorm, const double2 *in,
const float *inNorm, const int &idx,
const int &face_idx, const int &face_volume,
PackParam<double2> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
#undef SPINOR_DOUBLE
// single precision
#ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR
#define READ_SPINOR READ_SPINOR_SINGLE
#define READ_SPINOR_UP READ_SPINOR_SINGLE_UP
#define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_SINGLE_TEX
#define READ_SPINOR_UP READ_SPINOR_SINGLE_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexSingle
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_FLOAT4
template <int dim, int dagger, int face_num>
static inline __device__ void packFaceWilsonCore(float4 *out, float *outNorm, const float4 *in, const float *inNorm,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<float4> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
template <int dim, int dagger, int face_num>
static inline __device__ void unpackFaceWilsonCore(float4 *out, float *outNorm, const float4 *in, const float *inNorm,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<float4> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
// half precision
#ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR
#define READ_SPINOR READ_SPINOR_HALF
#define READ_SPINOR_UP READ_SPINOR_HALF_UP
#define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_HALF_TEX
#define READ_SPINOR_UP READ_SPINOR_HALF_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexHalf
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_SHORT4
template <int dim, int dagger, int face_num>
static inline __device__ void packFaceWilsonCore(short4 *out, float *outNorm, const short4 *in, const float *inNorm,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<short4> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
template <int dim, int dagger, int face_num>
static inline __device__ void unpackFaceWilsonCore(short4 *out, float *outNorm, const short4 *in, const float *inNorm,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<short4> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
// quarter precision
#ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR
#define READ_SPINOR READ_SPINOR_QUARTER
#define READ_SPINOR_UP READ_SPINOR_QUARTER_UP
#define READ_SPINOR_DOWN READ_SPINOR_QUARTER_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_QUARTER_TEX
#define READ_SPINOR_UP READ_SPINOR_QUARTER_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_QUARTER_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexHalf
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_CHAR4
template <int dim, int dagger, int face_num>
static inline __device__ void packFaceWilsonCore(char4 *out, float *outNorm, const char4 *in, const float *inNorm,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<char4> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
template <int dim, int dagger, int face_num>
static inline __device__ void unpackFaceWilsonCore(char4 *out, float *outNorm, const char4 *in, const float *inNorm,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<char4> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
template <int dagger, typename FloatN>
__global__ void packFaceWilsonKernel(PackParam<FloatN> param)
{
const int nFace = 1; // 1 face for Wilson
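// thread-to-site mapping: with STRIPED each block covers sites_per_block consecutive
// face sites, stepping by blockDim.x; otherwise a grid-stride loop over sites is used,
// with the block index optionally swizzled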
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor, spin-project, and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
const int face_num = (face_idx >= nFace*param.dc.ghostFace[0]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,0,nFace,0>(face_idx,param);
packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[0], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,0,nFace,1>(face_idx,param);
packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[0], param);
}
} else if (dim == 1) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
const int face_num = (face_idx >= nFace*param.dc.ghostFace[1]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,1,nFace,0>(face_idx,param);
packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[1], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,1,nFace,1>(face_idx,param);
packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[1], param);
}
} else if (dim == 2) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
const int face_num = (face_idx >= nFace*param.dc.ghostFace[2]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,2,nFace,0>(face_idx,param);
packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[2], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,2,nFace,1>(face_idx,param);
packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[2], param);
}
} else {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
const int face_num = (face_idx >= nFace*param.dc.ghostFace[3]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,3,nFace,0>(face_idx,param);
packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[3], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,3,nFace,1>(face_idx,param);
packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[3], param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
template <int dagger, typename FloatN, int nFace>
__global__ void packFaceExtendedWilsonKernel(PackParam<FloatN> param)
{
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor, spin-project, and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
// if param.face_num==2 pack both the start and the end, otherwise pack the region of the lattice
// specified by param.face_num
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[0]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<0,nFace,0>(face_idx,param);
packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[0], param);
} else {
const int idx = indexFromFaceIndexExtended<0,nFace,1>(face_idx,param);
packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[0], param);
}
} else if (dim == 1) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[1]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<1,nFace,0>(face_idx,param);
packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[1], param);
} else {
const int idx = indexFromFaceIndexExtended<1,nFace,1>(face_idx,param);
packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[1], param);
}
} else if (dim == 2) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[2]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<2,nFace,0>(face_idx,param);
packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[2], param);
} else {
const int idx = indexFromFaceIndexExtended<2,nFace,1>(face_idx,param);
packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[2], param);
}
} else {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[3]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<3,nFace,0>(face_idx,param);
packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[3], param);
} else {
const int idx = indexFromFaceIndexExtended<3,nFace,1>(face_idx,param);
packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[3], param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
template <int dagger, typename FloatN, int nFace>
__global__ void unpackFaceExtendedWilsonKernel(PackParam<FloatN> param)
{
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor, spin-project, and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
// if param.face_num==2 pack both the start and the end, otherwise pack the region of the lattice
// specified by param.face_num
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[0]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<0,nFace,0>(face_idx,param);
unpackFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[0], param);
} else {
const int idx = indexFromFaceIndexExtended<0,nFace,1>(face_idx,param);
unpackFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[0], param);
}
} else if (dim == 1) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[1]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<1,nFace,0>(face_idx,param);
unpackFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[1], param);
} else {
const int idx = indexFromFaceIndexExtended<1,nFace,1>(face_idx,param);
unpackFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[1], param);
}
} else if (dim == 2) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[2]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<2,nFace,0>(face_idx,param);
unpackFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[2], param);
} else {
const int idx = indexFromFaceIndexExtended<2,nFace,1>(face_idx,param);
unpackFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[2], param);
}
} else {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[3]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<3,nFace,0>(face_idx,param);
unpackFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[3], param);
} else {
const int idx = indexFromFaceIndexExtended<3,nFace,1>(face_idx,param);
unpackFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[3], param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
#endif // GPU_WILSON_DIRAC || GPU_DOMAIN_WALL_DIRAC
#if defined(GPU_WILSON_DIRAC) || defined(GPU_TWISTED_MASS_DIRAC)
#endif // GPU_WILSON_DIRAC || GPU_TWISTED_MASS_DIRAC
#if defined(GPU_WILSON_DIRAC) || defined(GPU_TWISTED_MASS_DIRAC)
// double precision
#endif // GPU_WILSON_DIRAC || GPU_TWISTED_MASS_DIRAC
#if defined(GPU_WILSON_DIRAC) || defined(GPU_TWISTED_MASS_DIRAC)
// double precision
#if (defined DIRECT_ACCESS_WILSON_PACK_SPINOR) || (defined FERMI_NO_DBLE_TEX)
#define READ_SPINOR READ_SPINOR_DOUBLE
#define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP
#define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_DOUBLE_TEX
#define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexDouble
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_DOUBLE2
#define SPINOR_DOUBLE
template <int dim, int dagger, int face_num>
static inline __device__ void packTwistedFaceWilsonCore(double2 *out, float *outNorm, const double2 *in,
const float *inNorm, double a, double b, const int &idx,
const int &face_idx, const int &face_volume,
PackParam<double2> ¶m)
{
if (dagger) {
#include "wilson_pack_twisted_face_dagger_core.h"
} else {
#include "wilson_pack_twisted_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
#undef SPINOR_DOUBLE
// single precision
#ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR
#define READ_SPINOR READ_SPINOR_SINGLE
#define READ_SPINOR_UP READ_SPINOR_SINGLE_UP
#define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_SINGLE_TEX
#define READ_SPINOR_UP READ_SPINOR_SINGLE_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexSingle
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_FLOAT4
template <int dim, int dagger, int face_num>
static inline __device__ void packTwistedFaceWilsonCore(float4 *out, float *outNorm, const float4 *in, const float *inNorm, float a, float b,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<float4> ¶m)
{
if (dagger) {
#include "wilson_pack_twisted_face_dagger_core.h"
} else {
#include "wilson_pack_twisted_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
// half precision
#ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR
#define READ_SPINOR READ_SPINOR_HALF
#define READ_SPINOR_UP READ_SPINOR_HALF_UP
#define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_HALF_TEX
#define READ_SPINOR_UP READ_SPINOR_HALF_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexHalf
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_SHORT4
template <int dim, int dagger, int face_num>
static inline __device__ void packTwistedFaceWilsonCore(short4 *out, float *outNorm, const short4 *in, const float *inNorm, float a, float b,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<short4> ¶m)
{
if (dagger) {
#include "wilson_pack_twisted_face_dagger_core.h"
} else {
#include "wilson_pack_twisted_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
// quarter precision
#ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR
#define READ_SPINOR READ_SPINOR_QUARTER
#define READ_SPINOR_UP READ_SPINOR_QUARTER_UP
#define READ_SPINOR_DOWN READ_SPINOR_QUARTER_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_QUARTER_TEX
#define READ_SPINOR_UP READ_SPINOR_QUARTER_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_QUARTER_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexHalf
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_CHAR4
template <int dim, int dagger, int face_num>
static inline __device__ void packTwistedFaceWilsonCore(char4 *out, float *outNorm, const char4 *in, const float *inNorm, float a, float b,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<char4> ¶m)
{
if (dagger) {
#include "wilson_pack_twisted_face_dagger_core.h"
} else {
#include "wilson_pack_twisted_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
template <int dagger, typename FloatN, typename Float>
__global__ void packTwistedFaceWilsonKernel(Float a, Float b, PackParam<FloatN> param)
{
const int nFace = 1; // 1 face for Wilson
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor, spin-project, and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
const int face_num = (face_idx >= nFace*param.dc.ghostFace[0]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,0,nFace,0>(face_idx,param);
packTwistedFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in,
param.inNorm, a, b, idx, face_idx, param.dc.ghostFace[0], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,0,nFace,1>(face_idx,param);
packTwistedFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in,
param.inNorm, a, b, idx, face_idx, param.dc.ghostFace[0], param);
}
} else if (dim == 1) {
const int face_num = (face_idx >= nFace*param.dc.ghostFace[1]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,1,nFace,0>(face_idx,param);
packTwistedFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in,
param.inNorm, a, b, idx, face_idx, param.dc.ghostFace[1], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,1,nFace,1>(face_idx,param);
packTwistedFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in,
param.inNorm, a, b, idx, face_idx, param.dc.ghostFace[1], param);
}
} else if (dim == 2) {
const int face_num = (face_idx >= nFace*param.dc.ghostFace[2]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,2,nFace,0>(face_idx,param);
packTwistedFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in,
param.inNorm, a, b, idx, face_idx, param.dc.ghostFace[2], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,2,nFace,1>(face_idx,param);
packTwistedFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in,
param.inNorm, a, b, idx, face_idx, param.dc.ghostFace[2], param);
}
} else {
const int face_num = (face_idx >= nFace*param.dc.ghostFace[3]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,3,nFace,0>(face_idx,param);
packTwistedFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in,
param.inNorm, a, b,idx, face_idx, param.dc.ghostFace[3], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,3,nFace,1>(face_idx,param);
packTwistedFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in,
param.inNorm, a, b, idx, face_idx, param.dc.ghostFace[3], param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
#endif // GPU_TWISTED_MASS_DIRAC
template <typename FloatN, typename Float>
class PackFace : public Tunable {
protected:
void *faces[2*QUDA_MAX_DIM];
const cudaColorSpinorField *in;
const int dagger;
const int parity;
const int nFace;
const int dim;
const int face_num;
const MemoryLocation location;
// compute how many threads we need in total for the face packing
unsigned int threads() const {
unsigned int threads = 0;
if(dim < 0){ // if dim is negative, pack all dimensions
for (int i=0; i<4; i++) {
if (!commDim[i]) continue;
if ( i==3 && !getKernelPackT() ) continue;
threads += 2*nFace*in->GhostFace()[i]; // 2 for forwards and backwards faces
}
}else{ // pack only in dim dimension
if( commDim[dim] && (dim!=3 || getKernelPackT() )){
threads = nFace*in->GhostFace()[dim];
if(face_num==2) threads *= 2; // sending data forwards and backwards
}
}
return threads;
}
virtual int inputPerSite() const = 0;
virtual int outputPerSite() const = 0;
// prepare the param struct with kernel arguments
void prepareParam(PackParam<FloatN> ¶m, TuneParam &tp, int dim=-1, int face_num=2) {
param.in = (FloatN*)in->V();
param.inNorm = (float*)in->Norm();
param.dim = dim;
param.face_num = face_num;
param.parity = parity;
#ifdef USE_TEXTURE_OBJECTS
param.inTex = in->Tex();
param.inTexNorm = in->TexNorm();
#endif
param.threads = threads();
param.sp_stride = in->Stride();
int prev = -1; // previous dimension that was partitioned
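// assign each partitioned dimension a contiguous range of thread indices:
// dimension i covers [threadDimMapLower[i], threadDimMapUpper[i]), of size 2*nFace*ghostFace[i]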
for (int i=0; i<4; i++) {
param.threadDimMapLower[i] = 0;
param.threadDimMapUpper[i] = 0;
if (!commDim[i]) continue;
param.threadDimMapLower[i] = (prev>=0 ? param.threadDimMapUpper[prev] : 0);
param.threadDimMapUpper[i] = param.threadDimMapLower[i] + 2*nFace*in->GhostFace()[i];
param.out[2*i+0] = static_cast<FloatN*>(faces[2*i+0]);
param.out[2*i+1] = static_cast<FloatN*>(faces[2*i+1]);
param.outNorm[2*i+0] = reinterpret_cast<float*>(static_cast<char*>(faces[2*i+0]) + nFace*outputPerSite()*in->GhostFace()[i]*in->Precision());
param.outNorm[2*i+1] = reinterpret_cast<float*>(static_cast<char*>(faces[2*i+1]) + nFace*outputPerSite()*in->GhostFace()[i]*in->Precision());
prev=i;
}
param.dc = in->getDslashConstant(); // get pre-computed constants
param.swizzle = tp.aux.x;
param.sites_per_block = (param.threads + tp.grid.x - 1) / tp.grid.x;
}
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
#ifdef STRIPED
bool tuneGridDim() const { return true; } // If striping, always tune grid dimension
unsigned int maxGridSize() const {
if (location & Host) {
// if zero-copy policy then set a maximum number of blocks to be
// the 3 * number of dimensions we are communicating
int nDimComms = 0;
for (int d=0; d<in->Ndim(); d++) nDimComms += commDim[d];
return 3*nDimComms;
} else {
return Tunable::maxGridSize();
}
}
unsigned int minGridSize() const {
if (location & Host) {
// if zero-copy policy then set a minimum number of blocks to be
// the number of dimensions we are communicating
int nDimComms = 0;
for (int d=0; d<in->Ndim(); d++) nDimComms += commDim[d];
return nDimComms;
} else {
return Tunable::minGridSize();
}
}
#else
bool tuneGridDim() const { return location & Host; } // only tune grid dimension if doing zero-copy writing
unsigned int maxGridSize() const { return tuneGridDim() ? deviceProp.multiProcessorCount/4 : Tunable::maxGridSize(); } // use no more than a quarter of the GPU
#endif
bool tuneAuxDim() const { return true; } // Do tune the aux dimensions.
unsigned int minThreads() const { return threads(); }
void fillAux() {
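// encode the comm pattern, nFace and the pack destination into the string used as the autotune key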
strcpy(aux,"policy_kernel,");
strcat(aux, in->AuxString());
char comm[5];
comm[0] = (commDim[0] ? '1' : '0');
comm[1] = (commDim[1] ? '1' : '0');
comm[2] = (commDim[2] ? '1' : '0');
comm[3] = (commDim[3] ? '1' : '0');
comm[4] = '\0';
strcat(aux,",comm=");
strcat(aux,comm);
strcat(aux,comm_dim_topology_string());
if (getKernelPackT()) { strcat(aux,",kernelPackT"); }
switch (nFace) {
case 1: strcat(aux,",nFace=1,"); break;
case 3: strcat(aux,",nFace=3,"); break;
default: errorQuda("Number of faces not supported");
}
// label the locations we are packing to
// location label is nonp2p-p2p
switch ((int)location) {
case Device|Remote: strcat(aux,"device-remote"); break;
case Host|Remote: strcat(aux, "host-remote"); break;
case Device: strcat(aux,"device-device"); break;
case Host: strcat(aux, comm_peer2peer_enabled_global() ? "host-device" : "host-host"); break;
default: errorQuda("Unknown pack target location %d\n", location);
}
}
public:
PackFace(void *faces_[], const cudaColorSpinorField *in, MemoryLocation location,
const int dagger, const int parity, const int nFace, const int dim=-1, const int face_num=2)
: in(in), dagger(dagger),
parity(parity), nFace(nFace), dim(dim), face_num(face_num), location(location)
{
memcpy(faces, faces_, 2*QUDA_MAX_DIM*sizeof(void*));
fillAux();
#ifndef USE_TEXTURE_OBJECTS
bindSpinorTex<FloatN>(in);
#endif
}
virtual ~PackFace() {
#ifndef USE_TEXTURE_OBJECTS
unbindSpinorTex<FloatN>(in);
#endif
}
bool tuneSharedBytes() const { return location & Host ? false : Tunable::tuneSharedBytes(); }
bool advanceAux(TuneParam ¶m) const
{
#ifdef SWIZZLE
if ( location & Remote ) { // only swizzling if we're doing remote writing
if (param.aux.x < (int)maxGridSize()) {
param.aux.x++;
return true;
} else {
param.aux.x = 1;
return false;
}
} else {
return false;
}
#else
return false;
#endif
}
void initTuneParam(TuneParam ¶m) const {
Tunable::initTuneParam(param);
param.aux.x = 1; // swizzle factor
// if doing a zero-copy policy then ensure that each thread block
// runs exclusively on a given SM - this is to ensure quality of
// service for the packing kernel when running concurrently.
if (location & Host) param.shared_bytes = deviceProp.sharedMemPerBlock / 2 + 1;
}
void defaultTuneParam(TuneParam ¶m) const {
Tunable::defaultTuneParam(param);
param.aux.x = 1; // swizzle factor
}
long long flops() const { return outputPerSite()*this->threads(); }
virtual int tuningIter() const { return 3; }
virtual TuneKey tuneKey() const { return TuneKey(in->VolString(), typeid(*this).name(), aux); }
virtual void apply(const hipStream_t &stream) = 0;
long long bytes() const {
size_t faceBytes = (inputPerSite() + outputPerSite())*this->threads()*sizeof(((FloatN*)0)->x);
if (sizeof(((FloatN*)0)->x) == QUDA_HALF_PRECISION)
faceBytes += 2*this->threads()*sizeof(float); // 2 is from input and output
return faceBytes;
}
};
template <typename FloatN, typename Float>
class PackFaceWilson : public PackFace<FloatN, Float> {
private:
int inputPerSite() const { return 24; } // input is full spinor
int outputPerSite() const { return 12; } // output is spin projected
public:
PackFaceWilson(void *faces[], const cudaColorSpinorField *in, MemoryLocation location,
const int dagger, const int parity)
: PackFace<FloatN, Float>(faces, in, location, dagger, parity, 1) { }
virtual ~PackFaceWilson() { }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef GPU_WILSON_DIRAC
static PackParam<FloatN> param;
this->prepareParam(param,tp);
void *args[] = { ¶m };
void (*func)(PackParam<FloatN>) = this->dagger ? &(packFaceWilsonKernel<1,FloatN>) : &(packFaceWilsonKernel<0,FloatN>);
qudaLaunchKernel( (const void*)func, tp.grid, tp.block, args, tp.shared_bytes, stream);
#else
errorQuda("Wilson face packing kernel is not built");
#endif
}
};
void packFaceWilson(void *ghost_buf[], cudaColorSpinorField &in, MemoryLocation location,
const int dagger, const int parity, const hipStream_t &stream) {
switch(in.Precision()) {
case QUDA_DOUBLE_PRECISION:
{
PackFaceWilson<double2, double> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_SINGLE_PRECISION:
{
PackFaceWilson<float4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_HALF_PRECISION:
{
PackFaceWilson<short4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_QUARTER_PRECISION:
{
PackFaceWilson<char4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
default:
errorQuda("Precision %d not supported", in.Precision());
}
}
template <typename FloatN, typename Float>
class PackFaceTwisted : public PackFace<FloatN, Float> {
private:
int inputPerSite() const { return 24; } // input is full spinor
int outputPerSite() const { return 12; } // output is spin projected
Float a;
Float b;
public:
PackFaceTwisted(void *faces[], const cudaColorSpinorField *in, MemoryLocation location,
const int dagger, const int parity, Float a, Float b)
: PackFace<FloatN, Float>(faces, in, location, dagger, parity, 1), a(a), b(b) { }
virtual ~PackFaceTwisted() { }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef GPU_TWISTED_MASS_DIRAC
static PackParam<FloatN> param;
this->prepareParam(param,tp);
void *args[] = { &a, &b, ¶m };
void (*func)(Float,Float,PackParam<FloatN>) = this->dagger ? &(packTwistedFaceWilsonKernel<1,FloatN,Float>) : &(packTwistedFaceWilsonKernel<0,FloatN,Float>);
qudaLaunchKernel( (const void*)func, tp.grid, tp.block, args, tp.shared_bytes, stream);
#else
errorQuda("Twisted face packing kernel is not built");
#endif
}
long long flops() const { return outputPerSite()*this->threads(); }
};
//!
void packTwistedFaceWilson(void *ghost_buf[], cudaColorSpinorField &in, MemoryLocation location, const int dagger,
const int parity, const double a, const double b, const hipStream_t &stream) {
switch(in.Precision()) {
case QUDA_DOUBLE_PRECISION:
{
PackFaceTwisted<double2, double> pack(ghost_buf, &in, location, dagger, parity, a, b);
pack.apply(stream);
}
break;
case QUDA_SINGLE_PRECISION:
{
PackFaceTwisted<float4, float> pack(ghost_buf, &in, location, dagger, parity, (float)a, (float)b);
pack.apply(stream);
}
break;
case QUDA_HALF_PRECISION:
{
PackFaceTwisted<short4, float> pack(ghost_buf, &in, location, dagger, parity, (float)a, (float)b);
pack.apply(stream);
}
break;
case QUDA_QUARTER_PRECISION:
{
PackFaceTwisted<char4, float> pack(ghost_buf, &in, location, dagger, parity, (float)a, (float)b);
pack.apply(stream);
}
break;
default:
errorQuda("Precision %d not supported", in.Precision());
}
}
#ifdef GPU_STAGGERED_DIRAC
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEXDOUBLE param.inTex
#define SPINORTEXSINGLE param.inTex
#define SPINORTEXHALF param.inTex
#define SPINORTEXHALFNORM param.inTexNorm
#define SPINORTEXQUARTER param.inTex
#define SPINORTEXQUARTERNORM param.inTexNorm
#else
#define SPINORTEXDOUBLE spinorTexDouble
#define SPINORTEXSINGLE spinorTexSingle2
#define SPINORTEXHALF spinorTexHalf2
#define SPINORTEXHALFNORM spinorTexHalf2Norm
#define SPINORTEXQUARTER spinorTexQuarter2
#define SPINORTEXQUARTERNORM spinorTexQuarter2Norm
#endif
template <typename Float2>
__device__ void packFaceStaggeredCore(Float2 *out, float *outNorm, const int out_idx,
const int out_stride, const Float2 *in, const float *inNorm,
const int in_idx, const int in_stride) {
out[out_idx + 0*out_stride] = in[in_idx + 0*in_stride];
out[out_idx + 1*out_stride] = in[in_idx + 1*in_stride];
out[out_idx + 2*out_stride] = in[in_idx + 2*in_stride];
}
template<>
__device__ void packFaceStaggeredCore(short2 *out, float *outNorm, const int out_idx,
const int out_stride, const short2 *in, const float *inNorm,
const int in_idx, const int in_stride) {
out[out_idx + 0*out_stride] = in[in_idx + 0*in_stride];
out[out_idx + 1*out_stride] = in[in_idx + 1*in_stride];
out[out_idx + 2*out_stride] = in[in_idx + 2*in_stride];
outNorm[out_idx] = inNorm[in_idx];
}
template<>
__device__ void packFaceStaggeredCore(char2 *out, float *outNorm, const int out_idx,
const int out_stride, const char2 *in, const float *inNorm,
const int in_idx, const int in_stride) {
out[out_idx + 0*out_stride] = in[in_idx + 0*in_stride];
out[out_idx + 1*out_stride] = in[in_idx + 1*in_stride];
out[out_idx + 2*out_stride] = in[in_idx + 2*in_stride];
outNorm[out_idx] = inNorm[in_idx];
}
#if (defined DIRECT_ACCESS_PACK) || (defined FERMI_NO_DBLE_TEX)
template <typename Float2>
__device__ void packFaceStaggeredCore(Float2 *out, float *outNorm, const int out_idx,
const int out_stride, const Float2 *in, const float *inNorm,
const int in_idx, const PackParam<double2> ¶m) {
out[out_idx + 0*out_stride] = in[in_idx + 0*param.sp_stride];
out[out_idx + 1*out_stride] = in[in_idx + 1*param.sp_stride];
out[out_idx + 2*out_stride] = in[in_idx + 2*param.sp_stride];
}
template<>
__device__ void packFaceStaggeredCore(short2 *out, float *outNorm, const int out_idx,
const int out_stride, const short2 *in, const float *inNorm,
const int in_idx, const PackParam<double2> ¶m) {
out[out_idx + 0*out_stride] = in[in_idx + 0*param.sp_stride];
out[out_idx + 1*out_stride] = in[in_idx + 1*param.sp_stride];
out[out_idx + 2*out_stride] = in[in_idx + 2*param.sp_stride];
outNorm[out_idx] = inNorm[in_idx];
}
template<>
__device__ void packFaceStaggeredCore(char2 *out, float *outNorm, const int out_idx,
const int out_stride, const char2 *in, const float *inNorm,
const int in_idx, const PackParam<double2> ¶m) {
out[out_idx + 0*out_stride] = in[in_idx + 0*param.sp_stride];
out[out_idx + 1*out_stride] = in[in_idx + 1*param.sp_stride];
out[out_idx + 2*out_stride] = in[in_idx + 2*param.sp_stride];
outNorm[out_idx] = inNorm[in_idx];
}
#else
__device__ void packFaceStaggeredCore(double2 *out, float *outNorm, const int out_idx,
const int out_stride, const double2 *in, const float *inNorm,
const int in_idx, const PackParam<double2> ¶m) {
out[out_idx + 0*out_stride] = fetch_double2(SPINORTEXDOUBLE, in_idx + 0*param.sp_stride);
out[out_idx + 1*out_stride] = fetch_double2(SPINORTEXDOUBLE, in_idx + 1*param.sp_stride);
out[out_idx + 2*out_stride] = fetch_double2(SPINORTEXDOUBLE, in_idx + 2*param.sp_stride);
}
__device__ void packFaceStaggeredCore(float2 *out, float *outNorm, const int out_idx,
const int out_stride, const float2 *in,
const float *inNorm, const int in_idx,
const PackParam<float2> ¶m) {
out[out_idx + 0*out_stride] = TEX1DFETCH(float2, SPINORTEXSINGLE, in_idx + 0*param.sp_stride);
out[out_idx + 1*out_stride] = TEX1DFETCH(float2, SPINORTEXSINGLE, in_idx + 1*param.sp_stride);
out[out_idx + 2*out_stride] = TEX1DFETCH(float2, SPINORTEXSINGLE, in_idx + 2*param.sp_stride);
}
// this is rather dumb: undoing the texture load because cudaNormalizedReadMode is used
// should really bind to an appropriate texture instead of reusing
inline __device__ short2 float22short2(float c, float2 a) {
return make_short2((short)(a.x*(c*fixedMaxValue<short>::value)), (short)(a.y*(c*fixedMaxValue<short>::value)));
}
inline __device__ char2 float22char2(float c, float2 a) {
return make_char2((char)(a.x*(c*fixedMaxValue<char>::value)), (char)(a.y*(c*fixedMaxValue<char>::value)));
}
__device__ void packFaceStaggeredCore(short2 *out, float *outNorm, const int out_idx,
const int out_stride, const short2 *in,
const float *inNorm, const int in_idx,
const PackParam<short2> ¶m) {
out[out_idx + 0*out_stride] = float22short2(1.0f,TEX1DFETCH(float2,SPINORTEXHALF,in_idx+0*param.sp_stride));
out[out_idx + 1*out_stride] = float22short2(1.0f,TEX1DFETCH(float2,SPINORTEXHALF,in_idx+1*param.sp_stride));
out[out_idx + 2*out_stride] = float22short2(1.0f,TEX1DFETCH(float2,SPINORTEXHALF,in_idx+2*param.sp_stride));
outNorm[out_idx] = TEX1DFETCH(float, SPINORTEXHALFNORM, in_idx);
}
__device__ void packFaceStaggeredCore(char2 *out, float *outNorm, const int out_idx,
const int out_stride, const char2 *in,
const float *inNorm, const int in_idx,
const PackParam<char2> ¶m) {
out[out_idx + 0*out_stride] = float22char2(1.0f,TEX1DFETCH(float2,SPINORTEXQUARTER,in_idx+0*param.sp_stride));
out[out_idx + 1*out_stride] = float22char2(1.0f,TEX1DFETCH(float2,SPINORTEXQUARTER,in_idx+1*param.sp_stride));
out[out_idx + 2*out_stride] = float22char2(1.0f,TEX1DFETCH(float2,SPINORTEXQUARTER,in_idx+2*param.sp_stride));
outNorm[out_idx] = TEX1DFETCH(float, SPINORTEXQUARTERNORM, in_idx);
}
#endif
template <typename FloatN, int nFace>
__global__ void packFaceStaggeredKernel(PackParam<FloatN> param)
{
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
const int Ls = param.dc.X[4];
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor and write to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
const int face_num = (param.face_num==2) ? ((face_idx >= Ls*nFace*param.dc.ghostFace[0]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*Ls*nFace*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndexStaggered<0,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[0], param.outNorm[0], face_idx,
Ls*nFace*param.dc.ghostFace[0], param.in, param.inNorm, idx, param);
} else {
const int idx = indexFromFaceIndexStaggered<0,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[1], param.outNorm[1], face_idx,
Ls*nFace*param.dc.ghostFace[0], param.in, param.inNorm, idx, param);
}
} else if (dim == 1) {
const int face_num = (param.face_num==2) ? ((face_idx >= Ls*nFace*param.dc.ghostFace[1]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*Ls*nFace*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndexStaggered<1,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[2], param.outNorm[2], face_idx,
Ls*nFace*param.dc.ghostFace[1], param.in, param.inNorm, idx, param);
} else {
const int idx = indexFromFaceIndexStaggered<1,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[3], param.outNorm[3], face_idx,
Ls*nFace*param.dc.ghostFace[1], param.in, param.inNorm, idx, param);
}
} else if (dim == 2) {
const int face_num = (param.face_num==2) ? ((face_idx >= Ls*nFace*param.dc.ghostFace[2]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*Ls*nFace*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndexStaggered<2,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[4], param.outNorm[4], face_idx,
Ls*nFace*param.dc.ghostFace[2], param.in, param.inNorm, idx, param);
} else {
const int idx = indexFromFaceIndexStaggered<2,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[5], param.outNorm[5], face_idx,
Ls*nFace*param.dc.ghostFace[2], param.in, param.inNorm, idx, param);
}
} else {
const int face_num = (param.face_num==2) ? ((face_idx >= Ls*nFace*param.dc.ghostFace[3]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*Ls*nFace*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndexStaggered<3,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[6], param.outNorm[6], face_idx,
Ls*nFace*param.dc.ghostFace[3], param.in, param.inNorm,idx, param);
} else {
const int idx = indexFromFaceIndexStaggered<3,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[7], param.outNorm[7], face_idx,
Ls*nFace*param.dc.ghostFace[3], param.in, param.inNorm, idx, param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
template <typename FloatN, int nFace>
__global__ void packFaceExtendedStaggeredKernel(PackExtendedParam<FloatN> param)
{
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
// if param.face_num==2 pack both the start and the end, otherwise pack the region of the
// lattice specified by param.face_num
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[0]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[0], param.outNorm[0], face_idx,
nFace*param.dc.ghostFace[0], param.in, param.inNorm, idx, param);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[1], param.outNorm[1], face_idx,
nFace*param.dc.ghostFace[0], param.in, param.inNorm, idx, param);
}
} else if (dim == 1) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[1]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[2], param.outNorm[2], face_idx,
nFace*param.dc.ghostFace[1], param.in, param.inNorm, idx, param);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[3], param.outNorm[3], face_idx,
nFace*param.dc.ghostFace[1], param.in, param.inNorm, idx, param);
}
} else if (dim == 2) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[2]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[4], param.outNorm[4], face_idx,
nFace*param.dc.ghostFace[2], param.in, param.inNorm, idx, param);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[5], param.outNorm[5], face_idx,
nFace*param.dc.ghostFace[2], param.in, param.inNorm, idx, param);
}
} else {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[3]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[6], param.outNorm[6], face_idx,
nFace*param.dc.ghostFace[3], param.in, param.inNorm,idx, param);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[7], param.outNorm[7], face_idx,
nFace*param.dc.ghostFace[3], param.in, param.inNorm, idx, param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
template <typename FloatN, int nFace>
__global__ void unpackFaceExtendedStaggeredKernel(PackExtendedParam<FloatN> param)
{
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read the ghost-zone data and write it back into the local volume (no spin projection for staggered)
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
// if param.face_num==2 pack both the start and the end, otherwise pack the region of the
// lattice specified by param.face_num
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[0]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[0], param.outNorm[0], face_idx, nFace*param.dc.ghostFace[0]);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[1], param.outNorm[1], face_idx, nFace*param.dc.ghostFace[0]);
}
} else if (dim == 1) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[1]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[2], param.outNorm[2], face_idx, nFace*param.dc.ghostFace[1]);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[3], param.outNorm[3], face_idx, nFace*param.dc.ghostFace[1]);
}
} else if (dim == 2) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[2]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[4], param.outNorm[4], face_idx, nFace*param.dc.ghostFace[2]);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[5], param.outNorm[5], face_idx, nFace*param.dc.ghostFace[2]);
}
} else {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[3]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[6], param.outNorm[6], face_idx, nFace*param.dc.ghostFace[3]);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[7], param.outNorm[7], face_idx, nFace*param.dc.ghostFace[3]);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
#undef SPINORTEXDOUBLE
#undef SPINORTEXSINGLE
#undef SPINORTEXHALF
#undef SPINORTEXQUARTER
#endif // GPU_STAGGERED_DIRAC
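// Host-side tunable class that launches the staggered face packing kernels.
// When R != NULL the field is treated as an extended field and the extended
// pack/unpack kernels are used; "unpack" selects the ghost-to-bulk direction.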
template <typename FloatN, typename Float>
class PackFaceStaggered : public PackFace<FloatN, Float> {
private:
const int* R; // boundary dimensions for extended field
const bool unpack;
int inputPerSite() const { return 6; } // input is full spinor
int outputPerSite() const { return 6; } // output is full spinor
public:
PackFaceStaggered(void *faces[], const cudaColorSpinorField *in, MemoryLocation location,
const int nFace, const int dagger, const int parity,
const int dim, const int face_num, const int* R=NULL, const bool unpack=false)
: PackFace<FloatN, Float>(faces, in, location, dagger, parity, nFace, dim, face_num), R(R), unpack(unpack) { }
virtual ~PackFaceStaggered() { }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef GPU_STAGGERED_DIRAC
static PackParam<FloatN> param;
this->prepareParam(param,tp,this->dim,this->face_num);
if(!R){
void *args[] = { ¶m };
void (*func)(PackParam<FloatN>) = PackFace<FloatN,Float>::nFace==1 ? &(packFaceStaggeredKernel<FloatN,1>) : &(packFaceStaggeredKernel<FloatN,3>);
hipLaunchKernel( (const void*)func, tp.grid, tp.block, args, tp.shared_bytes, stream);
}else{ // R!=NULL => this is an extended field
PackExtendedParam<FloatN> extendedParam(param);
if(!unpack){
for(int d=0; d<QUDA_MAX_DIM; ++d) extendedParam.R[d] = R[d];
switch(PackFace<FloatN,Float>::nFace){
case 1:
hipLaunchKernelGGL(( packFaceExtendedStaggeredKernel<FloatN,1>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam);
break;
case 2:
hipLaunchKernelGGL(( packFaceExtendedStaggeredKernel<FloatN,2>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam);
break;
case 3:
hipLaunchKernelGGL(( packFaceExtendedStaggeredKernel<FloatN,3>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam);
break;
case 4:
hipLaunchKernelGGL(( packFaceExtendedStaggeredKernel<FloatN,4>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam);
break;
default:
errorQuda("Unsupported boundary width");
break;
}
}else{ // extended field unpack
switch(PackFace<FloatN,Float>::nFace){
case 1:
hipLaunchKernelGGL(( unpackFaceExtendedStaggeredKernel<FloatN,1>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam);
break;
case 2:
hipLaunchKernelGGL(( unpackFaceExtendedStaggeredKernel<FloatN,2>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam);
break;
case 3:
hipLaunchKernelGGL(( unpackFaceExtendedStaggeredKernel<FloatN,3>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam);
break;
case 4:
hipLaunchKernelGGL(( unpackFaceExtendedStaggeredKernel<FloatN,4>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, extendedParam);
break;
default:
errorQuda("Unsupported boundary width");
break;
}
}
}
#else
errorQuda("Staggered face packing kernel is not built");
#endif
}
long long flops() const { return 0; }
};
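// Dispatch staggered face packing on the precision of the input field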
void packFaceStaggered(void *ghost_buf[], cudaColorSpinorField &in, MemoryLocation location, int nFace,
int dagger, int parity, const int dim, const int face_num, const hipStream_t &stream) {
switch(in.Precision()) {
case QUDA_DOUBLE_PRECISION:
{
PackFaceStaggered<double2, double> pack(ghost_buf, &in, location, nFace, dagger, parity, dim, face_num);
pack.apply(stream);
}
break;
case QUDA_SINGLE_PRECISION:
{
PackFaceStaggered<float2, float> pack(ghost_buf, &in, location, nFace, dagger, parity, dim, face_num);
pack.apply(stream);
}
break;
case QUDA_HALF_PRECISION:
{
PackFaceStaggered<short2, float> pack(ghost_buf, &in, location, nFace, dagger, parity, dim, face_num);
pack.apply(stream);
}
break;
case QUDA_QUARTER_PRECISION:
{
PackFaceStaggered<char2, float> pack(ghost_buf, &in, location, nFace, dagger, parity, dim, face_num);
pack.apply(stream);
}
break;
default:
errorQuda("Precision %d not supported", in.Precision());
}
}
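// Same as packFaceStaggered, but for fields with extended boundaries of width
// R[d]; unpack=true copies ghost-zone data back into the local field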
void packFaceExtendedStaggered(void *buffer[], cudaColorSpinorField &field, MemoryLocation location, const int nFace, const int R[],
int dagger, int parity, const int dim, const int face_num, const hipStream_t &stream, bool unpack=false)
{
switch(field.Precision()){
case QUDA_DOUBLE_PRECISION:
{
PackFaceStaggered<double2,double> pack(buffer, &field, location, nFace, dagger, parity, dim, face_num, R, unpack);
pack.apply(stream);
}
break;
case QUDA_SINGLE_PRECISION:
{
PackFaceStaggered<float2,float> pack(buffer, &field, location, nFace, dagger, parity, dim, face_num, R, unpack);
pack.apply(stream);
}
break;
case QUDA_HALF_PRECISION:
{
PackFaceStaggered<short2,float> pack(buffer, &field, location, nFace, dagger, parity, dim, face_num, R, unpack);
pack.apply(stream);
}
break;
case QUDA_QUARTER_PRECISION:
{
PackFaceStaggered<char2,float> pack(buffer, &field, location, nFace, dagger, parity, dim, face_num, R, unpack);
pack.apply(stream);
}
break;
default:
errorQuda("Precision %d not supported", field.Precision());
} // switch(field.Precision())
}
#ifdef GPU_DOMAIN_WALL_DIRAC
template <int dagger, typename FloatN>
__global__ void packFaceDWKernel(PackParam<FloatN> param)
{
const int nFace = 1; // 1 face for dwf
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
const int Ls = param.dc.X[4];
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor, spin-project, and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = beginning, 1 = end
// FIXME these param.dc.ghostFace constants do not include the Ls dimension
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[0]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,0,nFace,0>(face_idx,param);
packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[0], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,0,nFace,1>(face_idx,param);
packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[0], param);
}
} else if (dim == 1) {
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[1]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,1,nFace,0>(face_idx,param);
packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[1], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,1,nFace,1>(face_idx,param);
packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[1], param);
}
} else if (dim == 2) {
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[2]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,2,nFace,0>(face_idx,param);
packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[2], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,2,nFace,1>(face_idx,param);
packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[2], param);
}
} else {
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[3]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,3,nFace,0>(face_idx,param);
packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[3], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,3,nFace,1>(face_idx,param);
packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[3], param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
template <int dagger, typename FloatN>
__global__ void packFaceDW4DKernel(PackParam<FloatN> param)
{
const int nFace = 1; // 1 face for dwf
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
const int Ls = param.dc.X[4];
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor, spin-project, and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = beginning, 1 = end
// FIXME these param.dc.ghostFace constants do not include the Ls dimension
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[0]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,0,nFace,0>(face_idx,param);
packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[0], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,0,nFace,1>(face_idx,param);
packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[0], param);
}
} else if (dim == 1) {
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[1]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,1,nFace,0>(face_idx,param);
packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[1], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,1,nFace,1>(face_idx,param);
packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[1], param);
}
} else if (dim == 2) {
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[2]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,2,nFace,0>(face_idx,param);
packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[2], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,2,nFace,1>(face_idx,param);
packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[2], param);
}
} else {
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[3]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,3,nFace,0>(face_idx,param);
packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[3], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,3,nFace,1>(face_idx,param);
packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[3], param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
#endif
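// Host-side tunable classes that launch the 5D domain-wall face packing kernels
// (the output is spin projected, as for Wilson fermions)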
template <typename FloatN, typename Float>
class PackFaceDW : public PackFace<FloatN, Float> {
private:
int inputPerSite() const { return 24; } // input is full spinor
int outputPerSite() const { return 12; } // output is spin projected
public:
PackFaceDW(void *faces[], const cudaColorSpinorField *in, MemoryLocation location,
const int dagger, const int parity)
: PackFace<FloatN, Float>(faces, in, location, dagger, parity, 1) { }
virtual ~PackFaceDW() { }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef GPU_DOMAIN_WALL_DIRAC
static PackParam<FloatN> param;
this->prepareParam(param,tp);
void *args[] = { ¶m };
void (*func)(PackParam<FloatN>) = this->dagger ? &(packFaceDWKernel<1,FloatN>) : &(packFaceDWKernel<0,FloatN>);
hipLaunchKernel( (const void*)func, tp.grid, tp.block, args, tp.shared_bytes, stream);
#else
errorQuda("DW face packing kernel is not built");
#endif
}
long long flops() const { return outputPerSite()*this->threads(); }
};
template <typename FloatN, typename Float>
class PackFaceDW4D : public PackFace<FloatN, Float> {
private:
int inputPerSite() const { return 24; } // input is full spinor
int outputPerSite() const { return 12; } // output is spin projected
public:
PackFaceDW4D(void *faces[], const cudaColorSpinorField *in, MemoryLocation location,
const int dagger, const int parity)
: PackFace<FloatN, Float>(faces, in, location, dagger, parity, 1) { }
virtual ~PackFaceDW4D() { }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef GPU_DOMAIN_WALL_DIRAC
static PackParam<FloatN> param;
this->prepareParam(param,tp);
void *args[] = { ¶m };
void (*func)(PackParam<FloatN>) = this->dagger ? &(packFaceDW4DKernel<1,FloatN>) : &(packFaceDW4DKernel<0,FloatN>);
hipLaunchKernel( (const void*)func, tp.grid, tp.block, args, tp.shared_bytes, stream);
#else
errorQuda("4D preconditioned DW face packing kernel is not built");
#endif
}
long long flops() const { return outputPerSite()*this->threads(); }
};
void packFaceDW(void *ghost_buf[], cudaColorSpinorField &in, MemoryLocation location, const int dagger,
const int parity, const hipStream_t &stream) {
if(in.DWFPCtype() == QUDA_4D_PC)
{
switch(in.Precision()) {
case QUDA_DOUBLE_PRECISION:
{
PackFaceDW4D<double2, double> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_SINGLE_PRECISION:
{
PackFaceDW4D<float4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_HALF_PRECISION:
{
PackFaceDW4D<short4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_QUARTER_PRECISION:
{
PackFaceDW4D<char4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
default:
errorQuda("Precision %d not supported", in.Precision());
}
}
else
{
switch(in.Precision()) {
case QUDA_DOUBLE_PRECISION:
{
PackFaceDW<double2, double> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_SINGLE_PRECISION:
{
PackFaceDW<float4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_HALF_PRECISION:
{
PackFaceDW<short4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_QUARTER_PRECISION:
{
PackFaceDW<char4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
default:
errorQuda("Precision %d not supported", in.Precision());
}
}
}
#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
template <int dagger, typename FloatN>
__global__ void packFaceNdegTMKernel(PackParam<FloatN> param)
{
const int nFace = 1; // 1 face for Wilson-type fermions
const int Nf = 2;
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor, spin-project, and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = beginning, 1 = end
// FIXME these param.dc.ghostFace constants do not include the Nf dimension
const int face_num = (face_idx >= nFace*Nf*param.dc.ghostFace[0]) ? 1 : 0;
face_idx -= face_num*nFace*Nf*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,0,nFace,0>(face_idx,param);
packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[0], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,0,nFace,1>(face_idx,param);
packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[0], param);
}
} else if (dim == 1) {
const int face_num = (face_idx >= nFace*Nf*param.dc.ghostFace[1]) ? 1 : 0;
face_idx -= face_num*nFace*Nf*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,1,nFace,0>(face_idx,param);
packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[1], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,1,nFace,1>(face_idx,param);
packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[1], param);
}
} else if (dim == 2) {
const int face_num = (face_idx >= nFace*Nf*param.dc.ghostFace[2]) ? 1 : 0;
face_idx -= face_num*nFace*Nf*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,2,nFace,0>(face_idx,param);
packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[2], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,2,nFace,1>(face_idx,param);
packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[2], param);
}
} else {
const int face_num = (face_idx >= nFace*Nf*param.dc.ghostFace[3]) ? 1 : 0;
face_idx -= face_num*nFace*Nf*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,3,nFace,0>(face_idx,param);
packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[3], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,3,nFace,1>(face_idx,param);
packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[3], param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
#endif
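// Host-side tunable class that launches the non-degenerate twisted-mass face packing kernel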
template <typename FloatN, typename Float>
class PackFaceNdegTM : public PackFace<FloatN, Float> {
private:
int inputPerSite() const { return 24; } // input is full spinor
int outputPerSite() const { return 12; } // output is spin projected
public:
PackFaceNdegTM(void *faces[], const cudaColorSpinorField *in, MemoryLocation location,
const int dagger, const int parity)
: PackFace<FloatN, Float>(faces, in, location, dagger, parity, 1) { }
virtual ~PackFaceNdegTM() { }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
static PackParam<FloatN> param;
this->prepareParam(param,tp);
void *args[] = { ¶m };
void (*func)(PackParam<FloatN>) = this->dagger ? &(packFaceNdegTMKernel<1,FloatN>) : &(packFaceNdegTMKernel<0,FloatN>);
hipLaunchKernel( (const void*)func, tp.grid, tp.block, args, tp.shared_bytes, stream);
#else
errorQuda("Non-degenerate twisted mass face packing kernel is not built");
#endif
}
long long flops() const { return outputPerSite()*this->threads(); }
};
void packFaceNdegTM(void *ghost_buf[], cudaColorSpinorField &in, MemoryLocation location, const int dagger,
const int parity, const hipStream_t &stream) {
switch(in.Precision()) {
case QUDA_DOUBLE_PRECISION:
{
PackFaceNdegTM<double2, double> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_SINGLE_PRECISION:
{
PackFaceNdegTM<float4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_HALF_PRECISION:
{
PackFaceNdegTM<short4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_QUARTER_PRECISION:
{
PackFaceNdegTM<char4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
default:
errorQuda("Precision %d not supported", in.Precision());
}
}
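// Top-level dispatch: choose the staggered, twisted, domain-wall or Wilson
// packing routine based on the spinor's spin, twist parameters and dimensionality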
void packFace(void *ghost_buf[2*QUDA_MAX_DIM], cudaColorSpinorField &in,
MemoryLocation location, const int nFace,
const int dagger, const int parity,
const int dim, const int face_num,
const hipStream_t &stream,
const double a, const double b)
{
int nDimPack = 0;
if(dim < 0){
for (int d=0; d<4; d++) {
if(!commDim[d]) continue;
if (d != 3 || getKernelPackT() || a != 0.0 || b!= 0.0) nDimPack++;
}
}else{
if(commDim[dim]){
if(dim!=3 || getKernelPackT() || a!=0.0 || b != 0.0) nDimPack++;
}
}
if (!nDimPack) return; // if zero then we have nothing to pack
if (nFace != 1 && in.Nspin() != 1)
errorQuda("Unsupported number of faces %d", nFace);
// Need to update this logic for other multi-src dslash packing
if (in.Nspin() == 1) {
packFaceStaggered(ghost_buf, in, location, nFace, dagger, parity, dim, face_num, stream);
} else if (a!=0.0 || b!=0.0) {
// Need to update this logic for other multi-src dslash packing
if(in.TwistFlavor() == QUDA_TWIST_SINGLET) {
packTwistedFaceWilson(ghost_buf, in, location, dagger, parity, a, b, stream);
} else {
errorQuda("Cannot perform twisted packing for the spinor.");
}
} else if (in.Ndim() == 5) {
if(in.TwistFlavor() == QUDA_TWIST_INVALID) {
packFaceDW(ghost_buf, in, location, dagger, parity, stream);
} else {
packFaceNdegTM(ghost_buf, in, location, dagger, parity, stream);
}
} else {
packFaceWilson(ghost_buf, in, location, dagger, parity, stream);
}
}
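// Extended-field variant of packFace; currently only staggered fields are supported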
void packFaceExtended(void* buffer[2*QUDA_MAX_DIM], cudaColorSpinorField &field,
MemoryLocation location, const int nFace, const int R[],
const int dagger, const int parity, const int dim, const int face_num,
const hipStream_t &stream, const bool unpack)
{
int nDimPack = 0;
if(dim < 0){
for(int d=0; d<4; d++){
if(R[d]) nDimPack++;
}
}else{
if(R[dim]) nDimPack++;
}
if(!nDimPack) return; // if zero then we have nothing to pack
if(field.Nspin() == 1){
packFaceExtendedStaggered(buffer, field, location, nFace, R, dagger, parity, dim, face_num, stream, unpack);
}else{
errorQuda("Extended quark field is not supported");
}
}
#endif // MULTI_GPU
}
| ffe90496e1652b313de37d8901a7664e02a28c49.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h> // Do we need this now?
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <inline_ptx.h>
#include <uint_to_char.h>
namespace quda {
namespace pack {
#include <dslash_constants.h>
#include <dslash_textures.h>
} // end namespace pack
using namespace pack;
#ifdef MULTI_GPU
static int commDim[QUDA_MAX_DIM]; // Whether to do comms or not
void setPackComms(const int *comm_dim) {
for (int i=0; i<4; i++) commDim[i] = comm_dim[i];
for (int i=4; i<QUDA_MAX_DIM; i++) commDim[i] = 0;
}
#else
void setPackComms(const int *comm_dim) { ; }
#endif
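// STRIPED: each block packs a contiguous stripe of sites (sites_per_block);
// otherwise a swizzled grid-stride loop covers the face volume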
#define STRIPED
#ifdef STRIPED
#else
#define SWIZZLE
#endif
#include <dslash_index.cuh>
// routines for packing the ghost zones (multi-GPU only)
#ifdef MULTI_GPU
template <typename FloatN>
struct PackParam {
FloatN *out[2*4];
float *outNorm[2*4];
FloatN *in;
float *inNorm;
int_fastdiv threads; // total number of threads
// offsets which determine thread mapping to dimension
int threadDimMapLower[4]; // lowest thread which maps to dim
int threadDimMapUpper[4]; // greatest thread + 1 which maps to dim
int parity;
#ifdef USE_TEXTURE_OBJECTS
cudaTextureObject_t inTex;
cudaTextureObject_t inTexNorm;
#endif
int dim;
int face_num;
DslashConstant dc;
int sp_stride;
int_fastdiv swizzle;
int sites_per_block;
};
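// Pretty-print a PackParam (used for debugging/verbose output)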
template<typename FloatN>
std::ostream& operator<<(std::ostream& output, const PackParam<FloatN>& param) {
output << "threads = " << param.threads << std::endl;
output << "threadDimMapLower = {" << param.threadDimMapLower[0] << "," <<
param.threadDimMapLower[1] << "," << param.threadDimMapLower[2] << "," << param.threadDimMapLower[3] << "}" << std::endl;
output << "threadDimMapUpper = {" << param.threadDimMapUpper[0] << "," <<
param.threadDimMapUpper[1] << "," << param.threadDimMapUpper[2] << "," << param.threadDimMapUpper[3] << "}" << std::endl;
output << "parity = " << param.parity << std::endl;
output << "dim = " << param.dim << std::endl;
output << "face_num = " << param.face_num << std::endl;
output << "X = {" << param.dc.X[0] << ","<< param.dc.X[1] << "," << param.dc.X[2] << "," << param.dc.X[3] << "," << param.dc.X[4] << "}" << std::endl;
output << "ghostFace = {" << param.dc.ghostFace[0] << ","<< param.dc.ghostFace[1] << ","
<< param.dc.ghostFace[2] << "," << param.dc.ghostFace[3] << "}" << std::endl;
output << "sp_stride = " << param.sp_stride << std::endl;
return output;
}
// Extend the PackParam class to PackExtendedParam
template<typename Float>
struct PackExtendedParam : public PackParam<Float>
{
PackExtendedParam(){}
PackExtendedParam(const PackParam<Float>& base) : PackParam<Float>(base) {}
int R[QUDA_MAX_DIM]; // boundary dimensions
};
#if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC)
// double precision
#if (defined DIRECT_ACCESS_WILSON_PACK_SPINOR) || (defined FERMI_NO_DBLE_TEX)
#define READ_SPINOR READ_SPINOR_DOUBLE
#define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP
#define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_DOUBLE_TEX
#define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexDouble
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_DOUBLE2
#define SPINOR_DOUBLE
template <int dim, int dagger, int face_num>
static inline __device__ void packFaceWilsonCore(double2 *out, float *outNorm, const double2 *in,
const float *inNorm, const int &idx,
const int &face_idx, const int &face_volume,
PackParam<double2> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
template <int dim, int dagger, int face_num>
static inline __device__ void unpackFaceWilsonCore(double2 *out, float *outNorm, const double2 *in,
const float *inNorm, const int &idx,
const int &face_idx, const int &face_volume,
PackParam<double2> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
#undef SPINOR_DOUBLE
// single precision
#ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR
#define READ_SPINOR READ_SPINOR_SINGLE
#define READ_SPINOR_UP READ_SPINOR_SINGLE_UP
#define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_SINGLE_TEX
#define READ_SPINOR_UP READ_SPINOR_SINGLE_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexSingle
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_FLOAT4
template <int dim, int dagger, int face_num>
static inline __device__ void packFaceWilsonCore(float4 *out, float *outNorm, const float4 *in, const float *inNorm,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<float4> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
template <int dim, int dagger, int face_num>
static inline __device__ void unpackFaceWilsonCore(float4 *out, float *outNorm, const float4 *in, const float *inNorm,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<float4> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
// half precision
#ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR
#define READ_SPINOR READ_SPINOR_HALF
#define READ_SPINOR_UP READ_SPINOR_HALF_UP
#define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_HALF_TEX
#define READ_SPINOR_UP READ_SPINOR_HALF_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexHalf
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_SHORT4
template <int dim, int dagger, int face_num>
static inline __device__ void packFaceWilsonCore(short4 *out, float *outNorm, const short4 *in, const float *inNorm,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<short4> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
template <int dim, int dagger, int face_num>
static inline __device__ void unpackFaceWilsonCore(short4 *out, float *outNorm, const short4 *in, const float *inNorm,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<short4> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
// quarter precision
#ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR
#define READ_SPINOR READ_SPINOR_QUARTER
#define READ_SPINOR_UP READ_SPINOR_QUARTER_UP
#define READ_SPINOR_DOWN READ_SPINOR_QUARTER_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_QUARTER_TEX
#define READ_SPINOR_UP READ_SPINOR_QUARTER_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_QUARTER_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexHalf
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_CHAR4
template <int dim, int dagger, int face_num>
static inline __device__ void packFaceWilsonCore(char4 *out, float *outNorm, const char4 *in, const float *inNorm,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<char4> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
template <int dim, int dagger, int face_num>
static inline __device__ void unpackFaceWilsonCore(char4 *out, float *outNorm, const char4 *in, const float *inNorm,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<char4> ¶m)
{
if (dagger) {
#include "wilson_pack_face_dagger_core.h"
} else {
#include "wilson_pack_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
template <int dagger, typename FloatN>
__global__ void packFaceWilsonKernel(PackParam<FloatN> param)
{
const int nFace = 1; // 1 face for Wilson
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor, spin-project, and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
const int face_num = (face_idx >= nFace*param.dc.ghostFace[0]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,0,nFace,0>(face_idx,param);
packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[0], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,0,nFace,1>(face_idx,param);
packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[0], param);
}
} else if (dim == 1) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
const int face_num = (face_idx >= nFace*param.dc.ghostFace[1]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,1,nFace,0>(face_idx,param);
packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[1], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,1,nFace,1>(face_idx,param);
packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[1], param);
}
} else if (dim == 2) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
const int face_num = (face_idx >= nFace*param.dc.ghostFace[2]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,2,nFace,0>(face_idx,param);
packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[2], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,2,nFace,1>(face_idx,param);
packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[2], param);
}
} else {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
const int face_num = (face_idx >= nFace*param.dc.ghostFace[3]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,3,nFace,0>(face_idx,param);
packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[3], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,3,nFace,1>(face_idx,param);
packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[3], param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
template <int dagger, typename FloatN, int nFace>
__global__ void packFaceExtendedWilsonKernel(PackParam<FloatN> param)
{
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor, spin-project, and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
// if param.face_num==2 pack both the start and the end, otherwise pack the region of the lattice
// specified by param.face_num
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[0]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<0,nFace,0>(face_idx,param);
packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[0], param);
} else {
const int idx = indexFromFaceIndexExtended<0,nFace,1>(face_idx,param);
packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[0], param);
}
} else if (dim == 1) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[1]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<1,nFace,0>(face_idx,param);
packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[1], param);
} else {
const int idx = indexFromFaceIndexExtended<1,nFace,1>(face_idx,param);
packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[1], param);
}
} else if (dim == 2) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[2]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<2,nFace,0>(face_idx,param);
packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[2], param);
} else {
const int idx = indexFromFaceIndexExtended<2,nFace,1>(face_idx,param);
packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[2], param);
}
} else {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[3]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<3,nFace,0>(face_idx,param);
packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[3], param);
} else {
const int idx = indexFromFaceIndexExtended<3,nFace,1>(face_idx,param);
packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[3], param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
template <int dagger, typename FloatN, int nFace>
__global__ void unpackFaceExtendedWilsonKernel(PackParam<FloatN> param)
{
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor, spin-project, and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
// if param.face_num==2 pack both the start and the end, otherwise pack the region of the lattice
// specified by param.face_num
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[0]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<0,nFace,0>(face_idx,param);
unpackFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[0], param);
} else {
const int idx = indexFromFaceIndexExtended<0,nFace,1>(face_idx,param);
unpackFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[0], param);
}
} else if (dim == 1) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[1]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<1,nFace,0>(face_idx,param);
unpackFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[1], param);
} else {
const int idx = indexFromFaceIndexExtended<1,nFace,1>(face_idx,param);
unpackFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[1], param);
}
} else if (dim == 2) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[2]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<2,nFace,0>(face_idx,param);
unpackFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[2], param);
} else {
const int idx = indexFromFaceIndexExtended<2,nFace,1>(face_idx,param);
unpackFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[2], param);
}
} else {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[3]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtended<3,nFace,0>(face_idx,param);
unpackFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[3], param);
} else {
const int idx = indexFromFaceIndexExtended<3,nFace,1>(face_idx,param);
unpackFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in,
param.inNorm,idx, face_idx, param.dc.ghostFace[3], param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
#endif // GPU_WILSON_DIRAC || GPU_DOMAIN_WALL_DIRAC
#if defined(GPU_WILSON_DIRAC) || defined(GPU_TWISTED_MASS_DIRAC)
#endif // GPU_WILSON_DIRAC || GPU_TWISTED_MASS_DIRAC
#if defined(GPU_WILSON_DIRAC) || defined(GPU_TWISTED_MASS_DIRAC)
// double precision
#endif // GPU_WILSON_DIRAC || GPU_TWISTED_MASS_DIRAC
#if defined(GPU_WILSON_DIRAC) || defined(GPU_TWISTED_MASS_DIRAC)
// double precision
#if (defined DIRECT_ACCESS_WILSON_PACK_SPINOR) || (defined FERMI_NO_DBLE_TEX)
#define READ_SPINOR READ_SPINOR_DOUBLE
#define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP
#define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_DOUBLE_TEX
#define READ_SPINOR_UP READ_SPINOR_DOUBLE_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_DOUBLE_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexDouble
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_DOUBLE2
#define SPINOR_DOUBLE
template <int dim, int dagger, int face_num>
static inline __device__ void packTwistedFaceWilsonCore(double2 *out, float *outNorm, const double2 *in,
const float *inNorm, double a, double b, const int &idx,
const int &face_idx, const int &face_volume,
PackParam<double2> ¶m)
{
if (dagger) {
#include "wilson_pack_twisted_face_dagger_core.h"
} else {
#include "wilson_pack_twisted_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
#undef SPINOR_DOUBLE
// single precision
#ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR
#define READ_SPINOR READ_SPINOR_SINGLE
#define READ_SPINOR_UP READ_SPINOR_SINGLE_UP
#define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_SINGLE_TEX
#define READ_SPINOR_UP READ_SPINOR_SINGLE_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_SINGLE_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexSingle
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_FLOAT4
template <int dim, int dagger, int face_num>
static inline __device__ void packTwistedFaceWilsonCore(float4 *out, float *outNorm, const float4 *in, const float *inNorm, float a, float b,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<float4> ¶m)
{
if (dagger) {
#include "wilson_pack_twisted_face_dagger_core.h"
} else {
#include "wilson_pack_twisted_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
// half precision
#ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR
#define READ_SPINOR READ_SPINOR_HALF
#define READ_SPINOR_UP READ_SPINOR_HALF_UP
#define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_HALF_TEX
#define READ_SPINOR_UP READ_SPINOR_HALF_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_HALF_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexHalf
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_SHORT4
template <int dim, int dagger, int face_num>
static inline __device__ void packTwistedFaceWilsonCore(short4 *out, float *outNorm, const short4 *in, const float *inNorm, float a, float b,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<short4> ¶m)
{
if (dagger) {
#include "wilson_pack_twisted_face_dagger_core.h"
} else {
#include "wilson_pack_twisted_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
// quarter precision
#ifdef DIRECT_ACCESS_WILSON_PACK_SPINOR
#define READ_SPINOR READ_SPINOR_QUARTER
#define READ_SPINOR_UP READ_SPINOR_QUARTER_UP
#define READ_SPINOR_DOWN READ_SPINOR_QUARTER_DOWN
#define SPINORTEX in
#else
#define READ_SPINOR READ_SPINOR_QUARTER_TEX
#define READ_SPINOR_UP READ_SPINOR_QUARTER_UP_TEX
#define READ_SPINOR_DOWN READ_SPINOR_QUARTER_DOWN_TEX
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEX param.inTex
#else
#define SPINORTEX spinorTexHalf
#endif
#endif
#define WRITE_HALF_SPINOR WRITE_HALF_SPINOR_CHAR4
template <int dim, int dagger, int face_num>
static inline __device__ void packTwistedFaceWilsonCore(char4 *out, float *outNorm, const char4 *in, const float *inNorm, float a, float b,
const int &idx, const int &face_idx,
const int &face_volume,
const PackParam<char4> ¶m)
{
if (dagger) {
#include "wilson_pack_twisted_face_dagger_core.h"
} else {
#include "wilson_pack_twisted_face_core.h"
}
}
#undef READ_SPINOR
#undef READ_SPINOR_UP
#undef READ_SPINOR_DOWN
#undef SPINORTEX
#undef WRITE_HALF_SPINOR
template <int dagger, typename FloatN, typename Float>
__global__ void packTwistedFaceWilsonKernel(Float a, Float b, PackParam<FloatN> param)
{
const int nFace = 1; // 1 face for Wilson
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor, spin-project, and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
const int face_num = (face_idx >= nFace*param.dc.ghostFace[0]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,0,nFace,0>(face_idx,param);
packTwistedFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in,
param.inNorm, a, b, idx, face_idx, param.dc.ghostFace[0], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,0,nFace,1>(face_idx,param);
packTwistedFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in,
param.inNorm, a, b, idx, face_idx, param.dc.ghostFace[0], param);
}
} else if (dim == 1) {
const int face_num = (face_idx >= nFace*param.dc.ghostFace[1]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,1,nFace,0>(face_idx,param);
packTwistedFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in,
param.inNorm, a, b, idx, face_idx, param.dc.ghostFace[1], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,1,nFace,1>(face_idx,param);
packTwistedFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in,
param.inNorm, a, b, idx, face_idx, param.dc.ghostFace[1], param);
}
} else if (dim == 2) {
const int face_num = (face_idx >= nFace*param.dc.ghostFace[2]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,2,nFace,0>(face_idx,param);
packTwistedFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in,
param.inNorm, a, b, idx, face_idx, param.dc.ghostFace[2], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,2,nFace,1>(face_idx,param);
packTwistedFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in,
param.inNorm, a, b, idx, face_idx, param.dc.ghostFace[2], param);
}
} else {
const int face_num = (face_idx >= nFace*param.dc.ghostFace[3]) ? 1 : 0;
face_idx -= face_num*nFace*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,3,nFace,0>(face_idx,param);
packTwistedFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in,
param.inNorm, a, b,idx, face_idx, param.dc.ghostFace[3], param);
} else {
const int idx = indexFromFaceIndex<4,QUDA_4D_PC,3,nFace,1>(face_idx,param);
packTwistedFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in,
param.inNorm, a, b, idx, face_idx, param.dc.ghostFace[3], param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
#endif // GPU_TWISTED_MASS_DIRAC
template <typename FloatN, typename Float>
class PackFace : public Tunable {
protected:
void *faces[2*QUDA_MAX_DIM];
const cudaColorSpinorField *in;
const int dagger;
const int parity;
const int nFace;
const int dim;
const int face_num;
const MemoryLocation location;
// compute how many threads we need in total for the face packing
unsigned int threads() const {
unsigned int threads = 0;
if(dim < 0){ // if dim is negative, pack all dimensions
for (int i=0; i<4; i++) {
if (!commDim[i]) continue;
if ( i==3 && !getKernelPackT() ) continue;
threads += 2*nFace*in->GhostFace()[i]; // 2 for forwards and backwards faces
}
}else{ // pack only in dim dimension
if( commDim[dim] && (dim!=3 || getKernelPackT() )){
threads = nFace*in->GhostFace()[dim];
if(face_num==2) threads *= 2; // sending data forwards and backwards
}
}
return threads;
}
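// Illustrative example: with commDim = {1,1,0,0}, nFace = 1 and
// GhostFace() = {Gx,Gy,Gz,Gt}, packing all dimensions gives
// threads() = 2*Gx + 2*Gy, i.e. one forwards and one backwards face per
// partitioned dimension (the numbers here are purely illustrative).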
virtual int inputPerSite() const = 0;
virtual int outputPerSite() const = 0;
// prepare the param struct with kernel arguments
void prepareParam(PackParam<FloatN> &param, TuneParam &tp, int dim=-1, int face_num=2) {
param.in = (FloatN*)in->V();
param.inNorm = (float*)in->Norm();
param.dim = dim;
param.face_num = face_num;
param.parity = parity;
#ifdef USE_TEXTURE_OBJECTS
param.inTex = in->Tex();
param.inTexNorm = in->TexNorm();
#endif
param.threads = threads();
param.sp_stride = in->Stride();
int prev = -1; // previous dimension that was partitioned
for (int i=0; i<4; i++) {
param.threadDimMapLower[i] = 0;
param.threadDimMapUpper[i] = 0;
if (!commDim[i]) continue;
param.threadDimMapLower[i] = (prev>=0 ? param.threadDimMapUpper[prev] : 0);
param.threadDimMapUpper[i] = param.threadDimMapLower[i] + 2*nFace*in->GhostFace()[i];
param.out[2*i+0] = static_cast<FloatN*>(faces[2*i+0]);
param.out[2*i+1] = static_cast<FloatN*>(faces[2*i+1]);
param.outNorm[2*i+0] = reinterpret_cast<float*>(static_cast<char*>(faces[2*i+0]) + nFace*outputPerSite()*in->GhostFace()[i]*in->Precision());
param.outNorm[2*i+1] = reinterpret_cast<float*>(static_cast<char*>(faces[2*i+1]) + nFace*outputPerSite()*in->GhostFace()[i]*in->Precision());
prev=i;
}
param.dc = in->getDslashConstant(); // get pre-computed constants
param.swizzle = tp.aux.x;
param.sites_per_block = (param.threads + tp.grid.x - 1) / tp.grid.x;
}
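// The threadDimMapLower/Upper arrays partition the flattened thread index into
// one contiguous segment per partitioned dimension (continuing the example
// above: dim 0 covers [0, 2*Gx) and dim 1 covers [2*Gx, 2*Gx + 2*Gy)); the
// kernels recover (dim, face_idx) from tid via dimFromFaceIndex. Each face
// buffer holds the packed spinor data first, with the norm array needed by the
// fixed-point precisions appended after
// nFace*outputPerSite()*GhostFace()[i]*Precision() bytes.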
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }
#ifdef STRIPED
bool tuneGridDim() const { return true; } // If striping, always tune grid dimension
unsigned int maxGridSize() const {
if (location & Host) {
// if zero-copy policy then set the maximum number of blocks to be
// 3 x the number of dimensions we are communicating
int nDimComms = 0;
for (int d=0; d<in->Ndim(); d++) nDimComms += commDim[d];
return 3*nDimComms;
} else {
return Tunable::maxGridSize();
}
} // use no more than a quarter of the GPU
unsigned int minGridSize() const {
if (location & Host) {
// if zero-copy policy then set the minimum number of blocks to be
// the number of dimensions we are communicating
int nDimComms = 0;
for (int d=0; d<in->Ndim(); d++) nDimComms += commDim[d];
return nDimComms;
} else {
return Tunable::minGridSize();
}
}
#else
bool tuneGridDim() const { return location & Host; } // only tune grid dimension if doing zero-copy writing
unsigned int maxGridSize() const { return tuneGridDim() ? deviceProp.multiProcessorCount/4 : Tunable::maxGridSize(); } // use no more than a quarter of the GPU
#endif
bool tuneAuxDim() const { return true; } // Do tune the aux dimensions.
unsigned int minThreads() const { return threads(); }
void fillAux() {
strcpy(aux,"policy_kernel,");
strcat(aux, in->AuxString());
char comm[5];
comm[0] = (commDim[0] ? '1' : '0');
comm[1] = (commDim[1] ? '1' : '0');
comm[2] = (commDim[2] ? '1' : '0');
comm[3] = (commDim[3] ? '1' : '0');
comm[4] = '\0'; strcat(aux,",comm=");
strcat(aux,comm);
strcat(aux,comm_dim_topology_string());
if (getKernelPackT()) { strcat(aux,",kernelPackT"); }
switch (nFace) {
case 1: strcat(aux,",nFace=1,"); break;
case 3: strcat(aux,",nFace=3,"); break;
default: errorQuda("Number of faces not supported");
}
// label the locations we are packing to
// location label is nonp2p-p2p
switch ((int)location) {
case Device|Remote: strcat(aux,"device-remote"); break;
case Host|Remote: strcat(aux, "host-remote"); break;
case Device: strcat(aux,"device-device"); break;
case Host: strcat(aux, comm_peer2peer_enabled_global() ? "host-device" : "host-host"); break;
default: errorQuda("Unknown pack target location %d\n", location);
}
}
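// The aux string built above becomes part of the TuneKey under which launch
// parameters are cached, so every flag that changes kernel behaviour
// (communication pattern, kernelPackT, nFace, packing destination) must be
// encoded in it; a typical value might look roughly like
// "policy_kernel,<field aux>,comm=1100,<topology>,nFace=1,device-device"
// (illustrative only).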
public:
PackFace(void *faces_[], const cudaColorSpinorField *in, MemoryLocation location,
const int dagger, const int parity, const int nFace, const int dim=-1, const int face_num=2)
: in(in), dagger(dagger),
parity(parity), nFace(nFace), dim(dim), face_num(face_num), location(location)
{
memcpy(faces, faces_, 2*QUDA_MAX_DIM*sizeof(void*));
fillAux();
#ifndef USE_TEXTURE_OBJECTS
bindSpinorTex<FloatN>(in);
#endif
}
virtual ~PackFace() {
#ifndef USE_TEXTURE_OBJECTS
unbindSpinorTex<FloatN>(in);
#endif
}
bool tuneSharedBytes() const { return location & Host ? false : Tunable::tuneSharedBytes(); }
bool advanceAux(TuneParam &param) const
{
#ifdef SWIZZLE
if ( location & Remote ) { // only swizzling if we're doing remote writing
if (param.aux.x < (int)maxGridSize()) {
param.aux.x++;
return true;
} else {
param.aux.x = 1;
return false;
}
} else {
return false;
}
#else
return false;
#endif
}
void initTuneParam(TuneParam &param) const {
Tunable::initTuneParam(param);
param.aux.x = 1; // swizzle factor
// if doing a zero-copy policy then ensure that each thread block
// runs exclusively on a given SM - this is to ensure quality of
// service for the packing kernel when running concurrently.
if (location & Host) param.shared_bytes = deviceProp.sharedMemPerBlock / 2 + 1;
}
void defaultTuneParam(TuneParam &param) const {
Tunable::defaultTuneParam(param);
param.aux.x = 1; // swizzle factor
}
long long flops() const { return outputPerSite()*this->threads(); }
virtual int tuningIter() const { return 3; }
virtual TuneKey tuneKey() const { return TuneKey(in->VolString(), typeid(*this).name(), aux); }
virtual void apply(const cudaStream_t &stream) = 0;
long long bytes() const {
size_t faceBytes = (inputPerSite() + outputPerSite())*this->threads()*sizeof(((FloatN*)0)->x);
if (sizeof(((FloatN*)0)->x) == QUDA_HALF_PRECISION)
faceBytes += 2*this->threads()*sizeof(float); // 2 is from input and output
return faceBytes;
}
};
template <typename FloatN, typename Float>
class PackFaceWilson : public PackFace<FloatN, Float> {
private:
int inputPerSite() const { return 24; } // input is full spinor
int outputPerSite() const { return 12; } // output is spin projected
public:
PackFaceWilson(void *faces[], const cudaColorSpinorField *in, MemoryLocation location,
const int dagger, const int parity)
: PackFace<FloatN, Float>(faces, in, location, dagger, parity, 1) { }
virtual ~PackFaceWilson() { }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef GPU_WILSON_DIRAC
static PackParam<FloatN> param;
this->prepareParam(param,tp);
void *args[] = { &param };
void (*func)(PackParam<FloatN>) = this->dagger ? &(packFaceWilsonKernel<1,FloatN>) : &(packFaceWilsonKernel<0,FloatN>);
qudaLaunchKernel( (const void*)func, tp.grid, tp.block, args, tp.shared_bytes, stream);
#else
errorQuda("Wilson face packing kernel is not built");
#endif
}
};
void packFaceWilson(void *ghost_buf[], cudaColorSpinorField &in, MemoryLocation location,
const int dagger, const int parity, const cudaStream_t &stream) {
switch(in.Precision()) {
case QUDA_DOUBLE_PRECISION:
{
PackFaceWilson<double2, double> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_SINGLE_PRECISION:
{
PackFaceWilson<float4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_HALF_PRECISION:
{
PackFaceWilson<short4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_QUARTER_PRECISION:
{
PackFaceWilson<char4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
default:
errorQuda("Precision %d not supported", in.Precision());
}
}
template <typename FloatN, typename Float>
class PackFaceTwisted : public PackFace<FloatN, Float> {
private:
int inputPerSite() const { return 24; } // input is full spinor
int outputPerSite() const { return 12; } // output is spin projected
Float a;
Float b;
public:
PackFaceTwisted(void *faces[], const cudaColorSpinorField *in, MemoryLocation location,
const int dagger, const int parity, Float a, Float b)
: PackFace<FloatN, Float>(faces, in, location, dagger, parity, 1), a(a), b(b) { }
virtual ~PackFaceTwisted() { }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef GPU_TWISTED_MASS_DIRAC
static PackParam<FloatN> param;
this->prepareParam(param,tp);
void *args[] = { &a, &b, &param };
void (*func)(Float,Float,PackParam<FloatN>) = this->dagger ? &(packTwistedFaceWilsonKernel<1,FloatN,Float>) : &(packTwistedFaceWilsonKernel<0,FloatN,Float>);
cudaLaunchKernel( (const void*)func, tp.grid, tp.block, args, tp.shared_bytes, stream);
#else
errorQuda("Twisted face packing kernel is not built");
#endif
}
long long flops() const { return outputPerSite()*this->threads(); }
};
//!
void packTwistedFaceWilson(void *ghost_buf[], cudaColorSpinorField &in, MemoryLocation location, const int dagger,
const int parity, const double a, const double b, const cudaStream_t &stream) {
switch(in.Precision()) {
case QUDA_DOUBLE_PRECISION:
{
PackFaceTwisted<double2, double> pack(ghost_buf, &in, location, dagger, parity, a, b);
pack.apply(stream);
}
break;
case QUDA_SINGLE_PRECISION:
{
PackFaceTwisted<float4, float> pack(ghost_buf, &in, location, dagger, parity, (float)a, (float)b);
pack.apply(stream);
}
break;
case QUDA_HALF_PRECISION:
{
PackFaceTwisted<short4, float> pack(ghost_buf, &in, location, dagger, parity, (float)a, (float)b);
pack.apply(stream);
}
break;
case QUDA_QUARTER_PRECISION:
{
PackFaceTwisted<char4, float> pack(ghost_buf, &in, location, dagger, parity, (float)a, (float)b);
pack.apply(stream);
}
break;
default:
errorQuda("Precision %d not supported", in.Precision());
}
}
#ifdef GPU_STAGGERED_DIRAC
#ifdef USE_TEXTURE_OBJECTS
#define SPINORTEXDOUBLE param.inTex
#define SPINORTEXSINGLE param.inTex
#define SPINORTEXHALF param.inTex
#define SPINORTEXHALFNORM param.inTexNorm
#define SPINORTEXQUARTER param.inTex
#define SPINORTEXQUARTERNORM param.inTexNorm
#else
#define SPINORTEXDOUBLE spinorTexDouble
#define SPINORTEXSINGLE spinorTexSingle2
#define SPINORTEXHALF spinorTexHalf2
#define SPINORTEXHALFNORM spinorTexHalf2Norm
#define SPINORTEXQUARTER spinorTexQuarter2
#define SPINORTEXQUARTERNORM spinorTexQuarter2Norm
#endif
template <typename Float2>
__device__ void packFaceStaggeredCore(Float2 *out, float *outNorm, const int out_idx,
const int out_stride, const Float2 *in, const float *inNorm,
const int in_idx, const int in_stride) {
out[out_idx + 0*out_stride] = in[in_idx + 0*in_stride];
out[out_idx + 1*out_stride] = in[in_idx + 1*in_stride];
out[out_idx + 2*out_stride] = in[in_idx + 2*in_stride];
}
template<>
__device__ void packFaceStaggeredCore(short2 *out, float *outNorm, const int out_idx,
const int out_stride, const short2 *in, const float *inNorm,
const int in_idx, const int in_stride) {
out[out_idx + 0*out_stride] = in[in_idx + 0*in_stride];
out[out_idx + 1*out_stride] = in[in_idx + 1*in_stride];
out[out_idx + 2*out_stride] = in[in_idx + 2*in_stride];
outNorm[out_idx] = inNorm[in_idx];
}
template<>
__device__ void packFaceStaggeredCore(char2 *out, float *outNorm, const int out_idx,
const int out_stride, const char2 *in, const float *inNorm,
const int in_idx, const int in_stride) {
out[out_idx + 0*out_stride] = in[in_idx + 0*in_stride];
out[out_idx + 1*out_stride] = in[in_idx + 1*in_stride];
out[out_idx + 2*out_stride] = in[in_idx + 2*in_stride];
outNorm[out_idx] = inNorm[in_idx];
}
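// The three assignments above copy the three color components of one staggered
// spinor site, stored out_stride / in_stride elements apart; the short2 and
// char2 specializations additionally copy the per-site norm that the
// fixed-point formats need to reconstruct the field values.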
#if (defined DIRECT_ACCESS_PACK) || (defined FERMI_NO_DBLE_TEX)
template <typename Float2>
__device__ void packFaceStaggeredCore(Float2 *out, float *outNorm, const int out_idx,
const int out_stride, const Float2 *in, const float *inNorm,
const int in_idx, const PackParam<double2> ¶m) {
out[out_idx + 0*out_stride] = in[in_idx + 0*param.sp_stride];
out[out_idx + 1*out_stride] = in[in_idx + 1*param.sp_stride];
out[out_idx + 2*out_stride] = in[in_idx + 2*param.sp_stride];
}
template<>
__device__ void packFaceStaggeredCore(short2 *out, float *outNorm, const int out_idx,
const int out_stride, const short2 *in, const float *inNorm,
const int in_idx, const PackParam<double2> ¶m) {
out[out_idx + 0*out_stride] = in[in_idx + 0*param.sp_stride];
out[out_idx + 1*out_stride] = in[in_idx + 1*param.sp_stride];
out[out_idx + 2*out_stride] = in[in_idx + 2*param.sp_stride];
outNorm[out_idx] = inNorm[in_idx];
}
template<>
__device__ void packFaceStaggeredCore(char2 *out, float *outNorm, const int out_idx,
const int out_stride, const char2 *in, const float *inNorm,
const int in_idx, const PackParam<double2> ¶m) {
out[out_idx + 0*out_stride] = in[in_idx + 0*param.sp_stride];
out[out_idx + 1*out_stride] = in[in_idx + 1*param.sp_stride];
out[out_idx + 2*out_stride] = in[in_idx + 2*param.sp_stride];
outNorm[out_idx] = inNorm[in_idx];
}
#else
__device__ void packFaceStaggeredCore(double2 *out, float *outNorm, const int out_idx,
const int out_stride, const double2 *in, const float *inNorm,
const int in_idx, const PackParam<double2> ¶m) {
out[out_idx + 0*out_stride] = fetch_double2(SPINORTEXDOUBLE, in_idx + 0*param.sp_stride);
out[out_idx + 1*out_stride] = fetch_double2(SPINORTEXDOUBLE, in_idx + 1*param.sp_stride);
out[out_idx + 2*out_stride] = fetch_double2(SPINORTEXDOUBLE, in_idx + 2*param.sp_stride);
}
__device__ void packFaceStaggeredCore(float2 *out, float *outNorm, const int out_idx,
const int out_stride, const float2 *in,
const float *inNorm, const int in_idx,
const PackParam<float2> ¶m) {
out[out_idx + 0*out_stride] = TEX1DFETCH(float2, SPINORTEXSINGLE, in_idx + 0*param.sp_stride);
out[out_idx + 1*out_stride] = TEX1DFETCH(float2, SPINORTEXSINGLE, in_idx + 1*param.sp_stride);
out[out_idx + 2*out_stride] = TEX1DFETCH(float2, SPINORTEXSINGLE, in_idx + 2*param.sp_stride);
}
// this is rather dumb: undoing the texture load because cudaNormalizedReadMode is used
// should really bind to an appropriate texture instead of reusing
inline __device__ short2 float22short2(float c, float2 a) {
return make_short2((short)(a.x*(c*fixedMaxValue<short>::value)), (short)(a.y*(c*fixedMaxValue<short>::value)));
}
inline __device__ char2 float22char2(float c, float2 a) {
return make_char2((char)(a.x*(c*fixedMaxValue<char>::value)), (char)(a.y*(c*fixedMaxValue<char>::value)));
}
__device__ void packFaceStaggeredCore(short2 *out, float *outNorm, const int out_idx,
const int out_stride, const short2 *in,
const float *inNorm, const int in_idx,
const PackParam<short2> ¶m) {
out[out_idx + 0*out_stride] = float22short2(1.0f,TEX1DFETCH(float2,SPINORTEXHALF,in_idx+0*param.sp_stride));
out[out_idx + 1*out_stride] = float22short2(1.0f,TEX1DFETCH(float2,SPINORTEXHALF,in_idx+1*param.sp_stride));
out[out_idx + 2*out_stride] = float22short2(1.0f,TEX1DFETCH(float2,SPINORTEXHALF,in_idx+2*param.sp_stride));
outNorm[out_idx] = TEX1DFETCH(float, SPINORTEXHALFNORM, in_idx);
}
__device__ void packFaceStaggeredCore(char2 *out, float *outNorm, const int out_idx,
const int out_stride, const char2 *in,
const float *inNorm, const int in_idx,
const PackParam<char2> ¶m) {
out[out_idx + 0*out_stride] = float22char2(1.0f,TEX1DFETCH(float2,SPINORTEXQUARTER,in_idx+0*param.sp_stride));
out[out_idx + 1*out_stride] = float22char2(1.0f,TEX1DFETCH(float2,SPINORTEXQUARTER,in_idx+1*param.sp_stride));
out[out_idx + 2*out_stride] = float22char2(1.0f,TEX1DFETCH(float2,SPINORTEXQUARTER,in_idx+2*param.sp_stride));
outNorm[out_idx] = TEX1DFETCH(float, SPINORTEXQUARTERNORM, in_idx);
}
#endif
template <typename FloatN, int nFace>
__global__ void packFaceStaggeredKernel(PackParam<FloatN> param)
{
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
const int Ls = param.dc.X[4];
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor and write to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
const int face_num = (param.face_num==2) ? ((face_idx >= Ls*nFace*param.dc.ghostFace[0]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*Ls*nFace*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndexStaggered<0,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[0], param.outNorm[0], face_idx,
Ls*nFace*param.dc.ghostFace[0], param.in, param.inNorm, idx, param);
} else {
const int idx = indexFromFaceIndexStaggered<0,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[1], param.outNorm[1], face_idx,
Ls*nFace*param.dc.ghostFace[0], param.in, param.inNorm, idx, param);
}
} else if (dim == 1) {
const int face_num = (param.face_num==2) ? ((face_idx >= Ls*nFace*param.dc.ghostFace[1]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*Ls*nFace*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndexStaggered<1,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[2], param.outNorm[2], face_idx,
Ls*nFace*param.dc.ghostFace[1], param.in, param.inNorm, idx, param);
} else {
const int idx = indexFromFaceIndexStaggered<1,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[3], param.outNorm[3], face_idx,
Ls*nFace*param.dc.ghostFace[1], param.in, param.inNorm, idx, param);
}
} else if (dim == 2) {
const int face_num = (param.face_num==2) ? ((face_idx >= Ls*nFace*param.dc.ghostFace[2]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*Ls*nFace*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndexStaggered<2,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[4], param.outNorm[4], face_idx,
Ls*nFace*param.dc.ghostFace[2], param.in, param.inNorm, idx, param);
} else {
const int idx = indexFromFaceIndexStaggered<2,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[5], param.outNorm[5], face_idx,
Ls*nFace*param.dc.ghostFace[2], param.in, param.inNorm, idx, param);
}
} else {
const int face_num = (param.face_num==2) ? ((face_idx >= Ls*nFace*param.dc.ghostFace[3]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*Ls*nFace*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndexStaggered<3,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[6], param.outNorm[6], face_idx,
Ls*nFace*param.dc.ghostFace[3], param.in, param.inNorm,idx, param);
} else {
const int idx = indexFromFaceIndexStaggered<3,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[7], param.outNorm[7], face_idx,
Ls*nFace*param.dc.ghostFace[3], param.in, param.inNorm, idx, param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
template <typename FloatN, int nFace>
__global__ void packFaceExtendedStaggeredKernel(PackExtendedParam<FloatN> param)
{
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor and write to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
// if param.face_num==2 pack both the start and the end, otherwise pack the region of the
// lattice specified by param.face_num
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[0]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[0], param.outNorm[0], face_idx,
nFace*param.dc.ghostFace[0], param.in, param.inNorm, idx, param);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[1], param.outNorm[1], face_idx,
nFace*param.dc.ghostFace[0], param.in, param.inNorm, idx, param);
}
} else if (dim == 1) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[1]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[2], param.outNorm[2], face_idx,
nFace*param.dc.ghostFace[1], param.in, param.inNorm, idx, param);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[3], param.outNorm[3], face_idx,
nFace*param.dc.ghostFace[1], param.in, param.inNorm, idx, param);
}
} else if (dim == 2) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[2]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[4], param.outNorm[4], face_idx,
nFace*param.dc.ghostFace[2], param.in, param.inNorm, idx, param);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[5], param.outNorm[5], face_idx,
nFace*param.dc.ghostFace[2], param.in, param.inNorm, idx, param);
}
} else {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[3]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.out[6], param.outNorm[6], face_idx,
nFace*param.dc.ghostFace[3], param.in, param.inNorm,idx, param);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.out[7], param.outNorm[7], face_idx,
nFace*param.dc.ghostFace[3], param.in, param.inNorm, idx, param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
template <typename FloatN, int nFace>
__global__ void unpackFaceExtendedStaggeredKernel(PackExtendedParam<FloatN> param)
{
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read from the face buffer and write the staggered spinor back into the local field
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = start, 1 = end
// if param.face_num==2 unpack both the start and the end, otherwise unpack the region of the
// lattice specified by param.face_num
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[0]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[0], param.outNorm[0], face_idx, nFace*param.dc.ghostFace[0]);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<0,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[1], param.outNorm[1], face_idx, nFace*param.dc.ghostFace[0]);
}
} else if (dim == 1) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[1]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[2], param.outNorm[2], face_idx, nFace*param.dc.ghostFace[1]);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<1,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[3], param.outNorm[3], face_idx, nFace*param.dc.ghostFace[1]);
}
} else if (dim == 2) {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[2]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[4], param.outNorm[4], face_idx, nFace*param.dc.ghostFace[2]);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<2,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[5], param.outNorm[5], face_idx, nFace*param.dc.ghostFace[2]);
}
} else {
const int face_num = (param.face_num==2) ? ((face_idx >= nFace*param.dc.ghostFace[3]) ? 1 : 0) : param.face_num;
if(param.face_num==2) face_idx -= face_num*nFace*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,0>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[6], param.outNorm[6], face_idx, nFace*param.dc.ghostFace[3]);
} else {
const int idx = indexFromFaceIndexExtendedStaggered<3,nFace,1>(face_idx,param);
packFaceStaggeredCore(param.in, param.inNorm, idx,
param.sp_stride, param.out[7], param.outNorm[7], face_idx, nFace*param.dc.ghostFace[3]);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
#undef SPINORTEXDOUBLE
#undef SPINORTEXSINGLE
#undef SPINORTEXHALF
#undef SPINORTEXQUARTER
#endif // GPU_STAGGERED_DIRAC
template <typename FloatN, typename Float>
class PackFaceStaggered : public PackFace<FloatN, Float> {
private:
const int* R; // boundary dimensions for extended field
const bool unpack;
int inputPerSite() const { return 6; } // input is full spinor
int outputPerSite() const { return 6; } // output is full spinor
public:
PackFaceStaggered(void *faces[], const cudaColorSpinorField *in, MemoryLocation location,
const int nFace, const int dagger, const int parity,
const int dim, const int face_num, const int* R=NULL, const bool unpack=false)
: PackFace<FloatN, Float>(faces, in, location, dagger, parity, nFace, dim, face_num), R(R), unpack(unpack) { }
virtual ~PackFaceStaggered() { }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef GPU_STAGGERED_DIRAC
static PackParam<FloatN> param;
this->prepareParam(param,tp,this->dim,this->face_num);
if(!R){
void *args[] = { &param };
void (*func)(PackParam<FloatN>) = PackFace<FloatN,Float>::nFace==1 ? &(packFaceStaggeredKernel<FloatN,1>) : &(packFaceStaggeredKernel<FloatN,3>);
cudaLaunchKernel( (const void*)func, tp.grid, tp.block, args, tp.shared_bytes, stream);
}else{ // R!=NULL => this is an extended field
PackExtendedParam<FloatN> extendedParam(param);
if(!unpack){
for(int d=0; d<QUDA_MAX_DIM; ++d) extendedParam.R[d] = R[d];
switch(PackFace<FloatN,Float>::nFace){
case 1:
packFaceExtendedStaggeredKernel<FloatN,1><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam);
break;
case 2:
packFaceExtendedStaggeredKernel<FloatN,2><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam);
break;
case 3:
packFaceExtendedStaggeredKernel<FloatN,3><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam);
break;
case 4:
packFaceExtendedStaggeredKernel<FloatN,4><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam);
break;
default:
errorQuda("Unsupported boundary width");
break;
}
}else{ // extended field unpack
switch(PackFace<FloatN,Float>::nFace){
case 1:
unpackFaceExtendedStaggeredKernel<FloatN,1><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam);
break;
case 2:
unpackFaceExtendedStaggeredKernel<FloatN,2><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam);
break;
case 3:
unpackFaceExtendedStaggeredKernel<FloatN,3><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam);
break;
case 4:
unpackFaceExtendedStaggeredKernel<FloatN,4><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(extendedParam);
break;
default:
errorQuda("Unsupported boundary width");
break;
}
}
}
#else
errorQuda("Staggered face packing kernel is not built");
#endif
}
long long flops() const { return 0; }
};
void packFaceStaggered(void *ghost_buf[], cudaColorSpinorField &in, MemoryLocation location, int nFace,
int dagger, int parity, const int dim, const int face_num, const cudaStream_t &stream) {
switch(in.Precision()) {
case QUDA_DOUBLE_PRECISION:
{
PackFaceStaggered<double2, double> pack(ghost_buf, &in, location, nFace, dagger, parity, dim, face_num);
pack.apply(stream);
}
break;
case QUDA_SINGLE_PRECISION:
{
PackFaceStaggered<float2, float> pack(ghost_buf, &in, location, nFace, dagger, parity, dim, face_num);
pack.apply(stream);
}
break;
case QUDA_HALF_PRECISION:
{
PackFaceStaggered<short2, float> pack(ghost_buf, &in, location, nFace, dagger, parity, dim, face_num);
pack.apply(stream);
}
break;
case QUDA_QUARTER_PRECISION:
{
PackFaceStaggered<char2, float> pack(ghost_buf, &in, location, nFace, dagger, parity, dim, face_num);
pack.apply(stream);
}
break;
default:
errorQuda("Precision %d not supported", in.Precision());
}
}
void packFaceExtendedStaggered(void *buffer[], cudaColorSpinorField &field, MemoryLocation location, const int nFace, const int R[],
int dagger, int parity, const int dim, const int face_num, const cudaStream_t &stream, bool unpack=false)
{
switch(field.Precision()){
case QUDA_DOUBLE_PRECISION:
{
PackFaceStaggered<double2,double> pack(buffer, &field, location, nFace, dagger, parity, dim, face_num, R, unpack);
pack.apply(stream);
}
break;
case QUDA_SINGLE_PRECISION:
{
PackFaceStaggered<float2,float> pack(buffer, &field, location, nFace, dagger, parity, dim, face_num, R, unpack);
pack.apply(stream);
}
break;
case QUDA_HALF_PRECISION:
{
PackFaceStaggered<short2,float> pack(buffer, &field, location, nFace, dagger, parity, dim, face_num, R, unpack);
pack.apply(stream);
}
break;
case QUDA_QUARTER_PRECISION:
{
PackFaceStaggered<char2,float> pack(buffer, &field, location, nFace, dagger, parity, dim, face_num, R, unpack);
pack.apply(stream);
}
break;
default:
errorQuda("Precision %d not supported", field.Precision());
} // switch(field.Precision())
}
#ifdef GPU_DOMAIN_WALL_DIRAC
template <int dagger, typename FloatN>
__global__ void packFaceDWKernel(PackParam<FloatN> param)
{
const int nFace = 1; // 1 face for dwf
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
const int Ls = param.dc.X[4];
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor, spin-project, and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = beginning, 1 = end
// FIXME these param.dc.ghostFace constants do not include the Ls dimension
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[0]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,0,nFace,0>(face_idx,param);
packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[0], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,0,nFace,1>(face_idx,param);
packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[0], param);
}
} else if (dim == 1) {
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[1]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,1,nFace,0>(face_idx,param);
packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[1], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,1,nFace,1>(face_idx,param);
packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[1], param);
}
} else if (dim == 2) {
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[2]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,2,nFace,0>(face_idx,param);
packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[2], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,2,nFace,1>(face_idx,param);
packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[2], param);
}
} else {
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[3]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,3,nFace,0>(face_idx,param);
packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[3], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_5D_PC,3,nFace,1>(face_idx,param);
packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[3], param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
template <int dagger, typename FloatN>
__global__ void packFaceDW4DKernel(PackParam<FloatN> param)
{
const int nFace = 1; // 1 face for Wilson
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
const int Ls = param.dc.X[4];
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor, spin-project, and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing: 0 = beginning, 1 = end
// FIXME these param.dc.ghostFace constants do not include the Ls dimension
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[0]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,0,nFace,0>(face_idx,param);
packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[0], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,0,nFace,1>(face_idx,param);
packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[0], param);
}
} else if (dim == 1) {
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[1]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,1,nFace,0>(face_idx,param);
packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[1], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,1,nFace,1>(face_idx,param);
packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[1], param);
}
} else if (dim == 2) {
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[2]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,2,nFace,0>(face_idx,param);
packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[2], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,2,nFace,1>(face_idx,param);
packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[2], param);
}
} else {
const int face_num = (face_idx >= nFace*Ls*param.dc.ghostFace[3]) ? 1 : 0;
face_idx -= face_num*nFace*Ls*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,3,nFace,0>(face_idx,param);
packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[3], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,3,nFace,1>(face_idx,param);
packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in,
param.inNorm, idx, face_idx, Ls*param.dc.ghostFace[3], param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
#endif
template <typename FloatN, typename Float>
class PackFaceDW : public PackFace<FloatN, Float> {
private:
int inputPerSite() const { return 24; } // input is full spinor
int outputPerSite() const { return 12; } // output is spin projected
public:
PackFaceDW(void *faces[], const cudaColorSpinorField *in, MemoryLocation location,
const int dagger, const int parity)
: PackFace<FloatN, Float>(faces, in, location, dagger, parity, 1) { }
virtual ~PackFaceDW() { }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef GPU_DOMAIN_WALL_DIRAC
static PackParam<FloatN> param;
this->prepareParam(param,tp);
void *args[] = { &param };
void (*func)(PackParam<FloatN>) = this->dagger ? &(packFaceDWKernel<1,FloatN>) : &(packFaceDWKernel<0,FloatN>);
cudaLaunchKernel( (const void*)func, tp.grid, tp.block, args, tp.shared_bytes, stream);
#else
errorQuda("DW face packing kernel is not built");
#endif
}
long long flops() const { return outputPerSite()*this->threads(); }
};
template <typename FloatN, typename Float>
class PackFaceDW4D : public PackFace<FloatN, Float> {
private:
int inputPerSite() const { return 24; } // input is full spinor
int outputPerSite() const { return 12; } // output is spin projected
public:
PackFaceDW4D(void *faces[], const cudaColorSpinorField *in, MemoryLocation location,
const int dagger, const int parity)
: PackFace<FloatN, Float>(faces, in, location, dagger, parity, 1) { }
virtual ~PackFaceDW4D() { }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef GPU_DOMAIN_WALL_DIRAC
static PackParam<FloatN> param;
this->prepareParam(param,tp);
void *args[] = { &param };
void (*func)(PackParam<FloatN>) = this->dagger ? &(packFaceDW4DKernel<1,FloatN>) : &(packFaceDW4DKernel<0,FloatN>);
cudaLaunchKernel( (const void*)func, tp.grid, tp.block, args, tp.shared_bytes, stream);
#else
errorQuda("4D preconditioned DW face packing kernel is not built");
#endif
}
long long flops() const { return outputPerSite()*this->threads(); }
};
void packFaceDW(void *ghost_buf[], cudaColorSpinorField &in, MemoryLocation location, const int dagger,
const int parity, const cudaStream_t &stream) {
if(in.DWFPCtype() == QUDA_4D_PC)
{
switch(in.Precision()) {
case QUDA_DOUBLE_PRECISION:
{
PackFaceDW4D<double2, double> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_SINGLE_PRECISION:
{
PackFaceDW4D<float4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_HALF_PRECISION:
{
PackFaceDW4D<short4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_QUARTER_PRECISION:
{
PackFaceDW4D<char4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
default:
errorQuda("Precision %d not supported", in.Precision());
}
}
else
{
switch(in.Precision()) {
case QUDA_DOUBLE_PRECISION:
{
PackFaceDW<double2, double> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_SINGLE_PRECISION:
{
PackFaceDW<float4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_HALF_PRECISION:
{
PackFaceDW<short4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_QUARTER_PRECISION:
{
PackFaceDW<char4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
default:
errorQuda("Precision %d not supported", in.Precision());
}
}
}
#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
template <int dagger, typename FloatN>
__global__ void packFaceNdegTMKernel(PackParam<FloatN> param)
{
const int nFace = 1; // 1 face for Wilson
const int Nf = 2;
#ifdef STRIPED
const int sites_per_block = param.sites_per_block;
int local_tid = threadIdx.x;
int tid = sites_per_block * blockIdx.x + local_tid;
#else
int tid = block_idx(param.swizzle) * blockDim.x + threadIdx.x;
constexpr int sites_per_block = 1;
constexpr int local_tid = 0;
#endif
while ( local_tid < sites_per_block && tid < param.threads ) {
// determine which dimension we are packing
int face_idx;
const int dim = dimFromFaceIndex(face_idx, tid, param);
// compute where the output is located
// compute an index into the local volume from the index into the face
// read spinor, spin-project, and write half spinor to face
if (dim == 0) {
// face_num determines which end of the lattice we are packing:
// 0 = beginning, 1 = end FIXME these param.dc.ghostFace constants
// do not include the Nf dimension
const int face_num = (face_idx >= nFace*Nf*param.dc.ghostFace[0]) ? 1 : 0;
face_idx -= face_num*nFace*Nf*param.dc.ghostFace[0];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,0,nFace,0>(face_idx,param);
packFaceWilsonCore<0,dagger,0>(param.out[0], param.outNorm[0], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[0], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,0,nFace,1>(face_idx,param);
packFaceWilsonCore<0,dagger,1>(param.out[1], param.outNorm[1], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[0], param);
}
} else if (dim == 1) {
const int face_num = (face_idx >= nFace*Nf*param.dc.ghostFace[1]) ? 1 : 0;
face_idx -= face_num*nFace*Nf*param.dc.ghostFace[1];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,1,nFace,0>(face_idx,param);
packFaceWilsonCore<1, dagger,0>(param.out[2], param.outNorm[2], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[1], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,1,nFace,1>(face_idx,param);
packFaceWilsonCore<1, dagger,1>(param.out[3], param.outNorm[3], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[1], param);
}
} else if (dim == 2) {
const int face_num = (face_idx >= nFace*Nf*param.dc.ghostFace[2]) ? 1 : 0;
face_idx -= face_num*nFace*Nf*param.dc.ghostFace[2];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,2,nFace,0>(face_idx,param);
packFaceWilsonCore<2, dagger,0>(param.out[4], param.outNorm[4], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[2], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,2,nFace,1>(face_idx,param);
packFaceWilsonCore<2, dagger,1>(param.out[5], param.outNorm[5], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[2], param);
}
} else {
const int face_num = (face_idx >= nFace*Nf*param.dc.ghostFace[3]) ? 1 : 0;
face_idx -= face_num*nFace*Nf*param.dc.ghostFace[3];
if (face_num == 0) {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,3,nFace,0>(face_idx,param);
packFaceWilsonCore<3, dagger,0>(param.out[6], param.outNorm[6], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[3], param);
} else {
const int idx = indexFromFaceIndex<5,QUDA_4D_PC,3,nFace,1>(face_idx,param);
packFaceWilsonCore<3, dagger,1>(param.out[7], param.outNorm[7], param.in,
param.inNorm, idx, face_idx, Nf*param.dc.ghostFace[3], param);
}
}
#ifdef STRIPED
local_tid += blockDim.x;
tid += blockDim.x;
#else
tid += blockDim.x*gridDim.x;
#endif
} // while tid
}
#endif
template <typename FloatN, typename Float>
class PackFaceNdegTM : public PackFace<FloatN, Float> {
private:
int inputPerSite() const { return 24; } // input is full spinor
int outputPerSite() const { return 12; } // output is spin projected
public:
PackFaceNdegTM(void *faces[], const cudaColorSpinorField *in, MemoryLocation location,
const int dagger, const int parity)
: PackFace<FloatN, Float>(faces, in, location, dagger, parity, 1) { }
virtual ~PackFaceNdegTM() { }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
static PackParam<FloatN> param;
this->prepareParam(param,tp);
void *args[] = { &param };
void (*func)(PackParam<FloatN>) = this->dagger ? &(packFaceNdegTMKernel<1,FloatN>) : &(packFaceNdegTMKernel<0,FloatN>);
cudaLaunchKernel( (const void*)func, tp.grid, tp.block, args, tp.shared_bytes, stream);
#else
errorQuda("Non-degenerate twisted mass face packing kernel is not built");
#endif
}
long long flops() const { return outputPerSite()*this->threads(); }
};
void packFaceNdegTM(void *ghost_buf[], cudaColorSpinorField &in, MemoryLocation location, const int dagger,
const int parity, const cudaStream_t &stream) {
switch(in.Precision()) {
case QUDA_DOUBLE_PRECISION:
{
PackFaceNdegTM<double2, double> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_SINGLE_PRECISION:
{
PackFaceNdegTM<float4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_HALF_PRECISION:
{
PackFaceNdegTM<short4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
case QUDA_QUARTER_PRECISION:
{
PackFaceNdegTM<char4, float> pack(ghost_buf, &in, location, dagger, parity);
pack.apply(stream);
}
break;
default:
errorQuda("Precision %d not supported", in.Precision());
}
}
void packFace(void *ghost_buf[2*QUDA_MAX_DIM], cudaColorSpinorField &in,
MemoryLocation location, const int nFace,
const int dagger, const int parity,
const int dim, const int face_num,
const cudaStream_t &stream,
const double a, const double b)
{
int nDimPack = 0;
if(dim < 0){
for (int d=0; d<4; d++) {
if(!commDim[d]) continue;
if (d != 3 || getKernelPackT() || a != 0.0 || b!= 0.0) nDimPack++;
}
}else{
if(commDim[dim]){
if(dim!=3 || getKernelPackT() || a!=0.0 || b != 0.0) nDimPack++;
}
}
if (!nDimPack) return; // if zero then we have nothing to pack
if (nFace != 1 && in.Nspin() != 1)
errorQuda("Unsupported number of faces %d", nFace);
// Need to update this logic for other multi-src dslash packing
if (in.Nspin() == 1) {
packFaceStaggered(ghost_buf, in, location, nFace, dagger, parity, dim, face_num, stream);
} else if (a!=0.0 || b!=0.0) {
// Need to update this logic for other multi-src dslash packing
if(in.TwistFlavor() == QUDA_TWIST_SINGLET) {
packTwistedFaceWilson(ghost_buf, in, location, dagger, parity, a, b, stream);
} else {
errorQuda("Cannot perform twisted packing for the spinor.");
}
} else if (in.Ndim() == 5) {
if(in.TwistFlavor() == QUDA_TWIST_INVALID) {
packFaceDW(ghost_buf, in, location, dagger, parity, stream);
} else {
packFaceNdegTM(ghost_buf, in, location, dagger, parity, stream);
}
} else {
packFaceWilson(ghost_buf, in, location, dagger, parity, stream);
}
}
void packFaceExtended(void* buffer[2*QUDA_MAX_DIM], cudaColorSpinorField &field,
MemoryLocation location, const int nFace, const int R[],
const int dagger, const int parity, const int dim, const int face_num,
const cudaStream_t &stream, const bool unpack)
{
int nDimPack = 0;
if(dim < 0){
for(int d=0; d<4; d++){
if(R[d]) nDimPack++;
}
}else{
if(R[dim]) nDimPack++;
}
if(!nDimPack) return; // if zero then we have nothing to pack
if(field.Nspin() == 1){
packFaceExtendedStaggered(buffer, field, location, nFace, R, dagger, parity, dim, face_num, stream, unpack);
}else{
errorQuda("Extended quark field is not supported");
}
}
#endif // MULTI_GPU
}
|
9dcf4f0915bfd045fff47bb4fa28f8c4c0a83d1b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/random.h> // for thrust::default_random_engine / uniform_real_distribution used below (harmless if already pulled in indirectly)
#include "glm/glm.hpp"
#include "utilities.h"
#include "kernel.h"
#if SHARED == 1
#define ACC(x,y,z) sharedMemAcc(x,y,z)
#else
#define ACC(x,y,z) naiveAcc(x,y,z)
#endif
//GLOBALS
dim3 threadsPerBlock(BLOCK_SIZE);
int numObjects;
const float planetMass = 3e8;
const __device__ float starMass = 5e10;
const float scene_scale = 2e2; //size of the height map in simulation space
glm::vec4 * dev_pos;
glm::vec3 * dev_vel;
glm::vec3 * dev_acc;
void checkCUDAError(const char *msg, int line = -1)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
if( line >= 0 )
{
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
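//The wrappers below report errors through a checkCUDAErrorWithLine macro that
//is assumed to be provided by kernel.h; a minimal sketch of such a macro,
//guarded so it does not clash with an existing definition, is:
#ifndef checkCUDAErrorWithLine
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
#endif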
__host__ __device__
unsigned int hash(unsigned int a){
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
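//Wang-style 32-bit integer mixing hash: scrambles the (index*time) seed so
//that consecutive thread indices feed decorrelated seeds into the random
//engine in generateRandomNumberFromThread below.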
//Function that generates static.
__host__ __device__
glm::vec3 generateRandomNumberFromThread(float time, int index)
{
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//Generate randomized starting positions for the planets in the XY plane
//Also initializes the masses
__global__
void generateRandomPosArray(int time, int N, glm::vec4 * arr, float scale, float mass)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
glm::vec3 rand = scale*(generateRandomNumberFromThread(time, index)-0.5f);
arr[index].x = rand.x;
arr[index].y = rand.y;
arr[index].z = 0.0f;//rand.z;
arr[index].w = mass;
}
}
//Determine velocity from the distance from the center star. Not super physically accurate because
//the mass ratio is too close, but it makes for an interesting looking scene
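//For a circular orbit the star's gravity supplies the centripetal force:
// G*M*m / r^2 = m*v^2 / r => v = sqrt(G*M / r)
//which is the speed s computed below; the direction D is the radial unit
//vector crossed with the z axis, so the resulting orbit stays in the XY plane.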
__global__
void generateCircularVelArray(int time, int N, glm::vec3 * arr, glm::vec4 * pos)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
glm::vec3 R = glm::vec3(pos[index].x, pos[index].y, pos[index].z);
float r = glm::length(R) + EPSILON;
float s = sqrt(G*starMass/r);
glm::vec3 D = glm::normalize(glm::cross(R/r,glm::vec3(0,0,1)));
arr[index].x = s*D.x;
arr[index].y = s*D.y;
arr[index].z = s*D.z;
}
}
//Generate randomized starting velocities in the XY plane
__global__
void generateRandomVelArray(int time, int N, glm::vec3 * arr, float scale)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
glm::vec3 rand = scale*(generateRandomNumberFromThread(time, index) - 0.5f);
arr[index].x = rand.x;
arr[index].y = rand.y;
arr[index].z = 0.0;//rand.z;
}
}
//TODO: Determine force between two bodies
__device__
glm::vec3 calculateAcceleration(glm::vec4 us, glm::vec4 them)
{
// G*m_us*m_them
//F = -------------
// r^2
//
// G*m_us*m_them G*m_them
//a = ------------- = --------
// m_us*r^2 r^2
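//Note: this expression is singular as r -> 0; the callers below (naiveAcc and
//sharedMemAcc) guard against that with a minimum-distance cutoff. A common
//alternative, not used here, is Plummer softening:
// a = G*m_them*(p_them - p_us) / (r^2 + eps^2)^(3/2) for a small eps.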
glm::vec3 p_us = glm::vec3(us.x, us.y, us.z);
glm::vec3 p_them = glm::vec3(them.x, them.y, them.z);
float m_them = them.w;
float r = glm::distance(p_us, p_them);
return glm::normalize(p_them - p_us) * (float)G*m_them/(r*r);
}
//TODO: Core force calc kernel global memory
__device__
glm::vec3 naiveAcc(int N, glm::vec4 my_pos, glm::vec4 * their_pos)
{
glm::vec3 acc = calculateAcceleration(my_pos, glm::vec4(0,0,0,starMass));
int i;
for (i=0; i<N; ++i) {
//skip self-interaction and near-singular pairs: compare the spatial (xyz)
//distance only, since the w component stores the body mass, not a coordinate
if (glm::distance(glm::vec3(my_pos), glm::vec3(their_pos[i])) > 10.0f) {
acc += calculateAcceleration(my_pos, their_pos[i]);
}
}
return acc;
}
//TODO: Core force calc kernel shared memory
__device__
glm::vec3 sharedMemAcc(int N, glm::vec4 my_pos, glm::vec4 * their_pos)
{
int thread_index = threadIdx.x;
extern __shared__ glm::vec4 shared_pos[];
int num_splits = ceil((float)N/(float)(BLOCK_SIZE));
glm::vec3 acc = calculateAcceleration(my_pos, glm::vec4(0,0,0,starMass));
int i, j, pos_index = 0;
for (i=0; i<num_splits; ++i) {
if (i*BLOCK_SIZE + thread_index < N) {
shared_pos[thread_index] = their_pos[i*BLOCK_SIZE + thread_index];
}
__syncthreads();
for (j=0; j<BLOCK_SIZE; ++j) {
//pos_index is the global index of the body staged in shared_pos[j]; the tile
//is indexed by j so the shared-memory read stays within its BLOCK_SIZE
//entries, and the break below is uniform across the block (pos_index does not
//depend on the thread index), so every thread still reaches __syncthreads()
pos_index = i*BLOCK_SIZE + j;
if (pos_index >= N) {
break;
}
//compare the spatial (xyz) distance only; the w component stores the mass
if (glm::distance(glm::vec3(my_pos), glm::vec3(shared_pos[j])) > 10.0f) {
acc += calculateAcceleration(my_pos, shared_pos[j]);
}
}
__syncthreads();
}
return acc;
}
//Simple Euler integration scheme
__global__
void updateF(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
glm::vec4 my_pos;
glm::vec3 accel;
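//every thread calls ACC below, even when index >= N, so that all threads in a
//block reach the __syncthreads() barriers inside sharedMemAcc when SHARED == 1;
//my_pos is only meaningful, and the result only stored, when index < N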
if(index < N) my_pos = pos[index];
accel = ACC(N, my_pos, pos);
if(index < N) acc[index] = accel;
}
__global__
void updateS(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if( index < N )
{
vel[index] += acc[index] * dt;
pos[index].x += vel[index].x * dt;
pos[index].y += vel[index].y * dt;
pos[index].z += vel[index].z * dt;
}
}
//Update the vertex buffer object
//(The VBO is where OpenGL looks for the positions for the planets)
__global__
void sendToVBO(int N, glm::vec4 * pos, float * vbo, int width, int height, float s_scale)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale_w = -2.0f / s_scale;
float c_scale_h = -2.0f / s_scale;
if(index<N)
{
vbo[4*index+0] = pos[index].x*c_scale_w;
vbo[4*index+1] = pos[index].y*c_scale_h;
vbo[4*index+2] = 0;
vbo[4*index+3] = 1;
}
}
//Update the texture pixel buffer object
//(This texture is where openGL pulls the data for the height map)
__global__
void sendToPBO(int N, glm::vec4 * pos, float4 * pbo, int width, int height, float s_scale)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
int x = index % width;
int y = index / width;
float w2 = width / 2.0;
float h2 = height / 2.0;
float c_scale_w = width / s_scale;
float c_scale_h = height / s_scale;
glm::vec3 color(0.05, 0.15, 0.3);
glm::vec3 acc = ACC(N, glm::vec4((x-w2)/c_scale_w,(y-h2)/c_scale_h,0,1), pos);
if(x<width && y<height)
{
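//sqrt(sqrt(|a|^2)) = |a|^(1/2): compress the dynamic range of the acceleration
//magnitude so the far field remains visible in the height map; the value is
//clamped to 1.0 below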
float mag = sqrt(sqrt(acc.x*acc.x + acc.y*acc.y + acc.z*acc.z));
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = (mag < 1.0f) ? mag : 1.0f;
}
}
/*************************************
* Wrappers for the __global__ calls *
*************************************/
//Initialize memory, update some globals
void initCuda(int N)
{
numObjects = N;
dim3 fullBlocksPerGrid((int)ceil(float(N)/float(BLOCK_SIZE)));
hipMalloc((void**)&dev_pos, N*sizeof(glm::vec4));
checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&dev_vel, N*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&dev_acc, N*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
hipLaunchKernelGGL(( generateRandomPosArray), dim3(fullBlocksPerGrid), dim3(BLOCK_SIZE), 0, 0, 1, numObjects, dev_pos, scene_scale, planetMass);
checkCUDAErrorWithLine("Kernel failed!");
hipLaunchKernelGGL(( generateCircularVelArray), dim3(fullBlocksPerGrid), dim3(BLOCK_SIZE), 0, 0, 2, numObjects, dev_vel, dev_pos);
checkCUDAErrorWithLine("Kernel failed!");
hipDeviceSynchronize();
}
void cudaNBodyUpdateWrapper(float dt)
{
dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(BLOCK_SIZE)));
printf("fullBlocksPerGrid: %d\n", fullBlocksPerGrid);
printf("BLOCK_SIZE: %d\n", BLOCK_SIZE);
printf("BLOCK_SIZE*sizeof(glm::vec4): %d\n", BLOCK_SIZE*sizeof(glm::vec4));
hipLaunchKernelGGL(( updateF), dim3(fullBlocksPerGrid), dim3(BLOCK_SIZE), BLOCK_SIZE*sizeof(glm::vec4), 0, numObjects, dt, dev_pos, dev_vel, dev_acc);
checkCUDAErrorWithLine("Kernel failed!");
hipLaunchKernelGGL(( updateS), dim3(fullBlocksPerGrid), dim3(BLOCK_SIZE), 0, 0, numObjects, dt, dev_pos, dev_vel, dev_acc);
checkCUDAErrorWithLine("Kernel failed!");
hipDeviceSynchronize();
}
void cudaUpdateVBO(float * vbodptr, int width, int height)
{
dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(BLOCK_SIZE)));
hipLaunchKernelGGL(( sendToVBO), dim3(fullBlocksPerGrid), dim3(BLOCK_SIZE), 0, 0, numObjects, dev_pos, vbodptr, width, height, scene_scale);
hipDeviceSynchronize();
}
void cudaUpdatePBO(float4 * pbodptr, int width, int height)
{
dim3 fullBlocksPerGrid((int)ceil(float(width*height)/float(BLOCK_SIZE)));
hipLaunchKernelGGL(( sendToPBO), dim3(fullBlocksPerGrid), dim3(BLOCK_SIZE), BLOCK_SIZE*sizeof(glm::vec4), 0, numObjects, dev_pos, pbodptr, width, height, scene_scale);
hipDeviceSynchronize();
}
| 9dcf4f0915bfd045fff47bb4fa28f8c4c0a83d1b.cu | #include <stdio.h>
#include <cuda.h>
#include <cmath>
#include "glm/glm.hpp"
#include "utilities.h"
#include "kernel.h"
#if SHARED == 1
#define ACC(x,y,z) sharedMemAcc(x,y,z)
#else
#define ACC(x,y,z) naiveAcc(x,y,z)
#endif
//GLOBALS
dim3 threadsPerBlock(BLOCK_SIZE);
int numObjects;
const float planetMass = 3e8;
const __device__ float starMass = 5e10;
const float scene_scale = 2e2; //size of the height map in simulation space
glm::vec4 * dev_pos;
glm::vec3 * dev_vel;
glm::vec3 * dev_acc;
void checkCUDAError(const char *msg, int line = -1)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
if( line >= 0 )
{
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
__host__ __device__
unsigned int hash(unsigned int a){
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
//Function that generates static.
__host__ __device__
glm::vec3 generateRandomNumberFromThread(float time, int index)
{
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//Generate randomized starting positions for the planets in the XY plane
//Also initialized the masses
__global__
void generateRandomPosArray(int time, int N, glm::vec4 * arr, float scale, float mass)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
glm::vec3 rand = scale*(generateRandomNumberFromThread(time, index)-0.5f);
arr[index].x = rand.x;
arr[index].y = rand.y;
arr[index].z = 0.0f;//rand.z;
arr[index].w = mass;
}
}
//Determine velocity from the distance from the center star. Not super physically accurate because
//the mass ratio is too close, but it makes for an interesting looking scene
__global__
void generateCircularVelArray(int time, int N, glm::vec3 * arr, glm::vec4 * pos)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
glm::vec3 R = glm::vec3(pos[index].x, pos[index].y, pos[index].z);
float r = glm::length(R) + EPSILON;
float s = sqrt(G*starMass/r);
glm::vec3 D = glm::normalize(glm::cross(R/r,glm::vec3(0,0,1)));
arr[index].x = s*D.x;
arr[index].y = s*D.y;
arr[index].z = s*D.z;
}
}
//Generate randomized starting velocities in the XY plane
__global__
void generateRandomVelArray(int time, int N, glm::vec3 * arr, float scale)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < N)
{
glm::vec3 rand = scale*(generateRandomNumberFromThread(time, index) - 0.5f);
arr[index].x = rand.x;
arr[index].y = rand.y;
arr[index].z = 0.0;//rand.z;
}
}
//TODO: Determine force between two bodies
__device__
glm::vec3 calculateAcceleration(glm::vec4 us, glm::vec4 them)
{
// G*m_us*m_them
//F = -------------
// r^2
//
// G*m_us*m_them G*m_them
//a = ------------- = --------
// m_us*r^2 r^2
glm::vec3 p_us = glm::vec3(us.x, us.y, us.z);
glm::vec3 p_them = glm::vec3(them.x, them.y, them.z);
float m_them = them.w;
float r = glm::distance(p_us, p_them);
return glm::normalize(p_them - p_us) * (float)G*m_them/(r*r);
}
//TODO: Core force calc kernel global memory
__device__
glm::vec3 naiveAcc(int N, glm::vec4 my_pos, glm::vec4 * their_pos)
{
glm::vec3 acc = calculateAcceleration(my_pos, glm::vec4(0,0,0,starMass));
int i;
float tmp_i;
for (i=0; i<N; ++i) {
if (glm::distance(my_pos, their_pos[i]) > 10.0) {
acc += calculateAcceleration(my_pos, their_pos[i]);
}
}
return acc;
}
//TODO: Core force calc kernel shared memory
__device__
glm::vec3 sharedMemAcc(int N, glm::vec4 my_pos, glm::vec4 * their_pos)
{
int thread_index = threadIdx.x;
extern __shared__ glm::vec4 shared_pos[];
int num_splits = ceil((float)N/(float)(BLOCK_SIZE));
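// Tiled N-body pass: each of the num_splits iterations stages one BLOCK_SIZE-wide slice of
// their_pos into dynamic shared memory (sized by the third kernel launch parameter), so all
// threads in the block reuse the same tile before moving on to the next one.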
glm::vec3 acc = calculateAcceleration(my_pos, glm::vec4(0,0,0,starMass));
int i, j, pos_index = 0;
for (i=0; i<num_splits; ++i) {
if (i*BLOCK_SIZE + thread_index < N) {
shared_pos[thread_index] = their_pos[i*BLOCK_SIZE + thread_index];
}
__syncthreads();
for (j=0; j<BLOCK_SIZE; ++j) {
if (pos_index >= N) {
break;
}
// index the tile with j (tile-local); pos_index is the global count and would run past shared_pos[BLOCK_SIZE]
if (glm::distance(my_pos, shared_pos[j]) > 10.0) {
acc += calculateAcceleration(my_pos, shared_pos[j]);
}
++pos_index;
}
__syncthreads();
}
return acc;
}
//Simple Euler integration scheme
__global__
void updateF(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
glm::vec4 my_pos;
glm::vec3 accel;
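// No early return for out-of-range threads: every thread must reach the __syncthreads()
// calls inside sharedMemAcc, so only the global loads and stores are guarded by index < N.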
if(index < N) my_pos = pos[index];
accel = ACC(N, my_pos, pos);
if(index < N) acc[index] = accel;
}
__global__
void updateS(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if( index < N )
{
vel[index] += acc[index] * dt;
pos[index].x += vel[index].x * dt;
pos[index].y += vel[index].y * dt;
pos[index].z += vel[index].z * dt;
}
}
//Update the vertex buffer object
//(The VBO is where OpenGL looks for the positions for the planets)
__global__
void sendToVBO(int N, glm::vec4 * pos, float * vbo, int width, int height, float s_scale)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale_w = -2.0f / s_scale;
float c_scale_h = -2.0f / s_scale;
if(index<N)
{
vbo[4*index+0] = pos[index].x*c_scale_w;
vbo[4*index+1] = pos[index].y*c_scale_h;
vbo[4*index+2] = 0;
vbo[4*index+3] = 1;
}
}
//Update the texture pixel buffer object
//(This texture is where openGL pulls the data for the height map)
__global__
void sendToPBO(int N, glm::vec4 * pos, float4 * pbo, int width, int height, float s_scale)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
int x = index % width;
int y = index / width;
float w2 = width / 2.0;
float h2 = height / 2.0;
float c_scale_w = width / s_scale;
float c_scale_h = height / s_scale;
glm::vec3 color(0.05, 0.15, 0.3);
glm::vec3 acc = ACC(N, glm::vec4((x-w2)/c_scale_w,(y-h2)/c_scale_h,0,1), pos);
if(x<width && y<height)
{
float mag = sqrt(sqrt(acc.x*acc.x + acc.y*acc.y + acc.z*acc.z));
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = (mag < 1.0f) ? mag : 1.0f;
}
}
/*************************************
* Wrappers for the __global__ calls *
*************************************/
//Initialize memory, update some globals
void initCuda(int N)
{
numObjects = N;
dim3 fullBlocksPerGrid((int)ceil(float(N)/float(BLOCK_SIZE)));
cudaMalloc((void**)&dev_pos, N*sizeof(glm::vec4));
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&dev_vel, N*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&dev_acc, N*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
generateRandomPosArray<<<fullBlocksPerGrid, BLOCK_SIZE>>>(1, numObjects, dev_pos, scene_scale, planetMass);
checkCUDAErrorWithLine("Kernel failed!");
generateCircularVelArray<<<fullBlocksPerGrid, BLOCK_SIZE>>>(2, numObjects, dev_vel, dev_pos);
checkCUDAErrorWithLine("Kernel failed!");
cudaThreadSynchronize();
}
void cudaNBodyUpdateWrapper(float dt)
{
dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(BLOCK_SIZE)));
printf("fullBlocksPerGrid: %d\n", fullBlocksPerGrid);
printf("BLOCK_SIZE: %d\n", BLOCK_SIZE);
printf("BLOCK_SIZE*sizeof(glm::vec4): %d\n", BLOCK_SIZE*sizeof(glm::vec4));
updateF<<<fullBlocksPerGrid, BLOCK_SIZE, BLOCK_SIZE*sizeof(glm::vec4)>>>(numObjects, dt, dev_pos, dev_vel, dev_acc);
checkCUDAErrorWithLine("Kernel failed!");
updateS<<<fullBlocksPerGrid, BLOCK_SIZE>>>(numObjects, dt, dev_pos, dev_vel, dev_acc);
checkCUDAErrorWithLine("Kernel failed!");
cudaThreadSynchronize();
}
void cudaUpdateVBO(float * vbodptr, int width, int height)
{
dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(BLOCK_SIZE)));
sendToVBO<<<fullBlocksPerGrid, BLOCK_SIZE>>>(numObjects, dev_pos, vbodptr, width, height, scene_scale);
cudaThreadSynchronize();
}
void cudaUpdatePBO(float4 * pbodptr, int width, int height)
{
dim3 fullBlocksPerGrid((int)ceil(float(width*height)/float(BLOCK_SIZE)));
sendToPBO<<<fullBlocksPerGrid, BLOCK_SIZE, BLOCK_SIZE*sizeof(glm::vec4)>>>(numObjects, dev_pos, pbodptr, width, height, scene_scale);
cudaThreadSynchronize();
}
|
e04809f738428b153e95642a5665ededa3fe8092.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <chrono>
#define LAMBERT 1
#define BLINN 0
#define POINT 0
#define TRI 0
#define LINE 1
#define TEXTURE 1
#define BILINEAR 1
#define PERSP_CORRECT 1
#define BACKCULL 0
#define TIMER 0
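// Compile-time feature toggles: LAMBERT / BLINN select the fragment shading model,
// POINT / LINE / TRI select the primitive rasterization path, TEXTURE / BILINEAR / PERSP_CORRECT
// control texture sampling, BACKCULL enables back-face culling, and TIMER prints per-stage timings.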
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType{
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
// TODO: add new attributes to your VertexOut
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, since normals are not preserved by the perspective transformation
// glm::vec3 col;
glm::vec2 texcoord0;
TextureData* dev_diffuseTex = NULL;
glm::vec3 camPos;
int texWidth, texHeight;
glm::vec3 mvpPos;
// ...
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
};
struct Fragment {
glm::vec3 color;
// TODO: add new attributes to your Fragment
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor;
glm::vec3 camPos;
VertexAttributeTexcoord texcoord0;
TextureData* dev_diffuseTex;
// ...
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
TextureData* dev_diffuseTex;
int diffuseTexWidth;
int diffuseTexHeight;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
// TODO: add more attributes when needed
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static float * dev_depth = NULL; // you might need this buffer when doing depth test
static int * dev_mutex = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
// TODO: add your fragment shader code here
Fragment fragment = fragmentBuffer[index];
#if POINT || LINE
framebuffer[index] = fragment.color;
#elif LAMBERT
glm::vec3 v = fragment.eyePos;
glm::vec3 n = fragment.eyeNor;
glm::vec3 fragColor(1, 0, 0);
glm::vec3 lightPos = glm::vec3(2, 2, 2) + v;
glm::vec3 L = glm::normalize(lightPos - v);
float lambert = glm::max(0.f, glm::dot(L, n));
glm::vec3 ambient = glm::vec3(0.1) * fragment.color;
framebuffer[index] = ambient + 0.9f * lambert * fragment.color;
#elif BLINN
glm::vec3 lights[2] = { glm::vec3(2, 2, 2) };
framebuffer[index] = glm::vec3(0.0f);
for (glm::vec3 lightPos : lights) {
glm::vec3 v = fragment.eyePos;
glm::vec3 n = glm::normalize(fragment.eyeNor);
glm::vec3 camPos = fragment.camPos;
glm::vec3 L = lightPos;
glm::vec3 Ev = glm::normalize(-v);
glm::vec3 R = glm::normalize(-glm::reflect(v - camPos, n));
glm::vec3 ambient = glm::vec3(0.1) * fragment.color;
float specular = glm::pow(glm::max(glm::dot(R, Ev), 0.f), 32.f);
float lambert = glm::max(0.f, glm::dot(L, n));
glm::vec3 diffuse = lambert * fragment.color;
framebuffer[index] += glm::clamp(ambient + diffuse*glm::vec3(0.7)
+ specular * glm::vec3(0.2), glm::vec3(0), glm::vec3(1));
}
#else
framebuffer[index] = fragment.color;
#endif
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
width = w;
height = h;
hipFree(dev_fragmentBuffer);
hipMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
hipFree(dev_framebuffer);
hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
hipFree(dev_depth);
hipMalloc(&dev_depth, width * height * sizeof(float));
hipFree(dev_mutex);
hipMalloc(&dev_mutex, width * height * sizeof(int));
hipMemset(dev_mutex, 0, width * height * sizeof(int));
checkCUDAError("rasterizeInit");
}
__global__
void initDepth(int w, int h, float * depth)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = INT_MAX;
}
}
/**
* Kernel with stride support, used in place of hipMemcpy when the source attribute data is interleaved (non-zero byteStride)
* One thread is responsible for copying one component
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
glm::mat4 curMatrix(1.0);
const std::vector<double> &m = n.matrix;
if (m.size() > 0) {
// matrix, copy it
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
} else {
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0) {
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0) {
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0) {
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
void traverseNode (
std::map<std::string, glm::mat4> & n2m,
const tinygltf::Scene & scene,
const std::string & nodeString,
const glm::mat4 & parentMatrix
)
{
const tinygltf::Node & n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it) {
traverseNode(n2m, scene, *it, M);
}
}
__forceinline__ __device__ glm::vec3 baryPos(VertexOut *v, glm::vec3 barycentric) {
VertexOut vert0, vert1, vert2;
vert0 = v[0];
vert1 = v[1];
vert2 = v[2];
glm::vec3 p0 = vert0.mvpPos * barycentric[0];
glm::vec3 p1 = vert1.mvpPos * barycentric[1];
glm::vec3 p2 = vert2.mvpPos * barycentric[2];
return p0 + p1 + p2;
}
__forceinline__ __device__ glm::vec3 baryNorm(VertexOut *v, glm::vec3 barycentric) {
VertexOut vert0, vert1, vert2;
vert0 = v[0];
vert1 = v[1];
vert2 = v[2];
glm::vec3 p0 = vert0.eyeNor * barycentric[0];
glm::vec3 p1 = vert1.eyeNor * barycentric[1];
glm::vec3 p2 = vert2.eyeNor * barycentric[2];
return p0 + p1 + p2;
}
__forceinline__ __device__ glm::vec2 baryUVs(VertexOut *v, glm::vec3 barycentric) {
VertexOut vert0, vert1, vert2;
vert0 = v[0];
vert1 = v[1];
vert2 = v[2];
glm::vec2 p0 = vert0.texcoord0 * barycentric[0];
glm::vec2 p1 = vert1.texcoord0 * barycentric[1];
glm::vec2 p2 = vert2.texcoord0 * barycentric[2];
return p0 + p1 + p2;
}
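// Perspective-correct interpolation: UVs vary linearly in 3D, not in screen space after the
// perspective divide, so interpolate uv/z and 1/z with the barycentric weights and then
// recover uv = (uv/z) / (1/z).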
__forceinline__ __device__ glm::vec2 baryUVsPerspective(VertexOut *v, glm::vec3 barycentric) {
VertexOut v0, v1, v2;
v0 = v[0];
v1 = v[1];
v2 = v[2];
glm::vec2 texCoordZ = barycentric.x * (v0.texcoord0 / v0.eyePos.z) + barycentric.y * (v1.texcoord0 / v1.eyePos.z) + barycentric.z * (v2.texcoord0 / v2.eyePos.z);
float coordZ = barycentric.x * (1.0f / v0.eyePos.z) + barycentric.y * (1.0f / v1.eyePos.z) + barycentric.z * (1.0f / v2.eyePos.z);
return texCoordZ / coordZ;
}
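// Texture lookup helper: texels are stored as tightly packed 8-bit RGB triples,
// converted here to a float color in [0, 1].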
__device__
glm::vec3 getColor(TextureData* tex, int width, float x, float y) {
int i = x + y * width;
return glm::vec3(tex[i * 3], tex[i * 3 + 1], tex[i * 3 + 2]) / 255.f;
}
__global__ void kernelRasterize(int totalNumPrimitives, Primitive *dev_primitives, Fragment *dev_fragmentBuffer, float *dev_depth, int *dev_mutex, int width, int height) {
int pid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pid >= totalNumPrimitives) return; // >= so thread pid == totalNumPrimitives does not read past dev_primitives
// compute triangle from primitive
Primitive primitive = dev_primitives[pid];
glm::vec3 v0, v1, v2;
v0 = glm::vec3(primitive.v[0].pos);
v1 = glm::vec3(primitive.v[1].pos);
v2 = glm::vec3(primitive.v[2].pos);
glm::vec3 triangle[3] = { v0, v1, v2 };
#if BACKCULL
if (glm::dot(primitive.v->eyeNor, primitive.v->camPos - primitive.v->eyePos) < -1.f) {
return;
}
#endif
#if POINT
int x, y;
for (int i = 0; i < 3; i++) {
x = triangle[i].x;
y = triangle[i].y;
int fragmentId = x + y * width;
if ((x >= 0 && x <= width - 1) && (y >= 0 && y <= height - 1)) {
dev_fragmentBuffer[fragmentId].color = primitive.v->eyeNor;
}
}
#elif LINE
for (int i = 0; i < 3; i++) {
int x1 = triangle[i].x;
int x2 = triangle[(i + 1) % 3].x; // wrap around so the third edge connects v2 back to v0
int y1 = triangle[i].y;
int y2 = triangle[(i + 1) % 3].y;
int dx = x2 - x1;
int dy = y2 - y1;
if (dx == 0) continue; // skip vertical edges to avoid dividing by zero
for (int x = x1; x <= x2; x++) {
int y = y1 + dy * (x - x1) / dx;
int fragmentId = x + y * width;
if (x < 0 || x >= width) continue;
if (y < 0 || y >= height) continue;
dev_fragmentBuffer[fragmentId].color = primitive.v->eyeNor;
}
}
#elif TRI
// compute bounding box and clip to screen
AABB boundingBox = getAABBForTriangle(triangle);
const int minX = glm::min(width - 1, glm::max(0, (int)boundingBox.min.x));
const int minY = glm::min(height - 1, glm::max(0, (int)boundingBox.min.y));
const int maxX = glm::min(width - 1, glm::max(0, (int)boundingBox.max.x));
const int maxY = glm::min(height - 1, glm::max(0, (int)boundingBox.max.y));
// iterate over bounding box and test which pixels are inside
for (int x = minX; x <= maxX; ++x) {
for (int y = minY; y <= maxY; ++y) {
glm::vec3 barycentric = calculateBarycentricCoordinate(triangle, glm::vec2(x, y));
bool inTriangle = isBarycentricCoordInBounds(barycentric);
if (inTriangle) {
const int fragmentId = x + (y * width);
bool isSet;
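// Per-pixel spin lock: atomicCAS flips dev_mutex[fragmentId] from 0 to 1 so that only one
// thread at a time compares and writes the depth and fragment for this pixel, making the
// depth test race-free across overlapping primitives.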
do {
// it was unlocked so we lock it
isSet = atomicCAS(&dev_mutex[fragmentId], 0, 1) == 0;
if (isSet) {
float depth = -getZAtCoordinate(barycentric, triangle) * INT_MAX;
// if this fragment is closer, we set the new depth and fragment
if (depth < dev_depth[fragmentId]) {
dev_depth[fragmentId] = depth;
Fragment &fragment = dev_fragmentBuffer[fragmentId];
fragment.eyeNor = baryNorm(primitive.v, barycentric);
fragment.eyePos = baryPos(primitive.v, barycentric);
fragment.color = fragment.eyeNor;
fragment.color = glm::vec3(1.f);
fragment.camPos = primitive.v[0].camPos;
#if PERSP_CORRECT
glm::vec2 uvs = baryUVsPerspective(primitive.v, barycentric);
fragment.texcoord0 = uvs;
auto v0 = primitive.v[0];
auto v1 = primitive.v[1];
auto v2 = primitive.v[2];
glm::vec2 texCoordZ = barycentric.x * (v0.texcoord0 / v0.eyePos.z) + barycentric.y * (v1.texcoord0 / v1.eyePos.z) + barycentric.z * (v2.texcoord0 / v2.eyePos.z);
float coordZ = barycentric.x * (1.0f / v0.eyePos.z) + barycentric.y * (1.0f / v1.eyePos.z) + barycentric.z * (1.0f / v2.eyePos.z);
fragment.texcoord0 = texCoordZ / coordZ;
#else
glm::vec2 uvs = baryUVs(primitive.v, barycentric);
fragment.texcoord0 = uvs;
#endif
#if BILINEAR
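// Bilinear filtering: fetch the four texels surrounding (x, y) and blend them with the
// fractional offsets, which smooths magnified textures compared to the nearest-neighbor
// lookup in the plain TEXTURE path below.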
if (primitive.v->dev_diffuseTex != NULL) {
float x = uvs[0] * primitive.v->texWidth;
float y = uvs[1] * primitive.v->texHeight;
int xx = glm::floor(x);
int yy = glm::floor(y);
float xfract = x - xx;
float yfract = y - yy;
float xinv = 1.f - xfract;
float yinv = 1.f - yfract;
TextureData *text = primitive.v->dev_diffuseTex;
int width = primitive.v->texWidth;
glm::vec3 tex00 = getColor(text, width, xx, yy);
glm::vec3 tex10 = getColor(text, width, xx + 1, yy);
glm::vec3 tex01 = getColor(text, width, xx, yy + 1);
glm::vec3 tex11 = getColor(text, width, xx + 1, yy + 1);
fragment.color = (tex00 * xinv + tex10 * xfract) * yinv + (tex01 * xinv + tex11 * xfract) * yfract;
}
#elif TEXTURE
if (primitive.v->dev_diffuseTex != NULL) {
float x = fragment.texcoord0[0] * primitive.v->texWidth;
float y = fragment.texcoord0[1] * primitive.v->texHeight;
TextureData *text = primitive.v->dev_diffuseTex;
int width = primitive.v->texWidth;
fragment.color = getColor(text, width, glm::floor(x), glm::floor(y));
}
#else
//fragment.color = baryNorm(primitive.v, barycentric);
#endif
}
dev_mutex[fragmentId] = 0;
}
} while (!isSet);
}
}
}
#endif
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
hipMalloc(&dev_bufferView, bufferView.byteLength);
hipMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, hipMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffers of indices, material, and each attribute
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
hipMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// we assume the glTF model attribute types are 5126 (FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
hipMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
hipMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You only need to worry about this part once you have started to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
hipMalloc(&dev_diffuseTex, s);
hipMemcpy(dev_diffuseTex, &image.image.at(0), s, hipMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materials
// You may have to take a look at tinygltfloader
// You can also use the above code loading diffuse material as a start point
}
// ---------Node hierarchy transform--------
hipDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
hipMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, hipFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
hipFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
// TODO: Apply vertex transformation here
// Multiply the MVP matrix for each vertex position, this will transform everything into clipping space
// Then divide the pos by its w element to transform into NDC space
// Finally transform x and y to viewport space
glm::vec4 objPos(primitive.dev_position[vid], 1.f);
glm::vec4 eyePos = MVP * objPos;
glm::vec4 mvpPos = eyePos;
mvpPos /= mvpPos.w;
mvpPos.x = 0.5f * float(width) * (mvpPos.x + 1.f);
mvpPos.y = 0.5f * float(height) * (1.f - mvpPos.y);
mvpPos.z = -mvpPos.z;
// TODO: Apply vertex assembly here
// Assemble all attribute arrays into the primitive array
VertexOut &vo = primitive.dev_verticesOut[vid];
vo.pos = mvpPos;
vo.eyePos = glm::vec3(MV * objPos);//glm::vec3(eyePos[0], eyePos[1], eyePos[2]);
vo.mvpPos = glm::vec3(MVP * objPos);
vo.eyeNor = glm::normalize(MV_normal * primitive.dev_normal[vid]);
vo.camPos = glm::vec3(MV * glm::vec4(0, 0, 0, 1));
if (primitive.dev_texcoord0) vo.texcoord0 = primitive.dev_texcoord0[vid];
if (primitive.dev_diffuseTex) vo.dev_diffuseTex = primitive.dev_diffuseTex;
vo.texHeight = primitive.diffuseTexHeight;
vo.texWidth = primitive.diffuseTexWidth;
}
}
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices) {
// TODO: uncomment the following code for a start
// This is primitive assembly for triangles
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
// TODO: other primitive types (point, line)
}
}
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal) {
using time_point_t = std::chrono::high_resolution_clock::time_point;
time_point_t start_time, end_time;
float elapsed_time;
std::chrono::duration<double, std::milli> dur;
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Execute your rasterization pipeline here
// (See README for rasterization pipeline outline.)
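// Per-frame pipeline: vertex transform & assembly -> primitive assembly -> depth buffer
// initialization -> rasterization (kernelRasterize) -> fragment shading (render) ->
// copy the framebuffer into the OpenGL PBO (sendImageToPBO).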
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it) {
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
#if TIMER
start_time = std::chrono::high_resolution_clock::now();
#endif
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
hipDeviceSynchronize();
#if TIMER
hipDeviceSynchronize();
end_time = std::chrono::high_resolution_clock::now();
dur = end_time - start_time;
elapsed_time = static_cast<decltype(elapsed_time)>(dur.count());
std::cout << "vertex processing elapsed time: " << elapsed_time << "ms." << std::endl;
#endif
#if TIMER
start_time = std::chrono::high_resolution_clock::now();
#endif
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
#if TIMER
hipDeviceSynchronize();
end_time = std::chrono::high_resolution_clock::now();
dur = end_time - start_time;
elapsed_time = static_cast<decltype(elapsed_time)>(dur.count());
std::cout << "primitive assembly elapsed time: " << elapsed_time << "ms." << std::endl;
#endif
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
}
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepth << <blockCount2d, blockSize2d >> >(width, height, dev_depth);
// TODO: rasterize
dim3 numThreadsPerBlock(128);
dim3 numBlocksForPrimitives((totalNumPrimitives + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
#if TIMER
start_time = std::chrono::high_resolution_clock::now();
#endif
kernelRasterize << <numBlocksForPrimitives, numThreadsPerBlock >> > (totalNumPrimitives, dev_primitives, dev_fragmentBuffer, dev_depth, dev_mutex, width, height);
#if TIMER
hipDeviceSynchronize();
end_time = std::chrono::high_resolution_clock::now();
dur = end_time - start_time;
elapsed_time = static_cast<decltype(elapsed_time)>(dur.count());
std::cout << "rasterize elapsed time: " << elapsed_time << "ms." << std::endl;
#endif
// Copy depthbuffer colors into framebuffer
#if TIMER
start_time = std::chrono::high_resolution_clock::now();
#endif
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer);
#if TIMER
hipDeviceSynchronize();
end_time = std::chrono::high_resolution_clock::now();
dur = end_time - start_time;
elapsed_time = static_cast<decltype(elapsed_time)>(dur.count());
std::cout << "fragment shader elapsed time: " << elapsed_time << "ms." << std::endl;
#endif
checkCUDAError("fragment shader");
// Copy framebuffer into OpenGL buffer for OpenGL previewing
hipLaunchKernelGGL(( sendImageToPBO), dim3(blockCount2d), dim3(blockSize2d), 0, 0, pbo, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it) {
for (auto p = it->second.begin(); p != it->second.end(); ++p) {
hipFree(p->dev_indices);
hipFree(p->dev_position);
hipFree(p->dev_normal);
hipFree(p->dev_texcoord0);
hipFree(p->dev_diffuseTex);
hipFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
hipFree(dev_primitives);
dev_primitives = NULL;
hipFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
hipFree(dev_framebuffer);
dev_framebuffer = NULL;
hipFree(dev_depth);
dev_depth = NULL;
checkCUDAError("rasterize Free");
}
| e04809f738428b153e95642a5665ededa3fe8092.cu | /**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <chrono>
#define LAMBERT 1
#define BLINN 0
#define POINT 0
#define TRI 0
#define LINE 1
#define TEXTURE 1
#define BILINEAR 1
#define PERSP_CORRECT 1
#define BACKCULL 0
#define TIMER 0
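// Compile-time feature toggles: LAMBERT / BLINN select the fragment shading model,
// POINT / LINE / TRI select the primitive rasterization path, TEXTURE / BILINEAR / PERSP_CORRECT
// control texture sampling, BACKCULL enables back-face culling, and TIMER prints per-stage timings.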
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType{
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
// TODO: add new attributes to your VertexOut
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, since normals are not preserved by the perspective transformation
// glm::vec3 col;
glm::vec2 texcoord0;
TextureData* dev_diffuseTex = NULL;
glm::vec3 camPos;
int texWidth, texHeight;
glm::vec3 mvpPos;
// ...
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
};
struct Fragment {
glm::vec3 color;
// TODO: add new attributes to your Fragment
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor;
glm::vec3 camPos;
VertexAttributeTexcoord texcoord0;
TextureData* dev_diffuseTex;
// ...
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
TextureData* dev_diffuseTex;
int diffuseTexWidth;
int diffuseTexHeight;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
// TODO: add more attributes when needed
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static float * dev_depth = NULL; // you might need this buffer when doing depth test
static int * dev_mutex = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
// TODO: add your fragment shader code here
Fragment fragment = fragmentBuffer[index];
#if POINT || LINE
framebuffer[index] = fragment.color;
#elif LAMBERT
glm::vec3 v = fragment.eyePos;
glm::vec3 n = fragment.eyeNor;
glm::vec3 fragColor(1, 0, 0);
glm::vec3 lightPos = glm::vec3(2, 2, 2) + v;
glm::vec3 L = glm::normalize(lightPos - v);
float lambert = glm::max(0.f, glm::dot(L, n));
glm::vec3 ambient = glm::vec3(0.1) * fragment.color;
framebuffer[index] = ambient + 0.9f * lambert * fragment.color;
#elif BLINN
glm::vec3 lights[2] = { glm::vec3(2, 2, 2) };
framebuffer[index] = glm::vec3(0.0f);
for (glm::vec3 lightPos : lights) {
glm::vec3 v = fragment.eyePos;
glm::vec3 n = glm::normalize(fragment.eyeNor);
glm::vec3 camPos = fragment.camPos;
glm::vec3 L = lightPos;
glm::vec3 Ev = glm::normalize(-v);
glm::vec3 R = glm::normalize(-glm::reflect(v - camPos, n));
glm::vec3 ambient = glm::vec3(0.1) * fragment.color;
float specular = glm::pow(glm::max(glm::dot(R, Ev), 0.f), 32.f);
float lambert = glm::max(0.f, glm::dot(L, n));
glm::vec3 diffuse = lambert * fragment.color;
framebuffer[index] += glm::clamp(ambient + diffuse*glm::vec3(0.7)
+ specular * glm::vec3(0.2), glm::vec3(0), glm::vec3(1));
}
#else
framebuffer[index] = fragment.color;
#endif
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
width = w;
height = h;
cudaFree(dev_fragmentBuffer);
cudaMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
cudaFree(dev_framebuffer);
cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
cudaFree(dev_depth);
cudaMalloc(&dev_depth, width * height * sizeof(float));
cudaFree(dev_mutex);
cudaMalloc(&dev_mutex, width * height * sizeof(int));
cudaMemset(dev_mutex, 0, width * height * sizeof(int));
checkCUDAError("rasterizeInit");
}
__global__
void initDepth(int w, int h, float * depth)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = INT_MAX;
}
}
/**
* Kernel with stride support, used in place of cudaMemcpy when the source attribute data is interleaved (non-zero byteStride)
* One thread is responsible for copying one component
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
glm::mat4 curMatrix(1.0);
const std::vector<double> &m = n.matrix;
if (m.size() > 0) {
// matrix, copy it
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
} else {
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0) {
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0) {
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0) {
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
void traverseNode (
std::map<std::string, glm::mat4> & n2m,
const tinygltf::Scene & scene,
const std::string & nodeString,
const glm::mat4 & parentMatrix
)
{
const tinygltf::Node & n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it) {
traverseNode(n2m, scene, *it, M);
}
}
__forceinline__ __device__ glm::vec3 baryPos(VertexOut *v, glm::vec3 barycentric) {
VertexOut vert0, vert1, vert2;
vert0 = v[0];
vert1 = v[1];
vert2 = v[2];
glm::vec3 p0 = vert0.mvpPos * barycentric[0];
glm::vec3 p1 = vert1.mvpPos * barycentric[1];
glm::vec3 p2 = vert2.mvpPos * barycentric[2];
return p0 + p1 + p2;
}
__forceinline__ __device__ glm::vec3 baryNorm(VertexOut *v, glm::vec3 barycentric) {
VertexOut vert0, vert1, vert2;
vert0 = v[0];
vert1 = v[1];
vert2 = v[2];
glm::vec3 p0 = vert0.eyeNor * barycentric[0];
glm::vec3 p1 = vert1.eyeNor * barycentric[1];
glm::vec3 p2 = vert2.eyeNor * barycentric[2];
return p0 + p1 + p2;
}
__forceinline__ __device__ glm::vec2 baryUVs(VertexOut *v, glm::vec3 barycentric) {
VertexOut vert0, vert1, vert2;
vert0 = v[0];
vert1 = v[1];
vert2 = v[2];
glm::vec2 p0 = vert0.texcoord0 * barycentric[0];
glm::vec2 p1 = vert1.texcoord0 * barycentric[1];
glm::vec2 p2 = vert2.texcoord0 * barycentric[2];
return p0 + p1 + p2;
}
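// Perspective-correct interpolation: UVs vary linearly in 3D, not in screen space after the
// perspective divide, so interpolate uv/z and 1/z with the barycentric weights and then
// recover uv = (uv/z) / (1/z).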
__forceinline__ __device__ glm::vec2 baryUVsPerspective(VertexOut *v, glm::vec3 barycentric) {
VertexOut v0, v1, v2;
v0 = v[0];
v1 = v[1];
v2 = v[2];
glm::vec2 texCoordZ = barycentric.x * (v0.texcoord0 / v0.eyePos.z) + barycentric.y * (v1.texcoord0 / v1.eyePos.z) + barycentric.z * (v2.texcoord0 / v2.eyePos.z);
float coordZ = barycentric.x * (1.0f / v0.eyePos.z) + barycentric.y * (1.0f / v1.eyePos.z) + barycentric.z * (1.0f / v2.eyePos.z);
return texCoordZ / coordZ;
}
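// Texture lookup helper: texels are stored as tightly packed 8-bit RGB triples,
// converted here to a float color in [0, 1].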
__device__
glm::vec3 getColor(TextureData* tex, int width, float x, float y) {
int i = x + y * width;
return glm::vec3(tex[i * 3], tex[i * 3 + 1], tex[i * 3 + 2]) / 255.f;
}
__global__ void kernelRasterize(int totalNumPrimitives, Primitive *dev_primitives, Fragment *dev_fragmentBuffer, float *dev_depth, int *dev_mutex, int width, int height) {
int pid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pid >= totalNumPrimitives) return; // >= so thread pid == totalNumPrimitives does not read past dev_primitives
// compute triangle from primitive
Primitive primitive = dev_primitives[pid];
glm::vec3 v0, v1, v2;
v0 = glm::vec3(primitive.v[0].pos);
v1 = glm::vec3(primitive.v[1].pos);
v2 = glm::vec3(primitive.v[2].pos);
glm::vec3 triangle[3] = { v0, v1, v2 };
#if BACKCULL
if (glm::dot(primitive.v->eyeNor, primitive.v->camPos - primitive.v->eyePos) < -1.f) {
return;
}
#endif
#if POINT
int x, y;
for (int i = 0; i < 3; i++) {
x = triangle[i].x;
y = triangle[i].y;
int fragmentId = x + y * width;
if ((x >= 0 && x <= width - 1) && (y >= 0 && y <= height - 1)) {
dev_fragmentBuffer[fragmentId].color = primitive.v->eyeNor;
}
}
#elif LINE
for (int i = 0; i < 3; i++) {
int x1 = triangle[i].x;
int x2 = triangle[(i + 1) % 3].x; // wrap around so the third edge connects v2 back to v0
int y1 = triangle[i].y;
int y2 = triangle[(i + 1) % 3].y;
int dx = x2 - x1;
int dy = y2 - y1;
if (dx == 0) continue; // skip vertical edges to avoid dividing by zero
for (int x = x1; x <= x2; x++) {
int y = y1 + dy * (x - x1) / dx;
int fragmentId = x + y * width;
if (x < 0 || x >= width) continue;
if (y < 0 || y >= height) continue;
dev_fragmentBuffer[fragmentId].color = primitive.v->eyeNor;
}
}
#elif TRI
// compute bounding box and clip to screen
AABB boundingBox = getAABBForTriangle(triangle);
const int minX = glm::min(width - 1, glm::max(0, (int)boundingBox.min.x));
const int minY = glm::min(height - 1, glm::max(0, (int)boundingBox.min.y));
const int maxX = glm::min(width - 1, glm::max(0, (int)boundingBox.max.x));
const int maxY = glm::min(height - 1, glm::max(0, (int)boundingBox.max.y));
// iterate over bounding box and test which pixels are inside
for (int x = minX; x <= maxX; ++x) {
for (int y = minY; y <= maxY; ++y) {
glm::vec3 barycentric = calculateBarycentricCoordinate(triangle, glm::vec2(x, y));
bool inTriangle = isBarycentricCoordInBounds(barycentric);
if (inTriangle) {
const int fragmentId = x + (y * width);
bool isSet;
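// Per-pixel spin lock: atomicCAS flips dev_mutex[fragmentId] from 0 to 1 so that only one
// thread at a time compares and writes the depth and fragment for this pixel, making the
// depth test race-free across overlapping primitives.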
do {
// it was unlocked so we lock it
isSet = atomicCAS(&dev_mutex[fragmentId], 0, 1) == 0;
if (isSet) {
float depth = -getZAtCoordinate(barycentric, triangle) * INT_MAX;
// if this fragment is closer, we set the new depth and fragment
if (depth < dev_depth[fragmentId]) {
dev_depth[fragmentId] = depth;
Fragment &fragment = dev_fragmentBuffer[fragmentId];
fragment.eyeNor = baryNorm(primitive.v, barycentric);
fragment.eyePos = baryPos(primitive.v, barycentric);
fragment.color = fragment.eyeNor;
fragment.color = glm::vec3(1.f);
fragment.camPos = primitive.v[0].camPos;
#if PERSP_CORRECT
glm::vec2 uvs = baryUVsPerspective(primitive.v, barycentric);
fragment.texcoord0 = uvs;
auto v0 = primitive.v[0];
auto v1 = primitive.v[1];
auto v2 = primitive.v[2];
glm::vec2 texCoordZ = barycentric.x * (v0.texcoord0 / v0.eyePos.z) + barycentric.y * (v1.texcoord0 / v1.eyePos.z) + barycentric.z * (v2.texcoord0 / v2.eyePos.z);
float coordZ = barycentric.x * (1.0f / v0.eyePos.z) + barycentric.y * (1.0f / v1.eyePos.z) + barycentric.z * (1.0f / v2.eyePos.z);
fragment.texcoord0 = texCoordZ / coordZ;
#else
glm::vec2 uvs = baryUVs(primitive.v, barycentric);
fragment.texcoord0 = uvs;
#endif
#if BILINEAR
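// Bilinear filtering: fetch the four texels surrounding (x, y) and blend them with the
// fractional offsets, which smooths magnified textures compared to the nearest-neighbor
// lookup in the plain TEXTURE path below.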
if (primitive.v->dev_diffuseTex != NULL) {
float x = uvs[0] * primitive.v->texWidth;
float y = uvs[1] * primitive.v->texHeight;
int xx = glm::floor(x);
int yy = glm::floor(y);
float xfract = x - xx;
float yfract = y - yy;
float xinv = 1.f - xfract;
float yinv = 1.f - yfract;
TextureData *text = primitive.v->dev_diffuseTex;
int width = primitive.v->texWidth;
glm::vec3 tex00 = getColor(text, width, xx, yy);
glm::vec3 tex10 = getColor(text, width, xx + 1, yy);
glm::vec3 tex01 = getColor(text, width, xx, yy + 1);
glm::vec3 tex11 = getColor(text, width, xx + 1, yy + 1);
fragment.color = (tex00 * xinv + tex10 * xfract) * yinv + (tex01 * xinv + tex11 * xfract) * yfract;
}
#elif TEXTURE
if (primitive.v->dev_diffuseTex != NULL) {
float x = fragment.texcoord0[0] * primitive.v->texWidth;
float y = fragment.texcoord0[1] * primitive.v->texHeight;
TextureData *text = primitive.v->dev_diffuseTex;
int width = primitive.v->texWidth;
fragment.color = getColor(text, width, glm::floor(x), glm::floor(y));
}
#else
//fragment.color = baryNorm(primitive.v, barycentric);
#endif
}
dev_mutex[fragmentId] = 0;
}
} while (!isSet);
}
}
}
#endif
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
cudaMalloc(&dev_bufferView, bufferView.byteLength);
cudaMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, cudaMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffers of indices, material, and each attribute
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
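// Note: the index accessor's componentType is assumed to match sizeof(VertexIndex) here;
// a model whose indices use a different width (e.g. 16-bit vs 32-bit) would need a conversion step.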
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
cudaMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute arrays (e.g. dev_position) is fixed (float32),
// we assume the glTF model attribute componentType is 5126 (FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
cudaMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
cudaMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You only need to worry about this part once you have started to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
cudaMalloc(&dev_diffuseTex, s);
cudaMemcpy(dev_diffuseTex, &image.image.at(0), s, cudaMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materials
// You may have to take a look at tinygltfloader
// You can also use the above code loading the diffuse material as a starting point
}
// ---------Node hierarchy transform--------
cudaDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
cudaMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, cudaFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
cudaFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
// TODO: Apply vertex transformation here
// Multiply each vertex position by the MVP matrix to transform it into clip space
// Then divide the position by its w component to transform it into NDC space
// Finally transform x and y into viewport (screen) space
glm::vec4 objPos(primitive.dev_position[vid], 1.f);
glm::vec4 eyePos = MVP * objPos;
glm::vec4 mvpPos = eyePos;
mvpPos /= mvpPos.w;
mvpPos.x = 0.5f * float(width) * (mvpPos.x + 1.f);
mvpPos.y = 0.5f * float(height) * (1.f - mvpPos.y);
mvpPos.z = -mvpPos.z;
// TODO: Apply vertex assembly here
// Assemble all attribute arrays into the primitive array
VertexOut &vo = primitive.dev_verticesOut[vid];
vo.pos = mvpPos;
vo.eyePos = glm::vec3(MV * objPos);//glm::vec3(eyePos[0], eyePos[1], eyePos[2]);
vo.mvpPos = glm::vec3(MVP * objPos);
vo.eyeNor = glm::normalize(MV_normal * primitive.dev_normal[vid]);
vo.camPos = glm::vec3(MV * glm::vec4(0, 0, 0, 1));
if (primitive.dev_texcoord0) vo.texcoord0 = primitive.dev_texcoord0[vid];
if (primitive.dev_diffuseTex) vo.dev_diffuseTex = primitive.dev_diffuseTex;
vo.texHeight = primitive.diffuseTexHeight;
vo.texWidth = primitive.diffuseTexWidth;
}
}
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices) {
// TODO: uncomment the following code for a start
// This is primitive assembly for triangles
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
// TODO: other primitive types (point, line)
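// A minimal sketch (left as a comment, not enabled) of how the point/line cases could
// mirror the triangle branch above, assuming the PrimitiveType enum encodes the vertex
// count per primitive (Point = 1, Line = 2, Triangle = 3), which the triangle path
// already relies on:
// else if (primitive.primitiveMode == TINYGLTF_MODE_LINE || primitive.primitiveMode == TINYGLTF_MODE_POINTS) {
// pid = iid / (int)primitive.primitiveType;
// dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
// = primitive.dev_verticesOut[primitive.dev_indices[iid]];
// }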
}
}
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal) {
using time_point_t = std::chrono::high_resolution_clock::time_point;
time_point_t start_time, end_time;
float elapsed_time;
std::chrono::duration<double, std::milli> dur;
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Execute your rasterization pipeline here
// (See README for rasterization pipeline outline.)
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it) {
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
#if TIMER
start_time = std::chrono::high_resolution_clock::now();
#endif
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
cudaDeviceSynchronize();
#if TIMER
cudaDeviceSynchronize();
end_time = std::chrono::high_resolution_clock::now();
dur = end_time - start_time;
elapsed_time = static_cast<decltype(elapsed_time)>(dur.count());
std::cout << "vertex processing elapsed time: " << elapsed_time << "ms." << std::endl;
#endif
#if TIMER
start_time = std::chrono::high_resolution_clock::now();
#endif
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
#if TIMER
cudaDeviceSynchronize();
end_time = std::chrono::high_resolution_clock::now();
dur = end_time - start_time;
elapsed_time = static_cast<decltype(elapsed_time)>(dur.count());
std::cout << "primitive assembly elapsed time: " << elapsed_time << "ms." << std::endl;
#endif
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
}
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepth << <blockCount2d, blockSize2d >> >(width, height, dev_depth);
// TODO: rasterize
dim3 numThreadsPerBlock(128);
dim3 numBlocksForPrimitives((totalNumPrimitives + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
#if TIMER
start_time = std::chrono::high_resolution_clock::now();
#endif
kernelRasterize << <numBlocksForPrimitives, numThreadsPerBlock >> > (totalNumPrimitives, dev_primitives, dev_fragmentBuffer, dev_depth, dev_mutex, width, height);
#if TIMER
cudaDeviceSynchronize();
end_time = std::chrono::high_resolution_clock::now();
dur = end_time - start_time;
elapsed_time = static_cast<decltype(elapsed_time)>(dur.count());
std::cout << "rasterize elapsed time: " << elapsed_time << "ms." << std::endl;
#endif
// Copy depthbuffer colors into framebuffer
#if TIMER
start_time = std::chrono::high_resolution_clock::now();
#endif
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer);
#if TIMER
cudaDeviceSynchronize();
end_time = std::chrono::high_resolution_clock::now();
dur = end_time - start_time;
elapsed_time = static_cast<decltype(elapsed_time)>(dur.count());
std::cout << "fragment shader elapsed time: " << elapsed_time << "ms." << std::endl;
#endif
checkCUDAError("fragment shader");
// Copy framebuffer into OpenGL buffer for OpenGL previewing
sendImageToPBO<<<blockCount2d, blockSize2d>>>(pbo, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it) {
for (auto p = it->second.begin(); p != it->second.end(); ++p) {
cudaFree(p->dev_indices);
cudaFree(p->dev_position);
cudaFree(p->dev_normal);
cudaFree(p->dev_texcoord0);
cudaFree(p->dev_diffuseTex);
cudaFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
cudaFree(dev_primitives);
dev_primitives = NULL;
cudaFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
cudaFree(dev_framebuffer);
dev_framebuffer = NULL;
cudaFree(dev_depth);
dev_depth = NULL;
checkCUDAError("rasterize Free");
}
|
4ef99984bcc07177b147e1cd1539da20d5df8953.hip | // !!! This is a file automatically generated by hipify!!!
#include "chainerx/cuda/cuda_device.h"
#include <cmath>
#include <cstdint>
#include <hip/hip_runtime.h>
#include "chainerx/array.h"
#include "chainerx/axes.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/data_type.cuh"
#include "chainerx/cuda/kernel_regist.h"
#include "chainerx/cuda/numeric.cuh"
#include "chainerx/cuda/numeric_limits.cuh"
#include "chainerx/cuda/reduce.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/kernels/reduction.h"
#include "chainerx/kernels/sorting.h"
#include "chainerx/macro.h"
#include "chainerx/numeric_limits.h"
#include "chainerx/reduction_kernel_arg.h"
#include "chainerx/shape.h"
namespace chainerx {
namespace cuda {
namespace {
template <typename T>
struct ArgMaxImpl {
using CudaType = cuda_internal::DataType<T>;
struct MaxAndArgMax {
CudaType max;
int64_t argmax;
};
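// The four members below form the functor contract consumed by Reduce<T, int64_t>:
// Identity() yields the neutral element, MapIn() pairs each loaded value with its index,
// Reduce() folds two partial results, and MapOut() extracts the final argmax index.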
__device__ MaxAndArgMax Identity() { return {CudaType{}, -1}; }
__device__ MaxAndArgMax MapIn(CudaType in, int64_t index) { return {in, index}; }
__device__ void Reduce(MaxAndArgMax next, MaxAndArgMax& accum) {
// Note that `next` can be the return value of `Identity()` in which case `accum` should not be updated.
if (next.argmax != -1 && (accum.argmax == -1 || accum.max < next.max)) {
accum = next;
}
}
__device__ int64_t MapOut(MaxAndArgMax accum) { return accum.argmax; }
};
class CudaArgMaxKernel : public ArgMaxKernel {
public:
void Call(const Array& a, const Axes& axis, const Array& out) override {
Device& device = a.device();
device.CheckDevicesCompatible(a, out);
CudaSetDeviceScope scope{device.index()};
VisitDtype(a.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Reduce<T, int64_t>(a, axis, out, ArgMaxImpl<T>{});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(ArgMaxKernel, CudaArgMaxKernel);
} // namespace
namespace {
template <typename T>
struct ArgMinImpl {
using CudaType = cuda_internal::DataType<T>;
struct MinAndArgMin {
CudaType min;
int64_t argmin;
};
__device__ MinAndArgMin Identity() { return {CudaType{}, -1}; }
__device__ MinAndArgMin MapIn(CudaType in, int64_t index) { return {in, index}; }
__device__ void Reduce(MinAndArgMin next, MinAndArgMin& accum) {
// Note that `next` can be the return value of `Identity()` in which case `accum` should not be updated.
if (next.argmin != -1 && (accum.argmin == -1 || accum.min > next.min)) {
accum = next;
}
}
__device__ int64_t MapOut(MinAndArgMin accum) { return accum.argmin; }
};
class CudaArgMinKernel : public ArgMinKernel {
public:
void Call(const Array& a, const Axes& axis, const Array& out) override {
Device& device = a.device();
device.CheckDevicesCompatible(a, out);
CudaSetDeviceScope scope{device.index()};
VisitDtype(a.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Reduce<T, int64_t>(a, axis, out, ArgMinImpl<T>{});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(ArgMinKernel, CudaArgMinKernel);
} // namespace
namespace {
template <typename In, typename Out>
struct SumImpl {
using InCudaType = cuda_internal::DataType<In>;
using OutCudaType = cuda_internal::DataType<Out>;
__device__ OutCudaType Identity() { return OutCudaType{0}; }
__device__ OutCudaType MapIn(InCudaType in, int64_t /*index*/) { return static_cast<OutCudaType>(in); }
__device__ void Reduce(OutCudaType next, OutCudaType& accum) { accum += next; }
__device__ OutCudaType MapOut(OutCudaType accum) { return accum; }
};
class CudaSumKernel : public SumKernel {
public:
void Call(const Array& a, const Axes& axis, const Array& out) override {
Device& device = a.device();
CHAINERX_ASSERT(internal::IsValidReductionShape(a.shape(), axis, out.shape(), true));
device.CheckDevicesCompatible(a, out);
CudaSetDeviceScope scope{device.index()};
auto do_sum = [&a, &axis, &out](auto in_pt, auto out_pt) {
using In = typename decltype(in_pt)::type;
using Out = typename decltype(out_pt)::type;
Reduce<In, Out>(a, axis, out, SumImpl<In, Out>{});
};
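// Dispatch over the output dtype first, then the input dtype, so Reduce<In, Out> is
// instantiated for the concrete (input, output) type pair of this reduction.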
VisitDtype(out.dtype(), [a_dtype = a.dtype(), &do_sum](auto out_pt) { VisitDtype(a_dtype, do_sum, out_pt); });
}
};
CHAINERX_CUDA_REGISTER_KERNEL(SumKernel, CudaSumKernel);
} // namespace
} // namespace cuda
} // namespace chainerx
| 4ef99984bcc07177b147e1cd1539da20d5df8953.cu | #include "chainerx/cuda/cuda_device.h"
#include <cmath>
#include <cstdint>
#include <cuda_runtime.h>
#include "chainerx/array.h"
#include "chainerx/axes.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/data_type.cuh"
#include "chainerx/cuda/kernel_regist.h"
#include "chainerx/cuda/numeric.cuh"
#include "chainerx/cuda/numeric_limits.cuh"
#include "chainerx/cuda/reduce.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/kernels/reduction.h"
#include "chainerx/kernels/sorting.h"
#include "chainerx/macro.h"
#include "chainerx/numeric_limits.h"
#include "chainerx/reduction_kernel_arg.h"
#include "chainerx/shape.h"
namespace chainerx {
namespace cuda {
namespace {
template <typename T>
struct ArgMaxImpl {
using CudaType = cuda_internal::DataType<T>;
struct MaxAndArgMax {
CudaType max;
int64_t argmax;
};
__device__ MaxAndArgMax Identity() { return {CudaType{}, -1}; }
__device__ MaxAndArgMax MapIn(CudaType in, int64_t index) { return {in, index}; }
__device__ void Reduce(MaxAndArgMax next, MaxAndArgMax& accum) {
// Note that `next` can be the return value of `Identity()` in which case `accum` should not be updated.
if (next.argmax != -1 && (accum.argmax == -1 || accum.max < next.max)) {
accum = next;
}
}
__device__ int64_t MapOut(MaxAndArgMax accum) { return accum.argmax; }
};
class CudaArgMaxKernel : public ArgMaxKernel {
public:
void Call(const Array& a, const Axes& axis, const Array& out) override {
Device& device = a.device();
device.CheckDevicesCompatible(a, out);
CudaSetDeviceScope scope{device.index()};
VisitDtype(a.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Reduce<T, int64_t>(a, axis, out, ArgMaxImpl<T>{});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(ArgMaxKernel, CudaArgMaxKernel);
} // namespace
namespace {
template <typename T>
struct ArgMinImpl {
using CudaType = cuda_internal::DataType<T>;
struct MinAndArgMin {
CudaType min;
int64_t argmin;
};
__device__ MinAndArgMin Identity() { return {CudaType{}, -1}; }
__device__ MinAndArgMin MapIn(CudaType in, int64_t index) { return {in, index}; }
__device__ void Reduce(MinAndArgMin next, MinAndArgMin& accum) {
// Note that `next` can be the return value of `Identity()` in which case `accum` should not be updated.
if (next.argmin != -1 && (accum.argmin == -1 || accum.min > next.min)) {
accum = next;
}
}
__device__ int64_t MapOut(MinAndArgMin accum) { return accum.argmin; }
};
class CudaArgMinKernel : public ArgMinKernel {
public:
void Call(const Array& a, const Axes& axis, const Array& out) override {
Device& device = a.device();
device.CheckDevicesCompatible(a, out);
CudaSetDeviceScope scope{device.index()};
VisitDtype(a.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Reduce<T, int64_t>(a, axis, out, ArgMinImpl<T>{});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(ArgMinKernel, CudaArgMinKernel);
} // namespace
namespace {
template <typename In, typename Out>
struct SumImpl {
using InCudaType = cuda_internal::DataType<In>;
using OutCudaType = cuda_internal::DataType<Out>;
__device__ OutCudaType Identity() { return OutCudaType{0}; }
__device__ OutCudaType MapIn(InCudaType in, int64_t /*index*/) { return static_cast<OutCudaType>(in); }
__device__ void Reduce(OutCudaType next, OutCudaType& accum) { accum += next; }
__device__ OutCudaType MapOut(OutCudaType accum) { return accum; }
};
class CudaSumKernel : public SumKernel {
public:
void Call(const Array& a, const Axes& axis, const Array& out) override {
Device& device = a.device();
CHAINERX_ASSERT(internal::IsValidReductionShape(a.shape(), axis, out.shape(), true));
device.CheckDevicesCompatible(a, out);
CudaSetDeviceScope scope{device.index()};
auto do_sum = [&a, &axis, &out](auto in_pt, auto out_pt) {
using In = typename decltype(in_pt)::type;
using Out = typename decltype(out_pt)::type;
Reduce<In, Out>(a, axis, out, SumImpl<In, Out>{});
};
VisitDtype(out.dtype(), [a_dtype = a.dtype(), &do_sum](auto out_pt) { VisitDtype(a_dtype, do_sum, out_pt); });
}
};
CHAINERX_CUDA_REGISTER_KERNEL(SumKernel, CudaSumKernel);
} // namespace
} // namespace cuda
} // namespace chainerx
|
92ce3d65081ccc05de99bc7f1c76a40aa8216243.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_core.cuh"
#define INCLUDED_FROM_DCONST_DEFINER
#include "dconsts_core.cuh"
#include "errorhandler_cuda.cuh"
#include "common/config.h"
void print_gpu_config()
{
int n_devices;
if (hipGetDeviceCount(&n_devices) != hipSuccess) CRASH("No CUDA devices found!");
printf("Num CUDA devices found: %u\n", n_devices);
int initial_device;
hipGetDevice(&initial_device);
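// Remember the currently active device so it can be restored after the per-device query loop.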
for (int i = 0; i < n_devices; i++) {
hipSetDevice(i);
hipDeviceProp_t props;
hipGetDeviceProperties(&props, i);
printf("--------------------------------------------------\n");
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", props.name);
printf(" Compute capability: %d.%d\n", props.major, props.minor);
printf(" Global memory\n");
printf(" Memory Clock Rate (MHz): %d\n", props.memoryClockRate / (1000));
printf(" Memory Bus Width (bits): %d\n", props.memoryBusWidth);
printf(" Peak Memory Bandwidth (GiB/s): %f\n", 2.0*props.memoryClockRate*(props.memoryBusWidth/8)/(1024*1024));
printf(" ECC enabled: %d\n", props.ECCEnabled);
//Memory usage
size_t free_bytes, total_bytes;
CUDA_ERRCHK( hipMemGetInfo(&free_bytes, &total_bytes) );
const size_t used_bytes = total_bytes - free_bytes;
printf(" Total global mem: %.2f GiB\n", props.totalGlobalMem / (1024.0*1024*1024));
printf(" Gmem used (GiB): %.2f\n", used_bytes / (1024.0*1024*1024));
printf(" Gmem memory free (GiB): %.2f\n", free_bytes / (1024.0*1024*1024));
printf(" Gmem memory total (GiB): %.2f\n", total_bytes / (1024.0*1024*1024));
printf(" Caches\n");
printf(" L2 size: %d KiB\n", props.l2CacheSize / (1024));
printf(" Total const mem: %ld KiB\n", props.totalConstMem / (1024));
printf(" Shared mem per block: %ld KiB\n", props.sharedMemPerBlock / (1024));
printf(" Other\n");
printf(" Warp size: %d\n", props.warpSize);
//printf(" Single to double perf. ratio: %dx\n", props.singleToDoublePrecisionPerfRatio); //Not supported with older CUDA versions
printf(" Stream priorities supported: %d\n", props.streamPrioritiesSupported);
printf("--------------------------------------------------\n");
}
hipSetDevice(initial_device);
}
void load_forcing_dconsts_cuda_core(ForcingParams* forcing_params)
{
CUDA_ERRCHK( hipMemcpyToSymbol(d_FORCING_ENABLED, &forcing_params->forcing_enabled, sizeof(bool)) );
//Copy forcing coefficients to the device's constant memory
const size_t k_idx = forcing_params->k_idx;
CUDA_ERRCHK( hipMemcpyToSymbol(d_KK_VEC_X, &forcing_params->kk_x[k_idx], sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_KK_VEC_Y, &forcing_params->kk_y[k_idx], sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_KK_VEC_Z, &forcing_params->kk_z[k_idx], sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_FORCING_KK_PART_X, &forcing_params->kk_part_x, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_FORCING_KK_PART_Y, &forcing_params->kk_part_y, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_FORCING_KK_PART_Z, &forcing_params->kk_part_z, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_PHI, &forcing_params->phi, sizeof(real)) );
}
void load_hydro_dconsts_cuda_core(CParamConfig* cparams, RunConfig* run_params, const vec3i start_idx)
{
//Grid dimensions
CUDA_ERRCHK( hipMemcpyToSymbol(d_nx, &(cparams->nx), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_ny, &(cparams->ny), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_nz, &(cparams->nz), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_mx, &(cparams->mx), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_my, &(cparams->my), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_mz, &(cparams->mz), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_nx_min, &(cparams->nx_min), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_nx_max, &(cparams->nx_max), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_ny_min, &(cparams->ny_min), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_ny_max, &(cparams->ny_max), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_nz_min, &(cparams->nz_min), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_nz_max, &(cparams->nz_max), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DSX, &(cparams->dsx), sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DSY, &(cparams->dsy), sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DSZ, &(cparams->dsz), sizeof(real)) );
const real dsx_offset = cparams->dsx*start_idx.x;
const real dsy_offset = cparams->dsy*start_idx.y;
const real dsz_offset = cparams->dsz*start_idx.z;
CUDA_ERRCHK( hipMemcpyToSymbol(d_DSX_OFFSET, &dsx_offset, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DSY_OFFSET, &dsy_offset, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DSZ_OFFSET, &dsz_offset, sizeof(real)) );
const real xorig = XORIG;
const real yorig = YORIG;
const real zorig = ZORIG;
CUDA_ERRCHK( hipMemcpyToSymbol(d_XORIG, &xorig, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_YORIG, &yorig, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_ZORIG, &zorig, sizeof(real)) );
//Diff constants
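// Denominators of the central finite-difference stencils: the 1/60, 1/180 and 1/720
// factors are the usual 6th-order weights for first, second and mixed derivatives.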
const real diff1_dx = 1.0/(60.0*cparams->dsx);
const real diff1_dy = 1.0/(60.0*cparams->dsy);
const real diff1_dz = 1.0/(60.0*cparams->dsz);
const real diff2_dx = 1.0/(180.0*cparams->dsx*cparams->dsx);
const real diff2_dy = 1.0/(180.0*cparams->dsy*cparams->dsy);
const real diff2_dz = 1.0/(180.0*cparams->dsz*cparams->dsz);
const real diffmn_dxdy = (1.0/720.0)*(1.0/cparams->dsx)*(1.0/cparams->dsy);
const real diffmn_dydz = (1.0/720.0)*(1.0/cparams->dsy)*(1.0/cparams->dsz);
const real diffmn_dxdz = (1.0/720.0)*(1.0/cparams->dsz)*(1.0/cparams->dsx);
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFF1_DX_DIV, &diff1_dx, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFF1_DY_DIV, &diff1_dy, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFF1_DZ_DIV, &diff1_dz, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFF2_DX_DIV, &diff2_dx, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFF2_DY_DIV, &diff2_dy, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFF2_DZ_DIV, &diff2_dz, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFFMN_DXDY_DIV, &diffmn_dxdy, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFFMN_DYDZ_DIV, &diffmn_dydz, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFFMN_DXDZ_DIV, &diffmn_dxdz, sizeof(real)) );
//Viscosity
CUDA_ERRCHK( hipMemcpyToSymbol(d_NU_VISC, &(run_params->nu_visc), sizeof(real)) );
//Speed of sound
const real cs2_sound = pow(run_params->cs_sound, 2.0);
CUDA_ERRCHK( hipMemcpyToSymbol(d_CS2_SOUND, &cs2_sound, sizeof(real)) );
//Induction
CUDA_ERRCHK( hipMemcpyToSymbol(d_ETA, &(run_params->eta), sizeof(real)) );
}
void init_grid_cuda_core(Grid* d_grid, Grid* d_grid_dst, CParamConfig* cparams)
{
//Print the GPU configuration
print_gpu_config();
const size_t grid_size_bytes = sizeof(real) * cparams->mx * cparams->my * cparams->mz;
//Init device arrays
for (int i=0; i < NUM_ARRS; ++i) {
CUDA_ERRCHK( hipMalloc(&(d_grid->arr[i]), grid_size_bytes) );
CUDA_ERRCHK( hipMemset(d_grid->arr[i], INT_MAX, grid_size_bytes) );
CUDA_ERRCHK( hipMalloc(&(d_grid_dst->arr[i]), grid_size_bytes) );
CUDA_ERRCHK( hipMemset(d_grid_dst->arr[i], INT_MAX, grid_size_bytes) );
}
}
void destroy_grid_cuda_core(Grid* d_grid, Grid* d_grid_dst)
{
for (int i=0; i < NUM_ARRS; ++i) {
CUDA_ERRCHK( hipFree(d_grid->arr[i]) );
CUDA_ERRCHK( hipFree(d_grid_dst->arr[i]) );
}
}
void load_grid_cuda_core(Grid* d_grid, CParamConfig* d_cparams, vec3i* h_start_idx, Grid* h_grid, CParamConfig* h_cparams)
{
const size_t grid_size_bytes = sizeof(real)* d_cparams->mx * d_cparams->my * d_cparams->mz;
const size_t slice_size = h_cparams->mx*h_cparams->my;
for (int w=0; w < NUM_ARRS; ++w) {
CUDA_ERRCHK( hipMemcpyAsync(&(d_grid->arr[w][0]), &(h_grid->arr[w][h_start_idx->z*slice_size]), grid_size_bytes, hipMemcpyHostToDevice) );//NOTE: if stream not specified, uses the default stream->non-async behaviour
}
}
void store_grid_cuda_core(Grid* h_grid, CParamConfig* h_cparams, Grid* d_grid, CParamConfig* d_cparams, vec3i* h_start_idx)
{
const size_t grid_size_bytes = sizeof(real)* d_cparams->mx * d_cparams->my * d_cparams->nz;
const size_t slice_size = h_cparams->mx * h_cparams->my;
const size_t z_offset = BOUND_SIZE * slice_size;
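// Only the nz interior z-slices are copied back; the copy starts after the BOUND_SIZE
// halo slices on both the device grid and the full host grid.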
for (int w=0; w < NUM_ARRS; ++w) {
CUDA_ERRCHK( hipMemcpyAsync(&(h_grid->arr[w][z_offset + h_start_idx->z*slice_size]), &(d_grid->arr[w][z_offset]), grid_size_bytes, hipMemcpyDeviceToHost) ); //NOTE: if stream not specified, uses the default stream->non-async behaviour
}
}
void store_slice_cuda_core(Slice* h_slice, CParamConfig* h_cparams, RunConfig* h_run_params, Slice* d_slice, CParamConfig* d_cparams, vec3i* h_start_idx)
{
if (h_run_params->slice_axis != 'z') CRASH("Slice axis other than z not yet supported!");
Slice buffer;
slice_malloc(&buffer, d_cparams, h_run_params);
hipStream_t stream;
hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
const size_t slice_size_bytes = sizeof(real) * d_cparams->mx * d_cparams->my;
for (int w=0; w < NUM_SLICES; ++w)
CUDA_ERRCHK( hipMemcpyAsync(buffer.arr[w], d_slice->arr[w], slice_size_bytes, hipMemcpyDeviceToHost, stream) );
CUDA_ERRCHK( hipStreamSynchronize(stream) ); // ensure the async copies into the staging buffer have finished before reading it on the host
const size_t row_size_bytes = sizeof(real) * d_cparams->nx;
for (int w=0; w < NUM_SLICES; ++w)
for (int j=d_cparams->ny_min; j < d_cparams->ny_max; ++j)
memcpy(&h_slice->arr[w][h_cparams->nx_min + (j+h_start_idx->y)*h_cparams->mx],
&buffer.arr[w][d_cparams->nx_min + j*d_cparams->mx], row_size_bytes);
hipStreamDestroy(stream);
slice_free(&buffer);
}
void init_halo_cuda_core(real* d_halo)
{
printf("init_halo_cuda_core\n");
/*
* Allocate memory for d_halo here
* CUDA_ERRCHK( hipMalloc(....));
*/
}
void destroy_halo_cuda_core(real* d_halo)
{
printf("destroy_halo_cuda_core\n");
/*
* Free d_halo here
* CUDA_ERRCHK( hipFree(d_halo) );
*/
}
void load_outer_halo_cuda_core(Grid* d_grid, real* d_halobuffer, CParamConfig* d_cparams,
Grid* h_grid, real* h_halobuffer, CParamConfig* h_cparams, vec3i* h_start_idx)
{
printf("load_outer_halo_cuda_core\n");
/*
Load outer halo from h_halobuffer to d_halobuffer (copyouterhalostodevice())
and update the halo of the device (fillhalos)
*/
}
void store_internal_halo_cuda_core(Grid* h_grid, real* h_halobuffer, CParamConfig* h_cparams, vec3i* h_start_idx,
Grid* d_grid, real* d_halobuffer, CParamConfig* d_cparams)
{
printf("store_internal_halo_cuda_core\n");
/*
Store internal halos in d_halobuffer to host memory in h_halobuffer/h_grid
*/
}
| 92ce3d65081ccc05de99bc7f1c76a40aa8216243.cu | #include "cuda_core.cuh"
#define INCLUDED_FROM_DCONST_DEFINER
#include "dconsts_core.cuh"
#include "errorhandler_cuda.cuh"
#include "common/config.h"
void print_gpu_config()
{
int n_devices;
if (cudaGetDeviceCount(&n_devices) != cudaSuccess) CRASH("No CUDA devices found!");
printf("Num CUDA devices found: %u\n", n_devices);
int initial_device;
cudaGetDevice(&initial_device);
for (int i = 0; i < n_devices; i++) {
cudaSetDevice(i);
cudaDeviceProp props;
cudaGetDeviceProperties(&props, i);
printf("--------------------------------------------------\n");
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", props.name);
printf(" Compute capability: %d.%d\n", props.major, props.minor);
printf(" Global memory\n");
printf(" Memory Clock Rate (MHz): %d\n", props.memoryClockRate / (1000));
printf(" Memory Bus Width (bits): %d\n", props.memoryBusWidth);
printf(" Peak Memory Bandwidth (GiB/s): %f\n", 2.0*props.memoryClockRate*(props.memoryBusWidth/8)/(1024*1024));
printf(" ECC enabled: %d\n", props.ECCEnabled);
//Memory usage
size_t free_bytes, total_bytes;
CUDA_ERRCHK( cudaMemGetInfo(&free_bytes, &total_bytes) );
const size_t used_bytes = total_bytes - free_bytes;
printf(" Total global mem: %.2f GiB\n", props.totalGlobalMem / (1024.0*1024*1024));
printf(" Gmem used (GiB): %.2f\n", used_bytes / (1024.0*1024*1024));
printf(" Gmem memory free (GiB): %.2f\n", free_bytes / (1024.0*1024*1024));
printf(" Gmem memory total (GiB): %.2f\n", total_bytes / (1024.0*1024*1024));
printf(" Caches\n");
printf(" L2 size: %d KiB\n", props.l2CacheSize / (1024));
printf(" Total const mem: %ld KiB\n", props.totalConstMem / (1024));
printf(" Shared mem per block: %ld KiB\n", props.sharedMemPerBlock / (1024));
printf(" Other\n");
printf(" Warp size: %d\n", props.warpSize);
//printf(" Single to double perf. ratio: %dx\n", props.singleToDoublePrecisionPerfRatio); //Not supported with older CUDA versions
printf(" Stream priorities supported: %d\n", props.streamPrioritiesSupported);
printf("--------------------------------------------------\n");
}
cudaSetDevice(initial_device);
}
void load_forcing_dconsts_cuda_core(ForcingParams* forcing_params)
{
CUDA_ERRCHK( cudaMemcpyToSymbol(d_FORCING_ENABLED, &forcing_params->forcing_enabled, sizeof(bool)) );
//Copy forcing coefficients to the device's constant memory
const size_t k_idx = forcing_params->k_idx;
CUDA_ERRCHK( cudaMemcpyToSymbol(d_KK_VEC_X, &forcing_params->kk_x[k_idx], sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_KK_VEC_Y, &forcing_params->kk_y[k_idx], sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_KK_VEC_Z, &forcing_params->kk_z[k_idx], sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_FORCING_KK_PART_X, &forcing_params->kk_part_x, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_FORCING_KK_PART_Y, &forcing_params->kk_part_y, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_FORCING_KK_PART_Z, &forcing_params->kk_part_z, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_PHI, &forcing_params->phi, sizeof(real)) );
}
void load_hydro_dconsts_cuda_core(CParamConfig* cparams, RunConfig* run_params, const vec3i start_idx)
{
//Grid dimensions
CUDA_ERRCHK( cudaMemcpyToSymbol(d_nx, &(cparams->nx), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_ny, &(cparams->ny), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_nz, &(cparams->nz), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_mx, &(cparams->mx), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_my, &(cparams->my), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_mz, &(cparams->mz), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_nx_min, &(cparams->nx_min), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_nx_max, &(cparams->nx_max), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_ny_min, &(cparams->ny_min), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_ny_max, &(cparams->ny_max), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_nz_min, &(cparams->nz_min), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_nz_max, &(cparams->nz_max), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DSX, &(cparams->dsx), sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DSY, &(cparams->dsy), sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DSZ, &(cparams->dsz), sizeof(real)) );
const real dsx_offset = cparams->dsx*start_idx.x;
const real dsy_offset = cparams->dsy*start_idx.y;
const real dsz_offset = cparams->dsz*start_idx.z;
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DSX_OFFSET, &dsx_offset, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DSY_OFFSET, &dsy_offset, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DSZ_OFFSET, &dsz_offset, sizeof(real)) );
const real xorig = XORIG;
const real yorig = YORIG;
const real zorig = ZORIG;
CUDA_ERRCHK( cudaMemcpyToSymbol(d_XORIG, &xorig, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_YORIG, &yorig, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_ZORIG, &zorig, sizeof(real)) );
//Diff constants
const real diff1_dx = 1.0/(60.0*cparams->dsx);
const real diff1_dy = 1.0/(60.0*cparams->dsy);
const real diff1_dz = 1.0/(60.0*cparams->dsz);
const real diff2_dx = 1.0/(180.0*cparams->dsx*cparams->dsx);
const real diff2_dy = 1.0/(180.0*cparams->dsy*cparams->dsy);
const real diff2_dz = 1.0/(180.0*cparams->dsz*cparams->dsz);
const real diffmn_dxdy = (1.0/720.0)*(1.0/cparams->dsx)*(1.0/cparams->dsy);
const real diffmn_dydz = (1.0/720.0)*(1.0/cparams->dsy)*(1.0/cparams->dsz);
const real diffmn_dxdz = (1.0/720.0)*(1.0/cparams->dsz)*(1.0/cparams->dsx);
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFF1_DX_DIV, &diff1_dx, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFF1_DY_DIV, &diff1_dy, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFF1_DZ_DIV, &diff1_dz, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFF2_DX_DIV, &diff2_dx, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFF2_DY_DIV, &diff2_dy, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFF2_DZ_DIV, &diff2_dz, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFFMN_DXDY_DIV, &diffmn_dxdy, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFFMN_DYDZ_DIV, &diffmn_dydz, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFFMN_DXDZ_DIV, &diffmn_dxdz, sizeof(real)) );
//Viscosity
CUDA_ERRCHK( cudaMemcpyToSymbol(d_NU_VISC, &(run_params->nu_visc), sizeof(real)) );
//Speed of sound
const real cs2_sound = pow(run_params->cs_sound, 2.0);
CUDA_ERRCHK( cudaMemcpyToSymbol(d_CS2_SOUND, &cs2_sound, sizeof(real)) );
//Induction
CUDA_ERRCHK( cudaMemcpyToSymbol(d_ETA, &(run_params->eta), sizeof(real)) );
}
void init_grid_cuda_core(Grid* d_grid, Grid* d_grid_dst, CParamConfig* cparams)
{
//Print the GPU configuration
print_gpu_config();
const size_t grid_size_bytes = sizeof(real) * cparams->mx * cparams->my * cparams->mz;
//Init device arrays
for (int i=0; i < NUM_ARRS; ++i) {
CUDA_ERRCHK( cudaMalloc(&(d_grid->arr[i]), grid_size_bytes) );
CUDA_ERRCHK( cudaMemset(d_grid->arr[i], INT_MAX, grid_size_bytes) );
CUDA_ERRCHK( cudaMalloc(&(d_grid_dst->arr[i]), grid_size_bytes) );
CUDA_ERRCHK( cudaMemset(d_grid_dst->arr[i], INT_MAX, grid_size_bytes) );
}
}
void destroy_grid_cuda_core(Grid* d_grid, Grid* d_grid_dst)
{
for (int i=0; i < NUM_ARRS; ++i) {
CUDA_ERRCHK( cudaFree(d_grid->arr[i]) );
CUDA_ERRCHK( cudaFree(d_grid_dst->arr[i]) );
}
}
void load_grid_cuda_core(Grid* d_grid, CParamConfig* d_cparams, vec3i* h_start_idx, Grid* h_grid, CParamConfig* h_cparams)
{
const size_t grid_size_bytes = sizeof(real)* d_cparams->mx * d_cparams->my * d_cparams->mz;
const size_t slice_size = h_cparams->mx*h_cparams->my;
for (int w=0; w < NUM_ARRS; ++w) {
CUDA_ERRCHK( cudaMemcpyAsync(&(d_grid->arr[w][0]), &(h_grid->arr[w][h_start_idx->z*slice_size]), grid_size_bytes, cudaMemcpyHostToDevice) );//NOTE: if stream not specified, uses the default stream->non-async behaviour
}
}
void store_grid_cuda_core(Grid* h_grid, CParamConfig* h_cparams, Grid* d_grid, CParamConfig* d_cparams, vec3i* h_start_idx)
{
const size_t grid_size_bytes = sizeof(real)* d_cparams->mx * d_cparams->my * d_cparams->nz;
const size_t slice_size = h_cparams->mx * h_cparams->my;
const size_t z_offset = BOUND_SIZE * slice_size;
for (int w=0; w < NUM_ARRS; ++w) {
CUDA_ERRCHK( cudaMemcpyAsync(&(h_grid->arr[w][z_offset + h_start_idx->z*slice_size]), &(d_grid->arr[w][z_offset]), grid_size_bytes, cudaMemcpyDeviceToHost) ); //NOTE: if stream not specified, uses the default stream->non-async behaviour
}
}
void store_slice_cuda_core(Slice* h_slice, CParamConfig* h_cparams, RunConfig* h_run_params, Slice* d_slice, CParamConfig* d_cparams, vec3i* h_start_idx)
{
if (h_run_params->slice_axis != 'z') CRASH("Slice axis other than z not yet supported!");
Slice buffer;
slice_malloc(&buffer, d_cparams, h_run_params);
cudaStream_t stream;
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
const size_t slice_size_bytes = sizeof(real) * d_cparams->mx * d_cparams->my;
for (int w=0; w < NUM_SLICES; ++w)
CUDA_ERRCHK( cudaMemcpyAsync(buffer.arr[w], d_slice->arr[w], slice_size_bytes, cudaMemcpyDeviceToHost, stream) );
CUDA_ERRCHK( cudaStreamSynchronize(stream) ); // ensure the async copies into the staging buffer have finished before reading it on the host
const size_t row_size_bytes = sizeof(real) * d_cparams->nx;
for (int w=0; w < NUM_SLICES; ++w)
for (int j=d_cparams->ny_min; j < d_cparams->ny_max; ++j)
memcpy(&h_slice->arr[w][h_cparams->nx_min + (j+h_start_idx->y)*h_cparams->mx],
&buffer.arr[w][d_cparams->nx_min + j*d_cparams->mx], row_size_bytes);
cudaStreamDestroy(stream);
slice_free(&buffer);
}
void init_halo_cuda_core(real* d_halo)
{
printf("init_halo_cuda_core\n");
/*
* Allocate memory for d_halo here
* CUDA_ERRCHK( cudaMalloc(....));
*/
}
void destroy_halo_cuda_core(real* d_halo)
{
printf("destroy_halo_cuda_core\n");
/*
* Free d_halo here
* CUDA_ERRCHK( cudaFree(d_halo) );
*/
}
void load_outer_halo_cuda_core(Grid* d_grid, real* d_halobuffer, CParamConfig* d_cparams,
Grid* h_grid, real* h_halobuffer, CParamConfig* h_cparams, vec3i* h_start_idx)
{
printf("load_outer_halo_cuda_core\n");
/*
Load outer halo from h_halobuffer to d_halobuffer (copyouterhalostodevice())
and update the halo of the device (fillhalos)
*/
}
void store_internal_halo_cuda_core(Grid* h_grid, real* h_halobuffer, CParamConfig* h_cparams, vec3i* h_start_idx,
Grid* d_grid, real* d_halobuffer, CParamConfig* d_cparams)
{
printf("store_internal_halo_cuda_core\n");
/*
Store internal halos in d_halobuffer to host memory in h_halobuffer/h_grid
*/
}
|
3f8432b4b0fa9ee4e5a670323e27fe6aa261b94f.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include "backend/kernel_compiler/gpu/cuda_impl/unpack.cuh"
template <typename T>
__global__ void Unpack(const int size, const int output_num,
const int dims_after_axis, T** outputs, const T* input) {
for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
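// The flat input index decomposes as [cycle][output index][offset after the unpack axis]:
// each output tensor receives the contiguous dims_after_axis-sized chunk that belongs to it.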
int cycle = pos / (output_num * dims_after_axis);
int cur_output_index = pos % (output_num * dims_after_axis) / dims_after_axis;
int local_index = pos % (output_num * dims_after_axis) % dims_after_axis;
outputs[cur_output_index][cycle * dims_after_axis + local_index] = input[pos];
}
return;
}
template <typename T>
void UnpackKernel(const int size, const int output_num,
const int dims_after_axis, T** outputs, const T* input,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( Unpack), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, output_num,
dims_after_axis, outputs, input);
return;
}
template void UnpackKernel(const int size, const int output_num,
const int dims_after_axis, float** outputs, const float* input,
hipStream_t cuda_stream);
template void UnpackKernel(const int size, const int output_num,
const int dims_after_axis, half** outputs, const half* input,
hipStream_t cuda_stream);
template void UnpackKernel(const int size, const int output_num,
const int dims_after_axis, int** outputs, const int* input,
hipStream_t cuda_stream);
template void UnpackKernel(const int size, const int output_num,
const int dims_after_axis, int16_t** outputs, const int16_t* input,
hipStream_t cuda_stream);
template void UnpackKernel(const int size, const int output_num,
const int dims_after_axis, unsigned char** outputs, const unsigned char* input,
hipStream_t cuda_stream);
template void UnpackKernel(const int size, const int output_num,
const int dims_after_axis, bool** outputs, const bool* input,
hipStream_t cuda_stream);
| 3f8432b4b0fa9ee4e5a670323e27fe6aa261b94f.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include "backend/kernel_compiler/gpu/cuda_impl/unpack.cuh"
template <typename T>
__global__ void Unpack(const int size, const int output_num,
const int dims_after_axis, T** outputs, const T* input) {
for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int cycle = pos / (output_num * dims_after_axis);
int cur_output_index = pos % (output_num * dims_after_axis) / dims_after_axis;
int local_index = pos % (output_num * dims_after_axis) % dims_after_axis;
outputs[cur_output_index][cycle * dims_after_axis + local_index] = input[pos];
}
return;
}
template <typename T>
void UnpackKernel(const int size, const int output_num,
const int dims_after_axis, T** outputs, const T* input,
cudaStream_t cuda_stream) {
Unpack<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, output_num,
dims_after_axis, outputs, input);
return;
}
template void UnpackKernel(const int size, const int output_num,
const int dims_after_axis, float** outputs, const float* input,
cudaStream_t cuda_stream);
template void UnpackKernel(const int size, const int output_num,
const int dims_after_axis, half** outputs, const half* input,
cudaStream_t cuda_stream);
template void UnpackKernel(const int size, const int output_num,
const int dims_after_axis, int** outputs, const int* input,
cudaStream_t cuda_stream);
template void UnpackKernel(const int size, const int output_num,
const int dims_after_axis, int16_t** outputs, const int16_t* input,
cudaStream_t cuda_stream);
template void UnpackKernel(const int size, const int output_num,
const int dims_after_axis, unsigned char** outputs, const unsigned char* input,
cudaStream_t cuda_stream);
template void UnpackKernel(const int size, const int output_num,
const int dims_after_axis, bool** outputs, const bool* input,
cudaStream_t cuda_stream);
|
de399991c6c2b502fd971f88120aad7bcf72f7df.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hipfftXt.h>
/*
* An example usage of the Multi-GPU cuFFT XT library introduced in CUDA 6. This
* example performs a 1D forward FFT across all devices detected in the system.
*/
/*
* Create N fake samplings along the function cos(x). These samplings will be
* stored as single-precision floating-point values.
*/
void generate_fake_samples(int N, float **out)
{
int i;
float *result = (float *)malloc(sizeof(float) * N);
double delta = M_PI / 4.0;
for (i = 0; i < N; i++)
{
result[i] = cos(i * delta);
}
*out = result;
}
/*
 * Convert a real-valued vector r of length N to a complex-valued vector.
*/
void real_to_complex(float *r, hipfftComplex **complx, int N)
{
int i;
(*complx) = (hipfftComplex *)malloc(sizeof(hipfftComplex) * N);
for (i = 0; i < N; i++)
{
(*complx)[i].x = r[i];
(*complx)[i].y = 0;
}
}
/*
* Retrieve device IDs for all CUDA devices in the current system.
*/
int getAllGpus(int **gpus)
{
int i;
int nGpus;
CHECK(hipGetDeviceCount(&nGpus));
*gpus = (int *)malloc(sizeof(int) * nGpus);
for (i = 0; i < nGpus; i++)
{
(*gpus)[i] = i;
}
return nGpus;
}
int main(int argc, char **argv)
{
int i;
int N = 1024;
float *samples;
hipfftComplex *complexSamples;
int *gpus;
size_t *workSize;
hipfftHandle plan = 0;
cudaLibXtDesc *dComplexSamples;
int nGPUs = getAllGpus(&gpus);
nGPUs = nGPUs > 2 ? 2 : nGPUs;
workSize = (size_t *)malloc(sizeof(size_t) * nGPUs);
// Setup the cuFFT Multi-GPU plan
CHECK_CUFFT(hipfftCreate(&plan));
// CHECK_CUFFT(hipfftPlan1d(&plan, N, HIPFFT_C2C, 1));
CHECK_CUFFT(cufftXtSetGPUs(plan, 2, gpus));
CHECK_CUFFT(hipfftMakePlan1d(plan, N, HIPFFT_C2C, 1, workSize));
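// Note the required ordering: the plan is created empty, the participating GPUs are
// attached with cufftXtSetGPUs, and only then is the 1D plan generated; workSize
// receives the per-GPU work-area sizes.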
// Generate inputs
generate_fake_samples(N, &samples);
real_to_complex(samples, &complexSamples, N);
hipfftComplex *complexFreq = (hipfftComplex *)malloc(
sizeof(hipfftComplex) * N);
// Allocate memory across multiple GPUs and transfer the inputs into it
CHECK_CUFFT(cufftXtMalloc(plan, &dComplexSamples, CUFFT_XT_FORMAT_INPLACE));
CHECK_CUFFT(cufftXtMemcpy(plan, dComplexSamples, complexSamples,
CUFFT_COPY_HOST_TO_DEVICE));
// Execute a complex-to-complex 1D FFT across multiple GPUs
CHECK_CUFFT(cufftXtExecDescriptorC2C(plan, dComplexSamples, dComplexSamples,
HIPFFT_FORWARD));
// Retrieve the results from multiple GPUs into host memory
CHECK_CUFFT(cufftXtMemcpy(plan, complexFreq, dComplexSamples,
CUFFT_COPY_DEVICE_TO_HOST));
printf("Fourier Coefficients:\n");
for (i = 0; i < 30; i++)
{
printf(" %d: (%2.4f, %2.4f)\n", i + 1, complexFreq[i].x,
complexFreq[i].y);
}
free(gpus);
free(samples);
free(complexSamples);
free(complexFreq);
free(workSize);
CHECK_CUFFT(cufftXtFree(dComplexSamples));
CHECK_CUFFT(hipfftDestroy(plan)); | de399991c6c2b502fd971f88120aad7bcf72f7df.cu | #include "../common/common.h"
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cufftXt.h>
/*
* An example usage of the Multi-GPU cuFFT XT library introduced in CUDA 6. This
* example performs a 1D forward FFT across all devices detected in the system.
*/
/*
* Create N fake samplings along the function cos(x). These samplings will be
* stored as single-precision floating-point values.
*/
void generate_fake_samples(int N, float **out)
{
int i;
float *result = (float *)malloc(sizeof(float) * N);
double delta = M_PI / 4.0;
for (i = 0; i < N; i++)
{
result[i] = cos(i * delta);
}
*out = result;
}
/*
 * Convert a real-valued vector r of length N to a complex-valued vector.
*/
void real_to_complex(float *r, cufftComplex **complx, int N)
{
int i;
(*complx) = (cufftComplex *)malloc(sizeof(cufftComplex) * N);
for (i = 0; i < N; i++)
{
(*complx)[i].x = r[i];
(*complx)[i].y = 0;
}
}
/*
* Retrieve device IDs for all CUDA devices in the current system.
*/
int getAllGpus(int **gpus)
{
int i;
int nGpus;
CHECK(cudaGetDeviceCount(&nGpus));
*gpus = (int *)malloc(sizeof(int) * nGpus);
for (i = 0; i < nGpus; i++)
{
(*gpus)[i] = i;
}
return nGpus;
}
int main(int argc, char **argv)
{
int i;
int N = 1024;
float *samples;
cufftComplex *complexSamples;
int *gpus;
size_t *workSize;
cufftHandle plan = 0;
cudaLibXtDesc *dComplexSamples;
int nGPUs = getAllGpus(&gpus);
nGPUs = nGPUs > 2 ? 2 : nGPUs;
workSize = (size_t *)malloc(sizeof(size_t) * nGPUs);
// Setup the cuFFT Multi-GPU plan
CHECK_CUFFT(cufftCreate(&plan));
// CHECK_CUFFT(cufftPlan1d(&plan, N, CUFFT_C2C, 1));
CHECK_CUFFT(cufftXtSetGPUs(plan, 2, gpus));
CHECK_CUFFT(cufftMakePlan1d(plan, N, CUFFT_C2C, 1, workSize));
// Generate inputs
generate_fake_samples(N, &samples);
real_to_complex(samples, &complexSamples, N);
cufftComplex *complexFreq = (cufftComplex *)malloc(
sizeof(cufftComplex) * N);
// Allocate memory across multiple GPUs and transfer the inputs into it
CHECK_CUFFT(cufftXtMalloc(plan, &dComplexSamples, CUFFT_XT_FORMAT_INPLACE));
CHECK_CUFFT(cufftXtMemcpy(plan, dComplexSamples, complexSamples,
CUFFT_COPY_HOST_TO_DEVICE));
// Execute a complex-to-complex 1D FFT across multiple GPUs
CHECK_CUFFT(cufftXtExecDescriptorC2C(plan, dComplexSamples, dComplexSamples,
CUFFT_FORWARD));
// Retrieve the results from multiple GPUs into host memory
CHECK_CUFFT(cufftXtMemcpy(plan, complexFreq, dComplexSamples,
CUFFT_COPY_DEVICE_TO_HOST));
printf("Fourier Coefficients:\n");
for (i = 0; i < 30; i++)
{
printf(" %d: (%2.4f, %2.4f)\n", i + 1, complexFreq[i].x,
complexFreq[i].y);
}
free(gpus);
free(samples);
free(complexSamples);
free(complexFreq);
free(workSize);
CHECK_CUFFT(cufftXtFree(dComplexSamples));
CHECK_CUFFT(cufftDestroy(plan)); |
26c5e7fb6d736a933913b8df197d2708f99a962f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include "../COMMON/commons.cuh"
#include <stdio.h>
struct Sq{
__device__ __host__ float operator()(const float& a){
return a*a;
}
};
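// Marking operator() as both __device__ and __host__ lets the same functor be used by
// thrust::transform on the device here and, if needed, by host-side algorithms as well.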
int main(int argc, char **argv){
// Create a device_vector of float (single precision) elements, with 20 elements
thrust::device_vector<float> v1(20);
// Fill it with a sequence starting from 0 with step 2
thrust::sequence(v1.begin(), v1.end(), 0, 2);
printf("Sequence generated\n");
// Use a in-place unary transformation algorithm already defined in thrust
// Syntax is transform( beginning_of_object_to_transform, end_of_object_to_transform,
// beginning_of_where_to_put_the_result, operation_to_use )
//thrust::transform(v1.begin(), v1.end(), v1.begin(), thrust::negate<float>());
thrust::transform(v1.begin(), v1.end(), v1.begin(), Sq());
printf("Sequence transformed\n");
// Print the result
hipLaunchKernelGGL(( print_dev), dim3(1),dim3(1), 0, 0, thrust::raw_pointer_cast(v1.data()), static_cast<int>(v1.size()));
printf("Sequence printed\n");
// Check for errors
CUDA_CHECK_ERROR();
// Synchronize to be sure to finish operations before program exits
SYNCGPU();
return 0;
}
| 26c5e7fb6d736a933913b8df197d2708f99a962f.cu | #include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include "../COMMON/commons.cuh"
#include <stdio.h>
struct Sq{
__device__ __host__ float operator()(const float& a){
return a*a;
}
};
int main(int argc, char **argv){
// Create a device_vector of float (single precision) elements, with 20 elements
thrust::device_vector<float> v1(20);
// Fill it with a sequence starting from 0 with step 2
thrust::sequence(v1.begin(), v1.end(), 0, 2);
printf("Sequence generated\n");
// Use a in-place unary transformation algorithm already defined in thrust
// Syntax is transform( beginning_of_object_to_transform, end_of_object_to_transform,
// beginning_of_where_to_put_the_result, operation_to_use )
//thrust::transform(v1.begin(), v1.end(), v1.begin(), thrust::negate<float>());
thrust::transform(v1.begin(), v1.end(), v1.begin(), Sq());
printf("Sequence transformed\n");
// Print the result
print_dev<<<1,1>>>(thrust::raw_pointer_cast(v1.data()), static_cast<int>(v1.size()));
printf("Sequence printed\n");
// Check for errors
CUDA_CHECK_ERROR();
// Synchronize to be sure to finish operations before program exits
SYNCGPU();
return 0;
}
|
46938db963dfe2bd7600f6506f8eadd61a8a6b19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDAlib.cuh"
#include "WAVFilter.h"
#include <chrono>
#define BLOCK_SIZE 64
// reduce bit value to half means -6dB
__global__ void FilterCompressorBlock(char* d_in, char* d_out, double threshold, double ratio, int bitperSample)
{
double max_dB = 6.02 * bitperSample; // approx. value of SNR. https://en.wikipedia.org/wiki/Audio_bit_depth#Bit_rate_and_file_size
int byterate = bitperSample / 8;
short d_data = 0;
int x = (blockIdx.x * blockDim.x + threadIdx.x) * byterate;
// Load one element per thread from device memory and store it
memcpy(&d_data, (d_in + x), byterate);
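// The lines below map |sample| onto a dB-style scale (max_dB approximates the dynamic
// range at ~6.02 dB per bit); levels above `threshold` are compressed by `ratio` and then
// converted back to a linear sample value.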
double dB;
if (d_data < 0)
dB = max_dB + 10 * log10((double)(-1 * d_data)) - 3.012 * bitperSample;
else
dB = max_dB + 10 * log10((double)(d_data)) - 3.012 * bitperSample;
if (dB > threshold)
{
dB = ratio * (dB - threshold) + threshold;
dB += (3.012 * bitperSample - max_dB);
dB /= 10;
short v = (short)pow(10.0, dB);
d_data = (d_data < 0) ? (-1 * v) : v;
}
memcpy((d_out + x), &d_data, byterate);
}
void FilterCompressor(Audio_WAV& origin, bool useCUDA, double threshold, double ratio)
{
WAV_HEADER origin_header = origin.get_header();
size_t memSize = origin_header.Subchunk2Size;
char* origin_bytes = origin.get_audio();
int bitperSample = origin_header.bitsPerSample;
int byterate = bitperSample / 8;
if (useCUDA)
{
//pointer for device
char* d_in, * d_out;
int numBlocks = (memSize / BLOCK_SIZE) + 1; // ceiling
int sharedMemSize = BLOCK_SIZE * byterate; //number byte for each thread
hipMalloc((void**)& d_in, memSize);
hipMalloc((void**)& d_out, memSize);
hipMemcpy(d_in, origin_bytes, memSize, hipMemcpyHostToDevice);
// launch kernel
dim3 dimGrid(numBlocks);
dim3 dimBlock(BLOCK_SIZE / byterate);
FilterCompressorBlock << < dimGrid, dimBlock, sharedMemSize >> > (d_in, d_out, threshold, ratio, bitperSample);
hipDeviceSynchronize();
memset(origin_bytes, 0, memSize);
char* origin_archive = new char[memSize];
hipMemcpy(origin_archive, d_out, memSize, hipMemcpyDeviceToHost);
origin.set_audio(origin_archive);
hipFree(d_in);
hipFree(d_out);
}
else
{
double max_dB = 6.02 * bitperSample; // approx. value of SNR. https://en.wikipedia.org/wiki/Audio_bit_depth#Bit_rate_and_file_size
for (int i = 0; i < memSize; i += byterate)
{
short value;
memcpy(&value, &origin_bytes[i], byterate);
double dB;
if (value < 0)
dB = max_dB + 10 * log10((double)(-1 * value)) - 3.012 * bitperSample;
else
dB = max_dB + 10 * log10((double)(value)) - 3.012 * bitperSample;
if (dB > threshold)
{
dB = ratio * (dB - threshold) + threshold;
dB += (3.012 * bitperSample - max_dB);
dB /= 10;
short v = (short)pow(10, dB);
value = (value < 0) ? (-1 * v) : v;
}
memcpy(&origin_bytes[i], &value, byterate);
}
}
}
| 46938db963dfe2bd7600f6506f8eadd61a8a6b19.cu | #include "CUDAlib.cuh"
#include "WAVFilter.h"
#include <chrono>
#define BLOCK_SIZE 64
// halving a sample value reduces its level by about 6 dB (-6 dB)
__global__ void FilterCompressorBlock(char* d_in, char* d_out, double threshold, double ratio, int bitperSample)
{
double max_dB = 6.02 * bitperSample; // approx. value of SNR. https://en.wikipedia.org/wiki/Audio_bit_depth#Bit_rate_and_file_size
int byterate = bitperSample / 8;
short d_data = 0;
int x = (blockIdx.x * blockDim.x + threadIdx.x) * byterate;
	// Load one sample per thread from device memory into a local register
memcpy(&d_data, (d_in + x), byterate);
double dB;
if (d_data < 0)
dB = max_dB + 10 * log10((double)(-1 * d_data)) - 3.012 * bitperSample;
else
dB = max_dB + 10 * log10((double)(d_data)) - 3.012 * bitperSample;
if (dB > threshold)
{
dB = ratio * (dB - threshold) + threshold;
dB += (3.012 * bitperSample - max_dB);
dB /= 10;
short v = (short)pow(10.0, dB);
d_data = (d_data < 0) ? (-1 * v) : v;
}
memcpy((d_out + x), &d_data, byterate);
}
void FilterCompressor(Audio_WAV& origin, bool useCUDA, double threshold, double ratio)
{
WAV_HEADER origin_header = origin.get_header();
size_t memSize = origin_header.Subchunk2Size;
char* origin_bytes = origin.get_audio();
int bitperSample = origin_header.bitsPerSample;
int byterate = bitperSample / 8;
if (useCUDA)
{
//pointer for device
char* d_in, * d_out;
		int numBlocks = (memSize / BLOCK_SIZE) + 1; //ceiling
		int sharedMemSize = BLOCK_SIZE * byterate; // dynamic shared-memory bytes requested per block
cudaMalloc((void**)& d_in, memSize);
cudaMalloc((void**)& d_out, memSize);
cudaMemcpy(d_in, origin_bytes, memSize, cudaMemcpyHostToDevice);
// launch kernel
dim3 dimGrid(numBlocks);
dim3 dimBlock(BLOCK_SIZE / byterate);
FilterCompressorBlock << < dimGrid, dimBlock, sharedMemSize >> > (d_in, d_out, threshold, ratio, bitperSample);
cudaThreadSynchronize();
memset(origin_bytes, 0, memSize);
char* origin_archive = new char[memSize];
cudaMemcpy(origin_archive, d_out, memSize, cudaMemcpyDeviceToHost);
origin.set_audio(origin_archive);
cudaFree(d_in);
cudaFree(d_out);
}
else
{
double max_dB = 6.02 * bitperSample; // approx. value of SNR. https://en.wikipedia.org/wiki/Audio_bit_depth#Bit_rate_and_file_size
for (int i = 0; i < memSize; i += byterate)
{
short value;
memcpy(&value, &origin_bytes[i], byterate);
double dB;
if (value < 0)
dB = max_dB + 10 * log10((double)(-1 * value)) - 3.012 * bitperSample;
else
dB = max_dB + 10 * log10((double)(value)) - 3.012 * bitperSample;
if (dB > threshold)
{
dB = ratio * (dB - threshold) + threshold;
dB += (3.012 * bitperSample - max_dB);
dB /= 10;
short v = (short)pow(10, dB);
value = (value < 0) ? (-1 * v) : v;
}
memcpy(&origin_bytes[i], &value, byterate);
}
}
}
|
326a22b51e1cc7c33740182478af638247977ff4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "core/block_array.h"
#include "helper_cuda.h"
#include <device_launch_parameters.h>
////////////////////
/// Device code
////////////////////
__global__
void BlockArrayResetKernel(
Block* blocks,
int block_count
) {
const uint block_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (block_idx < block_count) {
blocks[block_idx].Clear();
}
}
////////////////////
/// Host code
//////////////////////
__host__
BlockArray::BlockArray(uint block_count) {
Resize(block_count);
}
//BlockArray::~BlockArray() {
// Free();
//}
__host__
void BlockArray::Alloc(uint block_count) {
if (! is_allocated_on_gpu_) {
block_count_ = block_count;
checkCudaErrors(hipMalloc(&blocks_, sizeof(Block) * block_count));
is_allocated_on_gpu_ = true;
}
}
__host__
void BlockArray::Free() {
if (is_allocated_on_gpu_) {
checkCudaErrors(hipFree(blocks_));
block_count_ = 0;
blocks_ = NULL;
is_allocated_on_gpu_ = false;
}
}
__host__
void BlockArray::Resize(uint block_count) {
if (is_allocated_on_gpu_) {
Free();
}
Alloc(block_count);
Reset();
}
__host__
void BlockArray::Reset() {
const uint threads_per_block = 64;
if (block_count_ == 0) return;
// NOTE: this block is the parallel unit in CUDA, not the data structure Block
const uint blocks = (block_count_ + threads_per_block - 1) / threads_per_block;
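  // Example (added): with block_count_ = 200 and threads_per_block = 64 this yields
  // (200 + 63) / 64 = 4 blocks of 64 threads; threads whose block_idx is >= block_count
  // do nothing, thanks to the bounds check in BlockArrayResetKernel.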
const dim3 grid_size(blocks, 1);
const dim3 block_size(threads_per_block, 1);
hipLaunchKernelGGL(( BlockArrayResetKernel), dim3(grid_size), dim3(block_size), 0, 0, blocks_, block_count_);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
} | 326a22b51e1cc7c33740182478af638247977ff4.cu | #include "core/block_array.h"
#include "helper_cuda.h"
#include <device_launch_parameters.h>
////////////////////
/// Device code
////////////////////
__global__
void BlockArrayResetKernel(
Block* blocks,
int block_count
) {
const uint block_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (block_idx < block_count) {
blocks[block_idx].Clear();
}
}
////////////////////
/// Host code
//////////////////////
__host__
BlockArray::BlockArray(uint block_count) {
Resize(block_count);
}
//BlockArray::~BlockArray() {
// Free();
//}
__host__
void BlockArray::Alloc(uint block_count) {
if (! is_allocated_on_gpu_) {
block_count_ = block_count;
checkCudaErrors(cudaMalloc(&blocks_, sizeof(Block) * block_count));
is_allocated_on_gpu_ = true;
}
}
__host__
void BlockArray::Free() {
if (is_allocated_on_gpu_) {
checkCudaErrors(cudaFree(blocks_));
block_count_ = 0;
blocks_ = NULL;
is_allocated_on_gpu_ = false;
}
}
__host__
void BlockArray::Resize(uint block_count) {
if (is_allocated_on_gpu_) {
Free();
}
Alloc(block_count);
Reset();
}
__host__
void BlockArray::Reset() {
const uint threads_per_block = 64;
if (block_count_ == 0) return;
// NOTE: this block is the parallel unit in CUDA, not the data structure Block
const uint blocks = (block_count_ + threads_per_block - 1) / threads_per_block;
const dim3 grid_size(blocks, 1);
const dim3 block_size(threads_per_block, 1);
BlockArrayResetKernel<<<grid_size, block_size>>>(blocks_, block_count_);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
} |
9931d0b7ac49db16e04a7b3fd5eb3347c0d4993b.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include "testlayers.h"
#include <NDArray.h>
#include <NDArrayFactory.h>
#include <Context.h>
#include <Node.h>
#include <graph/Variable.h>
#include <graph/VariableSpace.h>
#include <specials_cuda.h>
#include <TAD.h>
#include <MmulHelper.h>
#include <hip/hip_runtime.h>
using namespace nd4j;
using namespace nd4j::graph;
class CudaBasicsTests1 : public testing::Test {
public:
};
//////////////////////////////////////////////////////////////////////////
static hipError_t allocateDeviceMem(LaunchContext& lc, std::vector<void*>& devicePtrs, const std::vector<std::pair<void*,size_t>>& hostData) {
if(devicePtrs.size() != hostData.size())
		throw std::invalid_argument("prepareDataForCuda: the two input std::vectors should have the same size!");
hipError_t cudaResult;
void* reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); if(cudaResult != 0) return cudaResult;
int* allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); if(cudaResult != 0) return cudaResult;
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
hipStream_t stream = *lc.getCudaStream();
for(int i = 0; i < devicePtrs.size(); ++i) {
cudaResult = hipMalloc(reinterpret_cast<void **>(&devicePtrs[i]), hostData[i].second); if(cudaResult != 0) return cudaResult;
hipMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, hipMemcpyHostToDevice, stream);
}
return cudaResult;
}
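// Note (added): allocateDeviceMem above registers scratch reduction/allocation buffers with the
// LaunchContext and mirrors every (host pointer, byte size) pair of hostData into devicePtrs via
// async host-to-device copies on the context's stream. A minimal usage sketch (names illustrative):
//
//   std::vector<std::pair<void*,size_t>> hostData;
//   hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int));
//   std::vector<void*> devicePtrs(hostData.size(), nullptr);
//   hipError_t err = allocateDeviceMem(lc, devicePtrs, hostData);   // lc wraps a hipStream_t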
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, TestPairwise_1) {
// allocating host-side arrays
auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5});
auto z = NDArrayFactory::create<double>('c', { 5 }, {0,0,0,0,0});
auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 });
// making raw buffers
Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX;
hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT());
ASSERT_EQ(0, res);
res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT());
ASSERT_EQ(0, res);
res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo()));
ASSERT_EQ(0, res);
Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(hipStream_t));
CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(hipStream_t));
hipError_t dZ = hipStreamCreate(reinterpret_cast<hipStream_t *>(&nativeStream));
auto stream = reinterpret_cast<hipStream_t *>(&nativeStream);
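    // Note (added): hipStreamCreate above writes the stream handle into the nativeStream variable
    // itself (via &nativeStream), so 'stream' points at that variable; the block malloc'd for
    // nativeStream is not dereferenced afterwards.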
hipMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), hipMemcpyHostToDevice, *stream);
hipMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), hipMemcpyHostToDevice, *stream);
LaunchContext lc(stream, nullptr, nullptr);
NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, nullptr, x.shapeInfo(), devBufferPtrX, reinterpret_cast<Nd4jLong*>(devShapePtrX), nullptr, x.shapeInfo(), devBufferPtrX, reinterpret_cast<Nd4jLong*>(devShapePtrX), nullptr, z.shapeInfo(), devBufferPtrZ, reinterpret_cast<Nd4jLong*>(devShapePtrX), nullptr);
res = hipStreamSynchronize(*stream);
ASSERT_EQ(0, res);
hipMemcpyAsync(z.buffer(), devBufferPtrZ, z.lengthOf() * x.sizeOfT(), hipMemcpyDeviceToHost, *stream);
res = hipStreamSynchronize(*stream);
ASSERT_EQ(0, res);
hipFree(devBufferPtrX);
hipFree(devBufferPtrZ);
hipFree(devShapePtrX);
for (int e = 0; e < z.lengthOf(); e++) {
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
}
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduceScalar_1) {
NDArray x1('c', {2,2}, {0, 1, 2, 3}, nd4j::DataType::INT32);
NDArray x2('c', {2,2}, {0.5, 1.5, -4.5, 3.5}, nd4j::DataType::BFLOAT16);
NDArray x3('c', {2,2}, {0, -1, 0, 1}, nd4j::DataType::BOOL);
NDArray scalar('c', {0}, {0}, nd4j::DataType::INT64);
NDArray exp1('c', {0}, {3}, nd4j::DataType::INT64);
NDArray exp2('c', {0}, {2}, nd4j::DataType::INT64);
NDArray exp3('c', {0}, {1}, nd4j::DataType::INT64);
void *dX1, *dX2, *dX3, *dZ;
Nd4jLong *dX1ShapeInfo, *dX2ShapeInfo, *dX3ShapeInfo, *dZShapeInfo;
hipError_t cudaResult;
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX1), x1.lengthOf() * x1.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX2), x2.lengthOf() * x2.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX3), x3.lengthOf() * x3.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ), scalar.lengthOf() * scalar.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX1ShapeInfo), shape::shapeInfoByteLength(x1.getShapeInfo())); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX2ShapeInfo), shape::shapeInfoByteLength(x2.getShapeInfo())); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX3ShapeInfo), shape::shapeInfoByteLength(x3.getShapeInfo())); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dZShapeInfo), shape::shapeInfoByteLength(scalar.getShapeInfo())); ASSERT_EQ(0, cudaResult);
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
hipMemcpyAsync(dX1, x1.buffer(), x1.lengthOf() * x1.sizeOfT(), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX2, x2.buffer(), x2.lengthOf() * x2.sizeOfT(), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX3, x3.buffer(), x3.lengthOf() * x3.sizeOfT(), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX1ShapeInfo, x1.getShapeInfo(), shape::shapeInfoByteLength(x1.getShapeInfo()), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX2ShapeInfo, x2.getShapeInfo(), shape::shapeInfoByteLength(x2.getShapeInfo()), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX3ShapeInfo, x3.getShapeInfo(), shape::shapeInfoByteLength(x3.getShapeInfo()), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dZShapeInfo, scalar.getShapeInfo(), shape::shapeInfoByteLength(scalar.getShapeInfo()), hipMemcpyHostToDevice, stream);
void* reductionPointer = nullptr;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream, reductionPointer);
/***************************************/
NativeOpExecutioner::execIndexReduceScalar(&lc,
nd4j::indexreduce::IndexAbsoluteMax,
x1.buffer(), x1.getShapeInfo(),
dX1, dX1ShapeInfo,
nullptr,
scalar.buffer(), scalar.getShapeInfo(),
dZ, dZShapeInfo);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
hipMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), hipMemcpyDeviceToHost, stream);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp1.e<float>(0), scalar.e<float>(0), 1e-5);
/***************************************/
NativeOpExecutioner::execIndexReduceScalar(&lc,
nd4j::indexreduce::IndexAbsoluteMax,
nullptr, x2.getShapeInfo(),
dX2, dX2ShapeInfo,
nullptr,
nullptr, scalar.getShapeInfo(),
dZ, dZShapeInfo);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
hipMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), hipMemcpyDeviceToHost, stream);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp2.e<float>(0), scalar.e<float>(0), 1e-5);
    /***************************************/
NativeOpExecutioner::execIndexReduceScalar(&lc,
nd4j::indexreduce::IndexAbsoluteMax,
nullptr, x3.getShapeInfo(),
dX3, dX3ShapeInfo,
nullptr,
nullptr, scalar.getShapeInfo(),
dZ, dZShapeInfo);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
hipMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), hipMemcpyDeviceToHost, stream);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp3.e<float>(0), scalar.e<float>(0), 1e-5);
/***************************************/
hipFree(dX1); hipFree(dX2); hipFree(dX3); hipFree(dZ);
hipFree(dX1ShapeInfo); hipFree(dX2ShapeInfo); hipFree(dX3ShapeInfo); hipFree(dZShapeInfo);
/***************************************/
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3Scalar_1) {
if (!Environment::getInstance()->isExperimentalBuild())
return;
NDArray x1('c', {2,2}, {1,2,3,4}, nd4j::DataType::INT32);
NDArray x2('c', {2,2}, {-1,-2,-3,-4}, nd4j::DataType::INT32);
NDArray x3('c', {2,2}, {1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE);
NDArray x4('c', {2,2}, {1,2,3,4}, nd4j::DataType::DOUBLE);
NDArray exp1('c', {0}, {-30}, nd4j::DataType::FLOAT32);
NDArray exp2('c', {0}, {15}, nd4j::DataType::DOUBLE);
NDArray scalar1('c', {0}, {100}, nd4j::DataType::FLOAT32);
NDArray scalar2('c', {0}, {100}, nd4j::DataType::DOUBLE);
void *dX1, *dX2, *dX3, *dX4, *dZ1, *dZ2;
Nd4jLong *dX1ShapeInfo, *dX3ShapeInfo, *dZ1ShapeInfo, *dZ2ShapeInfo;
hipError_t cudaResult;
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX1), x1.lengthOf() * x1.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX2), x2.lengthOf() * x2.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX3), x3.lengthOf() * x3.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX4), x4.lengthOf() * x4.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ1), scalar1.lengthOf() * scalar1.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ2), scalar2.lengthOf() * scalar2.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX1ShapeInfo), shape::shapeInfoByteLength(x1.getShapeInfo())); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dX3ShapeInfo), shape::shapeInfoByteLength(x3.getShapeInfo())); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ1ShapeInfo), shape::shapeInfoByteLength(scalar1.getShapeInfo())); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&dZ2ShapeInfo), shape::shapeInfoByteLength(scalar2.getShapeInfo())); ASSERT_EQ(0, cudaResult);
hipStream_t stream;
cudaResult = hipStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
hipMemcpyAsync(dX1, x1.buffer(), x1.lengthOf() * x1.sizeOfT(), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX2, x2.buffer(), x2.lengthOf() * x2.sizeOfT(), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX3, x3.buffer(), x3.lengthOf() * x3.sizeOfT(), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX4, x4.buffer(), x4.lengthOf() * x4.sizeOfT(), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX1ShapeInfo, x1.getShapeInfo(), shape::shapeInfoByteLength(x1.getShapeInfo()), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dX3ShapeInfo, x3.getShapeInfo(), shape::shapeInfoByteLength(x3.getShapeInfo()), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dZ1ShapeInfo, scalar1.getShapeInfo(), shape::shapeInfoByteLength(scalar1.getShapeInfo()), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dZ2ShapeInfo, scalar2.getShapeInfo(), shape::shapeInfoByteLength(scalar2.getShapeInfo()), hipMemcpyHostToDevice, stream);
/***************************************/
void* reductionPointer = nullptr;
int* allocationPointer = nullptr;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream, reductionPointer, nullptr, allocationPointer);
/***************************************/
    NativeOpExecutioner::execReduce3Scalar(&lc, nd4j::reduce3::Dot,
                                    nullptr, x1.getShapeInfo(), dX1, dX1ShapeInfo,
                                    nullptr,
                                    nullptr, x2.getShapeInfo(), dX2, dX1ShapeInfo,
                                    nullptr, scalar1.getShapeInfo(), dZ1, dZ1ShapeInfo);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
hipMemcpyAsync(scalar1.buffer(), dZ1, scalar1.lengthOf() * scalar1.sizeOfT(), hipMemcpyDeviceToHost, stream);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp1.e<float>(0), scalar1.e<float>(0), 1e-5);
/***************************************/
    NativeOpExecutioner::execReduce3Scalar(&lc, nd4j::reduce3::Dot,
                                    nullptr, x3.getShapeInfo(), dX3, dX3ShapeInfo,
                                    nullptr,
                                    nullptr, x4.getShapeInfo(), dX4, dX3ShapeInfo,
                                    nullptr, scalar2.getShapeInfo(), dZ2, dZ2ShapeInfo);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
hipMemcpyAsync(scalar2.buffer(), dZ2, scalar2.lengthOf() * scalar2.sizeOfT(), hipMemcpyDeviceToHost, stream);
cudaResult = hipStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp2.e<float>(0), scalar2.e<float>(0), 1e-5);
/***************************************/
hipFree(dX1); hipFree(dX2); hipFree(dX3); hipFree(dX4); hipFree(dZ1); hipFree(dZ2);
hipFree(dX1ShapeInfo); hipFree(dX3ShapeInfo); hipFree(dZ1ShapeInfo); hipFree(dZ2ShapeInfo);
/***************************************/
cudaResult = hipStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_1) {
NDArray x('c', {2,2}, {1,2,3,4}, nd4j::DataType::INT32);
NDArray y('c', {2,2}, {-1,-2,-3,-4}, nd4j::DataType::INT32);
NDArray exp('c', {0}, {-30}, nd4j::DataType::FLOAT32);
NDArray z('c', {0}, {100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {0, 1};
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
std::vector<void*> devicePtrs(hostData.size(), nullptr);
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
nullptr, nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_2) {
NDArray x('c', {2,2}, {1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE);
NDArray y('c', {2,2}, {1,2,3,4}, nd4j::DataType::DOUBLE);
NDArray exp('c', {0}, {15}, nd4j::DataType::DOUBLE);
NDArray z('c', {0}, {100}, nd4j::DataType::DOUBLE);
std::vector<int> dimensions = {0, 1};
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
nullptr, nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_3) {
NDArray x('c', {2,3}, {1,2,3,4,5,6}, nd4j::DataType::INT32);
NDArray y('c', {2,3}, {-6,-5,-4,-3,-2,-1}, nd4j::DataType::INT32);
NDArray exp('c', {3}, {-18,-20,-18}, nd4j::DataType::FLOAT32);
NDArray z('c', {3}, {100,100,100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {0};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
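    // Note (added): the TAD ("tensor along dimension") helper yields the shape info of each
    // sub-array over the given dimensions plus the buffer offset of every such sub-array;
    // both are copied to the device below and consumed by the reduction kernel.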
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.getShapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
(Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_4) {
NDArray x('c', {2,3}, {1,2,3,4,5,6}, nd4j::DataType::DOUBLE);
NDArray y('c', {2,3}, {1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE);
NDArray exp('c', {2}, {9,22.5}, nd4j::DataType::DOUBLE);
NDArray z('c', {2}, {100,100}, nd4j::DataType::DOUBLE);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.getShapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
(Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_5) {
NDArray x('c', {2,2,3}, {1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::FLOAT32);
NDArray y('c', {2,2,3}, {1,2,3,4,5,6,7,8,9,10,11,12}, nd4j::DataType::FLOAT32);
NDArray exp('c', {2,3}, {7.5, 10.5, 13.5, 25.5, 28.5, 31.5}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,3}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.getShapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
(Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3All_1) {
NDArray x('c', {2,2}, {1,2,3,4}, nd4j::DataType::INT32);
NDArray y('c', {2,3}, {-1,1,-1,1,-1,1}, nd4j::DataType::INT32);
NDArray exp('c', {2,3}, {2,-2,2,2,-2,2}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,3}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {0};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.getShapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4 -- yTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3All(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
(Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3All_2) {
NDArray x('c', {2,2}, {1,2,3,4}, nd4j::DataType::DOUBLE);
NDArray y('c', {2,3}, {1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE);
NDArray exp('c', {2,3}, {6,6,6,9,9,9}, nd4j::DataType::DOUBLE);
NDArray z('c', {2,3}, {100,100,100,100,100,100,},nd4j::DataType::DOUBLE);
std::vector<int> dimensions = {0};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.getShapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3All(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
(Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduce_1) {
NDArray x('c', {2,3}, {100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
x.linspace(-2.); x.syncToDevice();
NDArray exp('c', {2}, {2, 2}, nd4j::DataType::INT64);
NDArray z('c', {2}, {100,100}, nd4j::DataType::INT64);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execIndexReduce(&lc, nd4j::indexreduce::IndexMax,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduce_2) {
NDArray x('c', {2,3,4,5}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
x.linspace(-2.f); x.syncToDevice();
NDArray exp('c', {2,5}, {11,11,11,11,11,11,11,11,11,11}, nd4j::DataType::INT64);
NDArray z('c', {2,5}, {100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT64);
std::vector<int> dimensions = {1,2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execIndexReduce(&lc, nd4j::indexreduce::IndexMax,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduce_3) {
NDArray x('c', {2,3,4,5}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
x.linspace(-2.); x.syncToDevice();
NDArray exp('c', {3}, {39, 39, 39}, nd4j::DataType::INT64);
NDArray z('c', {3}, {100,100,100}, nd4j::DataType::INT64);
std::vector<int> dimensions = {0,2,3};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execIndexReduce(&lc, nd4j::indexreduce::IndexMax,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalar_1) {
if (!Environment::getInstance()->isExperimentalBuild())
return;
NDArray x('c', {2,3}, {0,1,2,3,4,5}, nd4j::DataType::INT64);
NDArray exp('c',{2,3}, {0,0,1,1,2,2}, nd4j::DataType::INT64);
NDArray scalar('c',{0}, {2}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,3}, {100,100,100,100,100,100}, nd4j::DataType::INT64);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalar(&lc, nd4j::scalar::Divide,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, scalar.getShapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(),
nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalar_2) {
if (!Environment::getInstance()->isExperimentalBuild())
return;
NDArray x('c', {2,3}, {-1,-2,-3,-4,-5,-6}, nd4j::DataType::INT64);
NDArray exp('c',{2,3}, {10,10,10,10,10,10}, nd4j::DataType::FLOAT32);
NDArray scalar('c',{0}, {10}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,3}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalar(&lc, nd4j::scalar::CopyPws,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, scalar.getShapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(),
nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalar_3) {
if (!Environment::getInstance()->isExperimentalBuild())
return;
NDArray x('c', {2,3,2}, {0,1,2,3,4,5,6,7,8,9,10,11}, nd4j::DataType::INT64);
NDArray scalars('c',{2,2}, {1,2,3,4}, nd4j::DataType::FLOAT32);
NDArray exp('c', {2,3,2}, {0,0,2,1,4,2, 2,1,2,2,3,2}, nd4j::DataType::INT64);
NDArray z('c', {2,3,2}, {100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT64);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalar(&lc, nd4j::scalar::Divide,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, scalars.getShapeInfo(), scalars.specialBuffer(), scalars.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalarBool_1) {
NDArray x('c', {2,3}, {-1,-2,0,1,2,3}, nd4j::DataType::BFLOAT16);
NDArray scalar('c',{0}, {0}, nd4j::DataType::BFLOAT16);
NDArray exp('c',{2,3}, {0,0,0,1,1,1}, nd4j::DataType::BOOL);
NDArray z('c', {2,3}, {100,100,100,100,100,100,}, nd4j::DataType::BOOL);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
    // call cuda kernel which calculates result
NativeOpExecutioner::execScalarBool(&lc, nd4j::scalar::GreaterThan,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, scalar.getShapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(),
nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalarBool_2) {
NDArray x('c', {2,3}, {0,1,2,3,4,5}, nd4j::DataType::FLOAT32);
NDArray scalars('c',{2}, {-1,4}, nd4j::DataType::FLOAT32);
NDArray exp('c', {2,3}, {1,1,1,0,0,1}, nd4j::DataType::BOOL);
NDArray z('c', {2,3}, {100,100,100,100,100,100}, nd4j::DataType::BOOL);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalarBool(&lc, nd4j::scalar::GreaterThan,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, scalars.getShapeInfo(), scalars.specialBuffer(), scalars.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcast_1) {
if (!Environment::getInstance()->isExperimentalBuild())
return;
NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32);
NDArray y('c', {3}, {10, 20, 30}, nd4j::DataType::INT64);
NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32);
NDArray exp('c', {2,3,4}, {10, 11, 12, 13,24, 25, 26, 27,38, 39, 40, 41,22, 23, 24, 25,36, 37, 38, 39,50, 51, 52, 53}, nd4j::DataType::INT32);
x.linspace(0); x.syncToDevice();
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcast(&lc, nd4j::broadcast::Add,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcast_2) {
if (!Environment::getInstance()->isExperimentalBuild())
return;
NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32);
NDArray y('c', {2,4}, {10,20,30,40,50,60,70,80}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
NDArray exp('c', {2,3,4}, {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51., 62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., 103}, nd4j::DataType::FLOAT32);
x.linspace(0); x.syncToDevice();
std::vector<int> dimensions = {0,2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcast(&lc, nd4j::broadcast::Add,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcastBool_1) {
NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32);
NDArray y('c', {3}, {2, 12, 22}, nd4j::DataType::INT32);
NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::BOOL);
NDArray exp('c', {2,3,4}, {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0}, nd4j::DataType::BOOL);
x.linspace(1); x.syncToDevice();
std::vector<int> dimensions = {1};
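// expected result: boolean mask z[i][j][k] = (x[i][j][k] == y[j]) with y broadcast along dimension 1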
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcastBool(&lc, nd4j::broadcast::EqualTo,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcastBool_2) {
NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
NDArray y('c', {2,4}, {1,10,10,15,20,20,20,24}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::BOOL);
NDArray exp('c', {2,3,4}, {1,0,0,0, 0,0,0,0, 0,1,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,1}, nd4j::DataType::BOOL);
x.linspace(1); x.syncToDevice();
std::vector<int> dimensions = {0,2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcastBool(&lc, nd4j::broadcast::EqualTo,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execPairwiseTransform_1) {
if (!Environment::getInstance()->isExperimentalBuild())
return;
NDArray x('c', {2,2,2}, {1,5,3,7,2,6,4,8}, nd4j::DataType::INT32);
NDArray y('c', {4,2}, {0.1,0.2,0.3,0.4,1.5,0.6,0.7,1.8}, nd4j::DataType::DOUBLE);
NDArray z('c', {8}, {100,100,100,100,100,100,100,100}, nd4j::DataType::INT32);
NDArray exp('c', {8}, {0,1,2,3,3,5,6,6}, nd4j::DataType::INT32);
x.permutei({2,1,0}); // -> {1,2,3,4,5,6,7,8}
x.syncShape();
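// expected result: x - y = {0.9,1.8,2.7,3.6,3.5,5.4,6.3,6.2}, truncated to {0,1,2,3,3,5,6,6} when written into the INT32 z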
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execPairwiseTransform(&lc, nd4j::pairwise::Subtract,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execPairwiseBoolTransform_1) {
NDArray x('c', {2,2,2}, {1,5,3,7,2,6,4,8}, nd4j::DataType::INT64);
NDArray y('c', {4,2}, {0,2,0,4,0,6,0,8}, nd4j::DataType::INT64);
NDArray z('c', {8}, {100,100,100,100,100,100,100,100}, nd4j::DataType::BOOL);
NDArray exp('c', {8}, {0,1,0,1,0,1,0,1}, nd4j::DataType::BOOL);
x.permutei({2,1,0}); // -> {1,2,3,4,5,6,7,8}
x.syncShape();
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execPairwiseBoolTransform(&lc, nd4j::pairwise::EqualTo,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformFloat_1) {
NDArray x('c', {2,2}, {0, 6.25, 2.25, 12.25}, nd4j::DataType::DOUBLE);
NDArray z('c', {4}, {100,100,100,100}, nd4j::DataType::FLOAT32);
NDArray exp('c', {4}, {0, 1.5, 2.5, 3.5}, nd4j::DataType::FLOAT32);
x.permutei({1,0});
x.syncShape();
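// expected result: after the permute x reads {0, 2.25, 6.25, 12.25}; Sqrt gives {0, 1.5, 2.5, 3.5}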
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformFloat(&lc, nd4j::transform::Sqrt,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformFloat_2) {
NDArray x('c', {1,4}, {0, 4, 9, 16}, nd4j::DataType::INT64);
NDArray z('c', {2,2}, {100,100,100,100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {2,2}, {0, 2, 3, 4}, nd4j::DataType::DOUBLE);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformFloat(&lc, nd4j::transform::Sqrt,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformAny_1) {
NDArray x('c', {2,2}, {0, 6.25, 2.25, 12.25}, nd4j::DataType::DOUBLE);
NDArray z('c', {4,1}, {100,100,100,100}, nd4j::DataType::INT32);
NDArray exp('c', {4,1}, {0, 2, 6, 12}, nd4j::DataType::INT32);
x.permutei({1,0});
x.syncShape();
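// expected result: Assign copies the permuted x = {0, 2.25, 6.25, 12.25} into the INT32 z, truncating to {0, 2, 6, 12}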
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformAny(&lc, nd4j::transform::Assign,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformAny_2) {
NDArray x('c', {1,4}, {0, 6.25, 2.25, 12.25}, nd4j::DataType::BFLOAT16);
NDArray z('c', {2,2}, {100,100,100,100}, nd4j::DataType::FLOAT32);
NDArray exp('c', {2,2}, {0, 6.25, 2.25, 12.25}, nd4j::DataType::FLOAT32);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformAny(&lc, nd4j::transform::Assign,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformStrict_1) {
NDArray x('c', {2,3}, {0,2,4,1,3,5}, nd4j::DataType::DOUBLE);
NDArray z('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {3,2}, {0, 3, 12, 27, 48, 75}, nd4j::DataType::DOUBLE);
x.permutei({1,0});
x.syncShape();
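// expected result: CubeDerivative(x) = 3*x^2; the permuted x reads {0,1,2,3,4,5}, so z = {0,3,12,27,48,75}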
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformStrict(&lc, nd4j::transform::CubeDerivative,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformStrict_2) {
NDArray x('c', {6}, {0,1,2,3,4,5}, nd4j::DataType::FLOAT32);
NDArray z('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
NDArray exp('c', {3,2}, {0, 3, 12, 27, 48, 75}, nd4j::DataType::FLOAT32);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformStrict(&lc, nd4j::transform::CubeDerivative,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformSame_1) {
NDArray x('c', {2,3}, {0,2.5,4.5,1.5,3.5,5.5}, nd4j::DataType::DOUBLE);
NDArray z('c', {1,6}, {100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {1,6}, {0,2.25,6.25,12.25,20.25,30.25}, nd4j::DataType::DOUBLE);
x.permutei({1,0});
x.syncShape();
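// expected result: Square of the permuted x = {0,1.5,2.5,3.5,4.5,5.5} gives {0,2.25,6.25,12.25,20.25,30.25}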
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformSame(&lc, nd4j::transform::Square,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformSame_2) {
NDArray x('c', {6}, {0,1,2,3,4,5}, nd4j::DataType::INT32);
NDArray z('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::INT32);
NDArray exp('c', {3,2}, {0,1,4,9,16,25}, nd4j::DataType::INT32);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformSame(&lc, nd4j::transform::Square,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformBool_1) {
NDArray x('c', {2,3}, {0,2,4,-1,-3,-5}, nd4j::DataType::DOUBLE);
NDArray z('c', {1,6}, {100,100,100,100,100,100}, nd4j::DataType::BOOL);
NDArray exp('c', {1,6}, {0,0,1,0,1,0}, nd4j::DataType::BOOL);
x.permutei({1,0});
x.syncShape();
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformBool(&lc, nd4j::transform::IsPositive,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformBool_2) {
NDArray x('c', {6}, {0,-1,2,-3,4,-5}, nd4j::DataType::INT32);
NDArray z('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::BOOL);
NDArray exp('c', {3,2}, {0,0,1,0,1,0}, nd4j::DataType::BOOL);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformBool(&lc, nd4j::transform::IsPositive,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloat_1) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::INT32);
NDArray z('c', {3}, {100,100,100}, nd4j::DataType::FLOAT32);
NDArray exp('c', {3}, {2.5, 6.5, 10.5}, nd4j::DataType::FLOAT32);
x.permutei({2,1,0});
x.syncShape();
std::vector<int> dimensions = {0,2};
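// expected result: Mean over dims {0,2} of the permuted array leaves one mean per index along dim 1: {2.5, 6.5, 10.5}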
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceFloat(&lc, nd4j::reduce::Mean,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloat_2) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::INT32);
NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {2,4}, {-1., 0., 1., 2., 11., 12., 13., 14.}, nd4j::DataType::DOUBLE);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceFloat(&lc, nd4j::reduce::Mean,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSame_1) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::INT32);
NDArray z('c', {3}, {100,100,100}, nd4j::DataType::INT32);
NDArray exp('c', {3}, {20, 52, 84}, nd4j::DataType::INT32);
x.permutei({2,1,0});
x.syncShape();
std::vector<int> dimensions = {0,2};
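// expected result: Sum over dims {0,2}; each of the 3 TADs has 8 elements, giving {20, 52, 84}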
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceSame(&lc, nd4j::reduce::Sum,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSame_2) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
NDArray exp('c', {2,4}, {-3., 0., 3., 6., 33., 36., 39., 42.}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceSame(&lc, nd4j::reduce::Sum,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBool_1) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, nd4j::DataType::INT32);
NDArray z('c', {3}, {100,100,100}, nd4j::DataType::BOOL);
NDArray exp('c', {3}, {0, 1, 1}, nd4j::DataType::BOOL);
x.permutei({2,1,0});
x.syncShape();
std::vector<int> dimensions = {0,2};
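// expected result {0,1,1}: only the first TAD, whose elements are all negative, reduces to false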
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceBool(&lc, nd4j::reduce::IsPositive,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBool_2) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, nd4j::DataType::BOOL);
NDArray exp('c', {2,4}, {1, 1, 1, 1, 0, 0, 0, 0}, nd4j::DataType::BOOL);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceBool(&lc, nd4j::reduce::IsPositive,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLong_1) {
NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, nd4j::DataType::INT32);
NDArray z('c', {3}, {100,100,100}, nd4j::DataType::INT64);
NDArray exp('c', {3}, {5,6,6}, nd4j::DataType::INT64);
x.permutei({2,1,0});
x.syncShape();
std::vector<int> dimensions = {0,2};
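// expected result: CountNonZero per TAD over dims {0,2} gives {5, 6, 6} non-zero entries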
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceLong(&lc, nd4j::reduce::CountNonZero,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLong_2) {
NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, nd4j::DataType::INT64);
NDArray exp('c', {2,4}, {3, 1, 3, 2, 2, 1, 2, 3}, nd4j::DataType::INT64);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceLong(&lc, nd4j::reduce::CountNonZero,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloatScalar_1) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::INT32);
NDArray z('c', {0}, {100}, nd4j::DataType::FLOAT32);
NDArray exp('c', {0}, {6.5}, nd4j::DataType::FLOAT32);
x.permutei({2,1,0});
x.syncShape();
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceFloatScalar(&lc, nd4j::reduce::Mean,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloatScalar_2) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::INT32);
NDArray z('c', {0}, {100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {0}, {6.5}, nd4j::DataType::DOUBLE);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceFloatScalar(&lc, nd4j::reduce::Mean,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSameScalar_1) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::INT32);
NDArray z('c', {0}, {100}, nd4j::DataType::INT32);
NDArray exp('c', {0}, {156}, nd4j::DataType::INT32);
x.permutei({2,1,0});
x.syncShape();
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceSameScalar(&lc, nd4j::reduce::Sum,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSameScalar_2) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::DOUBLE);
NDArray z('c', {0}, {100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {0}, {156}, nd4j::DataType::DOUBLE);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceSameScalar(&lc, nd4j::reduce::Sum,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBoolScalar_1) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, nd4j::DataType::INT32);
NDArray z('c', {0}, {100}, nd4j::DataType::BOOL);
NDArray exp('c', {0}, {1}, nd4j::DataType::BOOL);
x.permutei({2,1,0});
x.syncShape();
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceBoolScalar(&lc, nd4j::reduce::IsPositive,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBoolScalar_2) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, nd4j::DataType::DOUBLE);
NDArray z('c', {0}, {100}, nd4j::DataType::BOOL);
NDArray exp('c', {0}, {1}, nd4j::DataType::BOOL);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceBoolScalar(&lc, nd4j::reduce::IsPositive,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLongScalar_1) {
NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, nd4j::DataType::INT32);
NDArray z('c', {0}, {100}, nd4j::DataType::INT64);
NDArray exp('c', {0}, {17}, nd4j::DataType::INT64);
x.permutei({2,1,0});
x.syncShape();
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceLongScalar(&lc, nd4j::reduce::CountNonZero,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLongScalar_2) {
NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, nd4j::DataType::DOUBLE);
NDArray z('c', {0}, {100}, nd4j::DataType::INT64);
NDArray exp('c', {0}, {17}, nd4j::DataType::INT64);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceLongScalar(&lc, nd4j::reduce::CountNonZero,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_1) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, nd4j::DataType::FLOAT32);
NDArray y('c', {2,2}, {1,2,3,4}, nd4j::DataType::FLOAT32);
NDArray exp('c', {3}, {10,20,30}, nd4j::DataType::DOUBLE);
NDArray z('c', {3}, {100,100,100}, nd4j::DataType::DOUBLE);
std::vector<int> dimensions = {0,1};
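// expected result: each z[k] is the dot product of x's k-th {2,2} TAD (over dims {0,1}) with y: {10, 20, 30}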
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_2) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, nd4j::DataType::INT64);
NDArray y('c', {2,3}, {1,2,3,4,5,6}, nd4j::DataType::INT64);
NDArray exp('c', {2}, {10,73}, nd4j::DataType::FLOAT32);
NDArray z('c', {2}, {100,100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {0,2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_3) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, nd4j::DataType::INT64);
NDArray y('c', {3}, {1,2,3}, nd4j::DataType::INT64);
NDArray exp('c', {2,2}, {-22,-4,14,32}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,2}, {100,100,100,100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {2};
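// expected result: each z element is the dot product of a length-3 row of x with y = {1,2,3}: {-22, -4, 14, 32}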
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_4) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, nd4j::DataType::DOUBLE);
NDArray y('c', {2,2,3}, {10,20,30,40,50,60,70,80,90,100,110,120}, nd4j::DataType::DOUBLE);
NDArray exp('c', {0}, {1820}, nd4j::DataType::FLOAT32);
NDArray z('c', {0}, {100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {0,1,2};
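// expected result: reducing over all dimensions yields the full dot product sum(x*y) = 1820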
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStats_1) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, nd4j::DataType::INT64);
NDArray exp('c', {0}, {3.605551}, nd4j::DataType::FLOAT32);
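// expected value is the bias-corrected sample standard deviation of x:
// mean = 0.5, sum of squared deviations = 143, sqrt(143 / 11) = sqrt(13) ~ 3.605551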
NDArray z('c', {0}, {100}, nd4j::DataType::FLOAT32);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
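// the reduction pointer is device scratch space for the kernel's partial block results;
// 1 MB is far more than this 12-element input needs, so the size is not tuned per test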
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStats(&lc, nd4j::variance::SummaryStatsStandardDeviation,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
true);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStats_2) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-20,-1,0,1,2,3,4,5,6}, nd4j::DataType::DOUBLE);
NDArray exp('c', {2}, {3.405877, 9.715966}, nd4j::DataType::FLOAT32);
NDArray z('c', {2}, {100,100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {0,2};
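// reducing over axes {0,2} of the {2,2,3} input leaves axis 1, so z has shape {2};
// each of the two TADs built below is the {2,3} sub-array taken at a fixed index along axis 1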
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStats(&lc, nd4j::variance::SummaryStatsStandardDeviation,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
true);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStats_3) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-20,-1,0,1,2,3,4,5,6}, nd4j::DataType::DOUBLE);
NDArray exp('c', {2}, {10.606602, 2.121320}, nd4j::DataType::FLOAT32);
NDArray z('c', {2}, {100,100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStats(&lc, nd4j::variance::SummaryStatsStandardDeviation,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
true);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStatsScalar_1) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, nd4j::DataType::INT64);
NDArray exp('c', {0}, {3.605551}, nd4j::DataType::FLOAT32);
NDArray z('c', {0}, {100}, nd4j::DataType::FLOAT32);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStatsScalar(&lc, nd4j::variance::SummaryStatsStandardDeviation,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
true);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
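// The execRandom tests below exercise NativeOpExecutioner::execRandom with a fixed
// RandomGenerator(119, 5) so the generated sequences are reproducible and can be compared against exp.
// extraArguments carries the distribution parameters (here presumably mean = 0 and stddev = 0.5 for
// the Gaussian case); it is copied to the device by allocateDeviceMem like any other host data.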
TEST_F(CudaBasicsTests1, execRandom_1) {
NDArray z('c', {10}, {100,0,0,0,0,0,0,0,0,0}, nd4j::DataType::DOUBLE);
NDArray exp('c', {10}, {0.050942, -0.183229, -0.093921, 0.075469, 0.257166, -0.254838, 0.342227, -0.682188, -0.004345, 0.464633}, nd4j::DataType::DOUBLE);
std::vector<double> extraArguments = {0., 0.5};
nd4j::graph::RandomGenerator gen(119,5);
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- extraArguments
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execRandom(&lc, nd4j::random::GaussianDistribution,
&gen,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
devicePtrs[0]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_2) {
NDArray x('c', {10}, {0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1}, nd4j::DataType::DOUBLE);
NDArray z('c', {2,5}, {100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {10}, {0., 0., 0.3, 0., 0.5, 0., 0.7, 0., 0., 1.}, nd4j::DataType::DOUBLE);
std::vector<double> extraArguments = {0.7};
nd4j::graph::RandomGenerator gen(119,5);
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- extraArguments
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execRandom(&lc, nd4j::random::DropOut,
&gen,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
devicePtrs[0]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_3) {
NDArray z('c', {10}, {100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {10}, {2.373649, 2.239791, 1.887353, 2.488636, 2.068904, 2.281399, 1.828228, 2.228222, 2.490847, 1.669537}, nd4j::DataType::DOUBLE);
std::vector<double> extraArguments = {1.5, 2.5};
nd4j::graph::RandomGenerator gen(119,5);
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- extraArguments
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execRandom(&lc, nd4j::random::UniformDistribution,
&gen,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
devicePtrs[0]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_4) {
NDArray z('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE);
NDArray exp('c', {10}, {2.373649, 2.239791, 1.887353, 2.488636, 2.068904, 2.281399, 1.828228, 2.228222, 2.490847, 1.669537}, nd4j::DataType::DOUBLE);
z.permutei({1,0});
std::vector<double> extraArguments = {1.5, 2.5};
nd4j::graph::RandomGenerator gen(119,5);
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- extraArguments
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
hipError_t cudaResult;
hipStream_t stream;
cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execRandom(&lc, nd4j::random::UniformDistribution,
&gen,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
devicePtrs[0]);
cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]);
// delete cuda stream
cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
| 9931d0b7ac49db16e04a7b3fd5eb3347c0d4993b.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include "testlayers.h"
#include <NDArray.h>
#include <NDArrayFactory.h>
#include <Context.h>
#include <Node.h>
#include <graph/Variable.h>
#include <graph/VariableSpace.h>
#include <specials_cuda.h>
#include <TAD.h>
#include <MmulHelper.h>
#include <cuda.h>
using namespace nd4j;
using namespace nd4j::graph;
class CudaBasicsTests1 : public testing::Test {
public:
};
//////////////////////////////////////////////////////////////////////////
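// Helper: for every (hostPtr, byteCount) pair in hostData, allocates a matching device buffer into
// devicePtrs[i] and schedules an asynchronous host-to-device copy on the context's stream; it also
// allocates 1 MB reduction and allocation scratch buffers and registers them with the LaunchContext.
// Callers must synchronize the stream before relying on the copied data.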
static cudaError_t allocateDeviceMem(LaunchContext& lc, std::vector<void*>& devicePtrs, const std::vector<std::pair<void*,size_t>>& hostData) {
if(devicePtrs.size() != hostData.size())
throw std::invalid_argument("prepareDataForCuda: two input std::vectors should have the same sizes!");
cudaError_t cudaResult;
void* reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); if(cudaResult != 0) return cudaResult;
int* allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); if(cudaResult != 0) return cudaResult;
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
cudaStream_t stream = *lc.getCudaStream();
for(int i = 0; i < devicePtrs.size(); ++i) {
cudaResult = cudaMalloc(reinterpret_cast<void **>(&devicePtrs[i]), hostData[i].second); if(cudaResult != 0) return cudaResult;
cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice, stream);
}
return cudaResult;
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, TestPairwise_1) {
// allocating host-side arrays
auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5});
auto z = NDArrayFactory::create<double>('c', { 5 }, {0,0,0,0,0});
auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 });
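// the test adds x to itself on the device (pairwise::Add with x as both operands) and expects z = 2 * x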
// making raw buffers
Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX;
cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT());
ASSERT_EQ(0, res);
res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT());
ASSERT_EQ(0, res);
res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo()));
ASSERT_EQ(0, res);
Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(cudaStream_t));
CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(cudaStream_t));
cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t *>(&nativeStream));
auto stream = reinterpret_cast<cudaStream_t *>(&nativeStream);
cudaMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), cudaMemcpyHostToDevice, *stream);
cudaMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), cudaMemcpyHostToDevice, *stream);
LaunchContext lc(stream, nullptr, nullptr);
NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, nullptr, x.shapeInfo(), devBufferPtrX, reinterpret_cast<Nd4jLong*>(devShapePtrX), nullptr, x.shapeInfo(), devBufferPtrX, reinterpret_cast<Nd4jLong*>(devShapePtrX), nullptr, z.shapeInfo(), devBufferPtrZ, reinterpret_cast<Nd4jLong*>(devShapePtrX), nullptr);
res = cudaStreamSynchronize(*stream);
ASSERT_EQ(0, res);
cudaMemcpyAsync(z.buffer(), devBufferPtrZ, z.lengthOf() * x.sizeOfT(), cudaMemcpyDeviceToHost, *stream);
res = cudaStreamSynchronize(*stream);
ASSERT_EQ(0, res);
cudaFree(devBufferPtrX);
cudaFree(devBufferPtrZ);
cudaFree(devShapePtrX);
for (int e = 0; e < z.lengthOf(); e++) {
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
}
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduceScalar_1) {
NDArray x1('c', {2,2}, {0, 1, 2, 3}, nd4j::DataType::INT32);
NDArray x2('c', {2,2}, {0.5, 1.5, -4.5, 3.5}, nd4j::DataType::BFLOAT16);
NDArray x3('c', {2,2}, {0, -1, 0, 1}, nd4j::DataType::BOOL);
NDArray scalar('c', {0}, {0}, nd4j::DataType::INT64);
NDArray exp1('c', {0}, {3}, nd4j::DataType::INT64);
NDArray exp2('c', {0}, {2}, nd4j::DataType::INT64);
NDArray exp3('c', {0}, {1}, nd4j::DataType::INT64);
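// IndexAbsoluteMax returns the flat index of the element with the largest absolute value:
// 3 for x1 ({0,1,2,3}), 2 for x2 (|-4.5| dominates), 1 for x3 (the first true element of the bool array wins)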
void *dX1, *dX2, *dX3, *dZ;
Nd4jLong *dX1ShapeInfo, *dX2ShapeInfo, *dX3ShapeInfo, *dZShapeInfo;
cudaError_t cudaResult;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX1), x1.lengthOf() * x1.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX2), x2.lengthOf() * x2.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX3), x3.lengthOf() * x3.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ), scalar.lengthOf() * scalar.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX1ShapeInfo), shape::shapeInfoByteLength(x1.getShapeInfo())); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX2ShapeInfo), shape::shapeInfoByteLength(x2.getShapeInfo())); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX3ShapeInfo), shape::shapeInfoByteLength(x3.getShapeInfo())); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZShapeInfo), shape::shapeInfoByteLength(scalar.getShapeInfo())); ASSERT_EQ(0, cudaResult);
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
cudaMemcpyAsync(dX1, x1.buffer(), x1.lengthOf() * x1.sizeOfT(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX2, x2.buffer(), x2.lengthOf() * x2.sizeOfT(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX3, x3.buffer(), x3.lengthOf() * x3.sizeOfT(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX1ShapeInfo, x1.getShapeInfo(), shape::shapeInfoByteLength(x1.getShapeInfo()), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX2ShapeInfo, x2.getShapeInfo(), shape::shapeInfoByteLength(x2.getShapeInfo()), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX3ShapeInfo, x3.getShapeInfo(), shape::shapeInfoByteLength(x3.getShapeInfo()), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dZShapeInfo, scalar.getShapeInfo(), shape::shapeInfoByteLength(scalar.getShapeInfo()), cudaMemcpyHostToDevice, stream);
void* reductionPointer = nullptr;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024);
ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream, reductionPointer);
/***************************************/
NativeOpExecutioner::execIndexReduceScalar(&lc,
nd4j::indexreduce::IndexAbsoluteMax,
x1.buffer(), x1.getShapeInfo(),
dX1, dX1ShapeInfo,
nullptr,
scalar.buffer(), scalar.getShapeInfo(),
dZ, dZShapeInfo);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
cudaMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), cudaMemcpyDeviceToHost, stream);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp1.e<float>(0), scalar.e<float>(0), 1e-5);
/***************************************/
NativeOpExecutioner::execIndexReduceScalar(&lc,
nd4j::indexreduce::IndexAbsoluteMax,
nullptr, x2.getShapeInfo(),
dX2, dX2ShapeInfo,
nullptr,
nullptr, scalar.getShapeInfo(),
dZ, dZShapeInfo);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
cudaMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), cudaMemcpyDeviceToHost, stream);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp2.e<float>(0), scalar.e<float>(0), 1e-5);
/***************************************/
NativeOpExecutioner::execIndexReduceScalar(&lc,
nd4j::indexreduce::IndexAbsoluteMax,
nullptr, x3.getShapeInfo(),
dX3, dX3ShapeInfo,
nullptr,
nullptr, scalar.getShapeInfo(),
dZ, dZShapeInfo);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
cudaMemcpyAsync(scalar.buffer(), dZ, scalar.lengthOf() * scalar.sizeOfT(), cudaMemcpyDeviceToHost, stream);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp3.e<float>(0), scalar.e<float>(0), 1e-5);
/***************************************/
cudaFree(dX1); cudaFree(dX2); cudaFree(dX3); cudaFree(dZ);
cudaFree(dX1ShapeInfo); cudaFree(dX2ShapeInfo); cudaFree(dX3ShapeInfo); cudaFree(dZShapeInfo);
/***************************************/
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3Scalar_1) {
if (!Environment::getInstance()->isExperimentalBuild())
return;
NDArray x1('c', {2,2}, {1,2,3,4}, nd4j::DataType::INT32);
NDArray x2('c', {2,2}, {-1,-2,-3,-4}, nd4j::DataType::INT32);
NDArray x3('c', {2,2}, {1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE);
NDArray x4('c', {2,2}, {1,2,3,4}, nd4j::DataType::DOUBLE);
NDArray exp1('c', {0}, {-30}, nd4j::DataType::FLOAT32);
NDArray exp2('c', {0}, {15}, nd4j::DataType::DOUBLE);
NDArray scalar1('c', {0}, {100}, nd4j::DataType::FLOAT32);
NDArray scalar2('c', {0}, {100}, nd4j::DataType::DOUBLE);
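// reduce3::Dot collapses both inputs to a single scalar dot product:
// x1 . x2 = 1*(-1) + 2*(-2) + 3*(-3) + 4*(-4) = -30 and x3 . x4 = 1.5 * (1+2+3+4) = 15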
void *dX1, *dX2, *dX3, *dX4, *dZ1, *dZ2;
Nd4jLong *dX1ShapeInfo, *dX3ShapeInfo, *dZ1ShapeInfo, *dZ2ShapeInfo;
cudaError_t cudaResult;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX1), x1.lengthOf() * x1.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX2), x2.lengthOf() * x2.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX3), x3.lengthOf() * x3.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX4), x4.lengthOf() * x4.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ1), scalar1.lengthOf() * scalar1.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ2), scalar2.lengthOf() * scalar2.sizeOfT()); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX1ShapeInfo), shape::shapeInfoByteLength(x1.getShapeInfo())); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dX3ShapeInfo), shape::shapeInfoByteLength(x3.getShapeInfo())); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ1ShapeInfo), shape::shapeInfoByteLength(scalar1.getShapeInfo())); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&dZ2ShapeInfo), shape::shapeInfoByteLength(scalar2.getShapeInfo())); ASSERT_EQ(0, cudaResult);
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream);
ASSERT_EQ(0, cudaResult);
cudaMemcpyAsync(dX1, x1.buffer(), x1.lengthOf() * x1.sizeOfT(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX2, x2.buffer(), x2.lengthOf() * x2.sizeOfT(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX3, x3.buffer(), x3.lengthOf() * x3.sizeOfT(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX4, x4.buffer(), x4.lengthOf() * x4.sizeOfT(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX1ShapeInfo, x1.getShapeInfo(), shape::shapeInfoByteLength(x1.getShapeInfo()), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dX3ShapeInfo, x3.getShapeInfo(), shape::shapeInfoByteLength(x3.getShapeInfo()), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dZ1ShapeInfo, scalar1.getShapeInfo(), shape::shapeInfoByteLength(scalar1.getShapeInfo()), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dZ2ShapeInfo, scalar2.getShapeInfo(), shape::shapeInfoByteLength(scalar2.getShapeInfo()), cudaMemcpyHostToDevice, stream);
/***************************************/
void* reductionPointer = nullptr;
int* allocationPointer = nullptr;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream, reductionPointer, nullptr, allocationPointer);
/***************************************/
NativeOpExecutioner::execReduce3Scalar(&lc, nd4j::reduce3::Dot,
nullptr, x1.getShapeInfo(), dX1, dX1ShapeInfo,
nullptr,
nullptr, x2.getShapeInfo(), dX2, dX1ShapeInfo,
nullptr, scalar1.getShapeInfo(), dZ1, dZ1ShapeInfo);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
cudaMemcpyAsync(scalar1.buffer(), dZ1, scalar1.lengthOf() * scalar1.sizeOfT(), cudaMemcpyDeviceToHost, stream);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp1.e<float>(0), scalar1.e<float>(0), 1e-5);
/***************************************/
NativeOpExecutioner::execReduce3Scalar(&lc, nd4j::reduce3::Dot,
nullptr, x3.getShapeInfo(), dX3, dX3ShapeInfo,
nullptr,
nullptr, x4.getShapeInfo(), dX4, dX3ShapeInfo,
nullptr, scalar2.getShapeInfo(), dZ2, dZ2ShapeInfo);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
cudaMemcpyAsync(scalar2.buffer(), dZ2, scalar2.lengthOf() * scalar2.sizeOfT(), cudaMemcpyDeviceToHost, stream);
cudaResult = cudaStreamSynchronize(stream);
ASSERT_EQ(0, cudaResult);
ASSERT_NEAR(exp2.e<float>(0), scalar2.e<float>(0), 1e-5);
/***************************************/
cudaFree(dX1); cudaFree(dX2); cudaFree(dX3); cudaFree(dX4); cudaFree(dZ1); cudaFree(dZ2);
cudaFree(dX1ShapeInfo); cudaFree(dX3ShapeInfo); cudaFree(dZ1ShapeInfo); cudaFree(dZ2ShapeInfo);
/***************************************/
cudaResult = cudaStreamDestroy(stream);
ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_1) {
NDArray x('c', {2,2}, {1,2,3,4}, nd4j::DataType::INT32);
NDArray y('c', {2,2}, {-1,-2,-3,-4}, nd4j::DataType::INT32);
NDArray exp('c', {0}, {-30}, nd4j::DataType::FLOAT32);
NDArray z('c', {0}, {100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {0, 1};
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
std::vector<void*> devicePtrs(hostData.size(), nullptr);
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
nullptr, nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_2) {
NDArray x('c', {2,2}, {1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE);
NDArray y('c', {2,2}, {1,2,3,4}, nd4j::DataType::DOUBLE);
NDArray exp('c', {0}, {15}, nd4j::DataType::DOUBLE);
NDArray z('c', {0}, {100}, nd4j::DataType::DOUBLE);
std::vector<int> dimensions = {0, 1};
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
nullptr, nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_3) {
NDArray x('c', {2,3}, {1,2,3,4,5,6}, nd4j::DataType::INT32);
NDArray y('c', {2,3}, {-6,-5,-4,-3,-2,-1}, nd4j::DataType::INT32);
NDArray exp('c', {3}, {-18,-20,-18}, nd4j::DataType::FLOAT32);
NDArray z('c', {3}, {100,100,100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {0};
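// with dimension {0} the dot product is taken column-wise over the two 2x3 arrays:
// {1,4}.{-6,-3} = -18, {2,5}.{-5,-2} = -20, {3,6}.{-4,-1} = -18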
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.getShapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
(Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_4) {
NDArray x('c', {2,3}, {1,2,3,4,5,6}, nd4j::DataType::DOUBLE);
NDArray y('c', {2,3}, {1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE);
NDArray exp('c', {2}, {9,22.5}, nd4j::DataType::DOUBLE);
NDArray z('c', {2}, {100,100}, nd4j::DataType::DOUBLE);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.getShapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
(Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3_5) {
NDArray x('c', {2,2,3}, {1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::FLOAT32);
NDArray y('c', {2,2,3}, {1,2,3,4,5,6,7,8,9,10,11,12}, nd4j::DataType::FLOAT32);
NDArray exp('c', {2,3}, {7.5, 10.5, 13.5, 25.5, 28.5, 31.5}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,3}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.getShapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
(Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3All_1) {
NDArray x('c', {2,2}, {1,2,3,4}, nd4j::DataType::INT32);
NDArray y('c', {2,3}, {-1,1,-1,1,-1,1}, nd4j::DataType::INT32);
NDArray exp('c', {2,3}, {2,-2,2,2,-2,2}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,3}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {0};
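// execReduce3All computes the dot product between every TAD of x and every TAD of y:
// the 2 columns of x ({1,3} and {2,4}) against the 3 columns of y, giving the 2x3 result z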
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.getShapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4 -- yTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3All(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
(Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3All_2) {
NDArray x('c', {2,2}, {1,2,3,4}, nd4j::DataType::DOUBLE);
NDArray y('c', {2,3}, {1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE);
NDArray exp('c', {2,3}, {6,6,6,9,9,9}, nd4j::DataType::DOUBLE);
NDArray z('c', {2,3}, {100,100,100,100,100,100,},nd4j::DataType::DOUBLE);
std::vector<int> dimensions = {0};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// evaluate yTad data
shape::TAD yTad;
yTad.init(y.getShapeInfo(), dimensions.data(), dimensions.size());
yTad.createTadOnlyShapeInfo();
yTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
hostData.emplace_back(yTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(yTad.tadOnlyShapeInfo));// 3 -- yTadShapeInfo
hostData.emplace_back(yTad.tadOffsets, yTad.numTads * sizeof(Nd4jLong)); // 4-- yTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3All(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
(Nd4jLong*)devicePtrs[3], (Nd4jLong*)devicePtrs[4]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduce_1) {
NDArray x('c', {2,3}, {100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
x.linspace(-2.); x.syncToDevice();
NDArray exp('c', {2}, {2, 2}, nd4j::DataType::INT64);
NDArray z('c', {2}, {100,100}, nd4j::DataType::INT64);
std::vector<int> dimensions = {1};
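// x is filled with linspace(-2): {{-2,-1,0},{1,2,3}}; IndexMax along axis 1 returns the
// within-row index of the maximum, i.e. the last column (index 2) for both rows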
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execIndexReduce(&lc, nd4j::indexreduce::IndexMax,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduce_2) {
NDArray x('c', {2,3,4,5}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
x.linspace(-2.f); x.syncToDevice();
NDArray exp('c', {2,5}, {11,11,11,11,11,11,11,11,11,11}, nd4j::DataType::INT64);
NDArray z('c', {2,5}, {100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT64);
std::vector<int> dimensions = {1,2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execIndexReduce(&lc, nd4j::indexreduce::IndexMax,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execIndexReduce_3) {
NDArray x('c', {2,3,4,5}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
x.linspace(-2.); x.syncToDevice();
NDArray exp('c', {3}, {39, 39, 39}, nd4j::DataType::INT64);
NDArray z('c', {3}, {100,100,100}, nd4j::DataType::INT64);
std::vector<int> dimensions = {0,2,3};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execIndexReduce(&lc, nd4j::indexreduce::IndexMax,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalar_1) {
if (!Environment::getInstance()->isExperimentalBuild())
return;
NDArray x('c', {2,3}, {0,1,2,3,4,5}, nd4j::DataType::INT64);
NDArray exp('c',{2,3}, {0,0,1,1,2,2}, nd4j::DataType::INT64);
NDArray scalar('c',{0}, {2}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,3}, {100,100,100,100,100,100}, nd4j::DataType::INT64);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalar(&lc, nd4j::scalar::Divide,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, scalar.getShapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(),
nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalar_2) {
if (!Environment::getInstance()->isExperimentalBuild())
return;
NDArray x('c', {2,3}, {-1,-2,-3,-4,-5,-6}, nd4j::DataType::INT64);
NDArray exp('c',{2,3}, {10,10,10,10,10,10}, nd4j::DataType::FLOAT32);
NDArray scalar('c',{0}, {10}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,3}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalar(&lc, nd4j::scalar::CopyPws,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, scalar.getShapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(),
nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalar_3) {
if (!Environment::getInstance()->isExperimentalBuild())
return;
NDArray x('c', {2,3,2}, {0,1,2,3,4,5,6,7,8,9,10,11}, nd4j::DataType::INT64);
NDArray scalars('c',{2,2}, {1,2,3,4}, nd4j::DataType::FLOAT32);
NDArray exp('c', {2,3,2}, {0,0,2,1,4,2, 2,1,2,2,3,2}, nd4j::DataType::INT64);
NDArray z('c', {2,3,2}, {100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT64);
std::vector<int> dimensions = {1};
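// TADs are taken along axis 1 of the {2,3,2} input, so there are 2*2 = 4 TADs of length 3,
// one per scalar in scalars; each TAD is divided by its own scalar, with results truncated
// to integers since z is INT64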
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalar(&lc, nd4j::scalar::Divide,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, scalars.getShapeInfo(), scalars.specialBuffer(), scalars.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalarBool_1) {
NDArray x('c', {2,3}, {-1,-2,0,1,2,3}, nd4j::DataType::BFLOAT16);
NDArray scalar('c',{0}, {0}, nd4j::DataType::BFLOAT16);
NDArray exp('c',{2,3}, {0,0,0,1,1,1}, nd4j::DataType::BOOL);
NDArray z('c', {2,3}, {100,100,100,100,100,100,}, nd4j::DataType::BOOL);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalarBool(&lc, nd4j::scalar::GreaterThan,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, scalar.getShapeInfo(), scalar.specialBuffer(), scalar.specialShapeInfo(),
nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execScalarBool_2) {
NDArray x('c', {2,3}, {0,1,2,3,4,5}, nd4j::DataType::FLOAT32);
NDArray scalars('c',{2}, {-1,4}, nd4j::DataType::FLOAT32);
NDArray exp('c', {2,3}, {1,1,1,0,0,1}, nd4j::DataType::BOOL);
NDArray z('c', {2,3}, {100,100,100,100,100,100}, nd4j::DataType::BOOL);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execScalarBool(&lc, nd4j::scalar::GreaterThan,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, scalars.getShapeInfo(), scalars.specialBuffer(), scalars.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
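// For orientation: these tests use allocateDeviceMem(lc, devicePtrs, hostData) as "one device
// allocation plus one host-to-device copy per hostData entry". A rough sketch of that assumed
// behaviour follows; allocateDeviceMemSketch is a hypothetical name, and the real helper
// (defined elsewhere in this suite) may differ, e.g. by allocating from a workspace:
static inline cudaError_t allocateDeviceMemSketch(cudaStream_t stream, std::vector<void*>& devicePtrs, const std::vector<std::pair<void*, size_t>>& hostData) {
    for (size_t i = 0; i < hostData.size(); ++i) {
        cudaError_t err = cudaMalloc(&devicePtrs[i], hostData[i].second); // device buffer of matching byte size
        if (err != cudaSuccess) return err;
        err = cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice, stream); // copy host payload
        if (err != cudaSuccess) return err;
    }
    return cudaSuccess;
}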
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcast_1) {
if (!Environment::getInstance()->isExperimentalBuild())
return;
NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32);
NDArray y('c', {3}, {10, 20, 30}, nd4j::DataType::INT64);
NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32);
NDArray exp('c', {2,3,4}, {10,11,12,13, 24,25,26,27, 38,39,40,41, 22,23,24,25, 36,37,38,39, 50,51,52,53}, nd4j::DataType::INT32);
x.linspace(0); x.syncToDevice();
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcast(&lc, nd4j::broadcast::Add,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
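// Sanity check on the expected values above: execBroadcast with dimensions = {1} applies
// z[i,j,k] = x[i,j,k] + y[j] to each length-4 TAD of x, and x.linspace(0) fills x with 0..23,
// so the first three TADs are {0,1,2,3}+10, {4,5,6,7}+20 and {8,9,10,11}+30, i.e.
// {10,11,12,13}, {24,25,26,27}, {38,39,40,41} -- exactly the first half of exp.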
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcast_2) {
if (!Environment::getInstance()->isExperimentalBuild())
return;
NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32);
NDArray y('c', {2,4}, {10,20,30,40,50,60,70,80}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
NDArray exp('c', {2,3,4}, {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51., 62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., 103}, nd4j::DataType::FLOAT32);
x.linspace(0); x.syncToDevice();
std::vector<int> dimensions = {0,2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcast(&lc, nd4j::broadcast::Add,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcastBool_1) {
NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32);
NDArray y('c', {3}, {2, 12, 22}, nd4j::DataType::INT32);
NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::BOOL);
NDArray exp('c', {2,3,4}, {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0}, nd4j::DataType::BOOL);
x.linspace(1); x.syncToDevice();
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcastBool(&lc, nd4j::broadcast::EqualTo,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
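// Here exp follows from z[i,j,k] = (x[i,j,k] == y[j]) with x.linspace(1) filling x with 1..24:
// only x = 2 (against y[0] = 2) and x = 22 (against y[2] = 22) match, hence the two 1s in exp.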
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execBroadcastBool_2) {
NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100},nd4j::DataType::FLOAT32);
NDArray y('c', {2,4}, {1,10,10,15,20,20,20,24}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::BOOL);
NDArray exp('c', {2,3,4}, {1,0,0,0, 0,0,0,0, 0,1,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,1}, nd4j::DataType::BOOL);
x.linspace(1); x.syncToDevice();
std::vector<int> dimensions = {0,2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execBroadcastBool(&lc, nd4j::broadcast::EqualTo,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execPairwiseTransform_1) {
if (!Environment::getInstance()->isExperimentalBuild())
return;
NDArray x('c', {2,2,2}, {1,5,3,7,2,6,4,8}, nd4j::DataType::INT32);
NDArray y('c', {4,2}, {0.1,0.2,0.3,0.4,1.5,0.6,0.7,1.8}, nd4j::DataType::DOUBLE);
NDArray z('c', {8}, {100,100,100,100,100,100,100,100}, nd4j::DataType::INT32);
NDArray exp('c', {8}, {0,1,2,3,3,5,6,6}, nd4j::DataType::INT32);
x.permutei({2,1,0}); // -> {1,2,3,4,5,6,7,8}
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execPairwiseTransform(&lc, nd4j::pairwise::Subtract,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
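// Worked example for the expected values: after permutei({2,1,0}) the INT32 input reads
// {1,2,3,4,5,6,7,8} in linear order, Subtract with the DOUBLE y gives
// {0.9, 1.8, 2.7, 3.6, 3.5, 5.4, 6.3, 6.2}, and storing into the INT32 z truncates toward zero,
// which yields exp = {0,1,2,3,3,5,6,6}.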
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execPairwiseBoolTransform_1) {
NDArray x('c', {2,2,2}, {1,5,3,7,2,6,4,8}, nd4j::DataType::INT64);
NDArray y('c', {4,2}, {0,2,0,4,0,6,0,8}, nd4j::DataType::INT64);
NDArray z('c', {8}, {100,100,100,100,100,100,100,100}, nd4j::DataType::BOOL);
NDArray exp('c', {8}, {0,1,0,1,0,1,0,1}, nd4j::DataType::BOOL);
x.permutei({2,1,0}); // -> {1,2,3,4,5,6,7,8}
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execPairwiseBoolTransform(&lc, nd4j::pairwise::EqualTo,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformFloat_1) {
NDArray x('c', {2,2}, {0, 6.25, 2.25, 12.25}, nd4j::DataType::DOUBLE);
NDArray z('c', {4}, {100,100,100,100}, nd4j::DataType::FLOAT32);
NDArray exp('c', {4}, {0, 1.5, 2.5, 3.5}, nd4j::DataType::FLOAT32);
x.permutei({1,0});
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformFloat(&lc, nd4j::transform::Sqrt,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
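// After permutei({1,0}) the DOUBLE input reads {0, 2.25, 6.25, 12.25} in linear order,
// so Sqrt written into the FLOAT32 output gives exp = {0, 1.5, 2.5, 3.5}.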
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformFloat_2) {
NDArray x('c', {1,4}, {0, 4, 9, 16}, nd4j::DataType::INT64);
NDArray z('c', {2,2}, {100,100,100,100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {2,2}, {0, 2, 3, 4}, nd4j::DataType::DOUBLE);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformFloat(&lc, nd4j::transform::Sqrt,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformAny_1) {
NDArray x('c', {2,2}, {0, 6.25, 2.25, 12.25}, nd4j::DataType::DOUBLE);
NDArray z('c', {4,1}, {100,100,100,100}, nd4j::DataType::INT32);
NDArray exp('c', {4,1}, {0, 2, 6, 12}, nd4j::DataType::INT32);
x.permutei({1,0});
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformAny(&lc, nd4j::transform::Assign,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformAny_2) {
NDArray x('c', {1,4}, {0, 6.25, 2.25, 12.25}, nd4j::DataType::BFLOAT16);
NDArray z('c', {2,2}, {100,100,100,100}, nd4j::DataType::FLOAT32);
NDArray exp('c', {2,2}, {0, 6.25, 2.25, 12.25}, nd4j::DataType::FLOAT32);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformAny(&lc, nd4j::transform::Assign,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformStrict_1) {
NDArray x('c', {2,3}, {0,2,4,1,3,5}, nd4j::DataType::DOUBLE);
NDArray z('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {3,2}, {0, 3, 12, 27, 48, 75}, nd4j::DataType::DOUBLE);
x.permutei({1,0});
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformStrict(&lc, nd4j::transform::CubeDerivative,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
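// CubeDerivative computes 3*x^2; after the permute the input reads {0,1,2,3,4,5},
// hence exp = {0, 3, 12, 27, 48, 75} (the same values are reused in execTransformStrict_2 below).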
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformStrict_2) {
NDArray x('c', {6}, {0,1,2,3,4,5}, nd4j::DataType::FLOAT32);
NDArray z('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
NDArray exp('c', {3,2}, {0, 3, 12, 27, 48, 75}, nd4j::DataType::FLOAT32);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformStrict(&lc, nd4j::transform::CubeDerivative,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformSame_1) {
NDArray x('c', {2,3}, {0,2.5,4.5,1.5,3.5,5.5}, nd4j::DataType::DOUBLE);
NDArray z('c', {1,6}, {100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {1,6}, {0,2.25,6.25,12.25,20.25,30.25}, nd4j::DataType::DOUBLE);
x.permutei({1,0});
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformSame(&lc, nd4j::transform::Square,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformSame_2) {
NDArray x('c', {6}, {0,1,2,3,4,5}, nd4j::DataType::INT32);
NDArray z('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::INT32);
NDArray exp('c', {3,2}, {0,1,4,9,16,25}, nd4j::DataType::INT32);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformSame(&lc, nd4j::transform::Square,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformBool_1) {
NDArray x('c', {2,3}, {0,2,4,-1,-3,-5}, nd4j::DataType::DOUBLE);
NDArray z('c', {1,6}, {100,100,100,100,100,100}, nd4j::DataType::BOOL);
NDArray exp('c', {1,6}, {0,0,1,0,1,0}, nd4j::DataType::BOOL);
x.permutei({1,0});
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformBool(&lc, nd4j::transform::IsPositive,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
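// IsPositive maps strictly positive values to 1 and everything else (including 0) to 0;
// after the permute the input reads {0, -1, 2, -3, 4, -5}, giving exp = {0,0,1,0,1,0}.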
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execTransformBool_2) {
NDArray x('c', {6}, {0,-1,2,-3,4,-5}, nd4j::DataType::INT32);
NDArray z('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::BOOL);
NDArray exp('c', {3,2}, {0,0,1,0,1,0}, nd4j::DataType::BOOL);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// call cuda kernel which calculates result
NativeOpExecutioner::execTransformBool(&lc, nd4j::transform::IsPositive,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, nullptr, nullptr);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloat_1) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::INT32);
NDArray z('c', {3}, {100,100,100}, nd4j::DataType::FLOAT32);
NDArray exp('c', {3}, {2.5, 6.5, 10.5}, nd4j::DataType::FLOAT32);
x.permutei({2,1,0});
x.syncShape();
std::vector<int> dimensions = {0,2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceFloat(&lc, nd4j::reduce::Mean,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
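// Check on exp: after permutei({2,1,0}), reducing over dimensions {0,2} averages, for each j,
// the eight original elements x[:, j, :], i.e. (-5-4-3-2+7+8+9+10)/8 = 2.5, 52/8 = 6.5 and
// 84/8 = 10.5; the same slices summed (20, 52, 84) appear in execReduceSame_1 below.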
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloat_2) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::INT32);
NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {2,4}, {-1., 0., 1., 2., 11., 12., 13., 14.}, nd4j::DataType::DOUBLE);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceFloat(&lc, nd4j::reduce::Mean,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSame_1) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::INT32);
NDArray z('c', {3}, {100,100,100}, nd4j::DataType::INT32);
NDArray exp('c', {3}, {20, 52, 84}, nd4j::DataType::INT32);
x.permutei({2,1,0});
x.syncShape();
std::vector<int> dimensions = {0,2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceSame(&lc, nd4j::reduce::Sum,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSame_2) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, nd4j::DataType::FLOAT32);
NDArray exp('c', {2,4}, {-3., 0., 3., 6., 33., 36., 39., 42.}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceSame(&lc, nd4j::reduce::Sum,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBool_1) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, nd4j::DataType::INT32);
NDArray z('c', {3}, {100,100,100}, nd4j::DataType::BOOL);
NDArray exp('c', {3}, {0, 1, 1}, nd4j::DataType::BOOL);
x.permutei({2,1,0});
x.syncShape();
std::vector<int> dimensions = {0,2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceBool(&lc, nd4j::reduce::IsPositive,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBool_2) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, nd4j::DataType::BOOL);
NDArray exp('c', {2,4}, {1, 1, 1, 1, 0, 0, 0, 0}, nd4j::DataType::BOOL);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceBool(&lc, nd4j::reduce::IsPositive,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLong_1) {
NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, nd4j::DataType::INT32);
NDArray z('c', {3}, {100,100,100}, nd4j::DataType::INT64);
NDArray exp('c', {3}, {5,6,6}, nd4j::DataType::INT64);
x.permutei({2,1,0});
x.syncShape();
std::vector<int> dimensions = {0,2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceLong(&lc, nd4j::reduce::CountNonZero,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLong_2) {
NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,4}, {100,100,100,100,100,100,100,100}, nd4j::DataType::INT64);
NDArray exp('c', {2,4}, {3, 1, 3, 2, 2, 1, 2, 3}, nd4j::DataType::INT64);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceLong(&lc, nd4j::reduce::CountNonZero,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i)
cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
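// CountNonZero along dimension 1 counts the non-zeros in each 3-element column x[i, :, k];
// e.g. for (i=0, k=0) the column is {-5, -1, 3} -> 3 and for (i=0, k=1) it is {0, 0, 4} -> 1,
// matching the first two entries of exp.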
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloatScalar_1) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::INT32);
NDArray z('c', {0}, {100}, nd4j::DataType::FLOAT32);
NDArray exp('c', {0}, {6.5}, nd4j::DataType::FLOAT32);
x.permutei({2,1,0});
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceFloatScalar(&lc, nd4j::reduce::Mean,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
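// The scalar (full-array) reductions in this group need device-side scratch memory, which the
// tests supply as two 1 MB cudaMalloc'ed buffers registered via setReductionPointer /
// setAllocationPointer -- presumably used by the reduction kernels for intermediate per-block
// results. Note the buffers are not explicitly cudaFree'd in these tests; whether LaunchContext
// releases externally supplied pointers is not shown here.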
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceFloatScalar_2) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::INT32);
NDArray z('c', {0}, {100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {0}, {6.5}, nd4j::DataType::DOUBLE);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceFloatScalar(&lc, nd4j::reduce::Mean,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSameScalar_1) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::INT32);
NDArray z('c', {0}, {100}, nd4j::DataType::INT32);
NDArray exp('c', {0}, {156}, nd4j::DataType::INT32);
x.permutei({2,1,0});
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceSameScalar(&lc, nd4j::reduce::Sum,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceSameScalar_2) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18}, nd4j::DataType::DOUBLE);
NDArray z('c', {0}, {100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {0}, {156}, nd4j::DataType::DOUBLE);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceSameScalar(&lc, nd4j::reduce::Sum,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBoolScalar_1) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, nd4j::DataType::INT32);
NDArray z('c', {0}, {100}, nd4j::DataType::BOOL);
NDArray exp('c', {0}, {1}, nd4j::DataType::BOOL);
x.permutei({2,1,0});
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceBoolScalar(&lc, nd4j::reduce::IsPositive,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceBoolScalar_2) {
NDArray x('c', {2,3,4}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18}, nd4j::DataType::DOUBLE);
NDArray z('c', {0}, {100}, nd4j::DataType::BOOL);
NDArray exp('c', {0}, {1}, nd4j::DataType::BOOL);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceBoolScalar(&lc, nd4j::reduce::IsPositive,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLongScalar_1) {
NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, nd4j::DataType::INT32);
NDArray z('c', {0}, {100}, nd4j::DataType::INT64);
NDArray exp('c', {0}, {17}, nd4j::DataType::INT64);
x.permutei({2,1,0});
x.syncShape();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceLongScalar(&lc, nd4j::reduce::CountNonZero,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduceLongScalar_2) {
NDArray x('c', {2,3,4}, {-5,0,-3,0,-1,0,1,2,3,4,5,6,7,0,9,10,11,0,13,14,0,16,0,18}, nd4j::DataType::DOUBLE);
NDArray z('c', {0}, {100}, nd4j::DataType::INT64);
NDArray exp('c', {0}, {17}, nd4j::DataType::INT64);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
int* allocationPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
lc.setAllocationPointer(allocationPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduceLongScalar(&lc, nd4j::reduce::CountNonZero,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo());
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_1) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, nd4j::DataType::FLOAT32);
NDArray y('c', {2,2}, {1,2,3,4}, nd4j::DataType::FLOAT32);
NDArray exp('c', {3}, {10,20,30}, nd4j::DataType::DOUBLE);
NDArray z('c', {3}, {100,100,100}, nd4j::DataType::DOUBLE);
std::vector<int> dimensions = {0,1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for prepareDataForCuda function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
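// Worked example for exp: with dimensions = {0,1} each TAD of x is the 2x2 slice x[:, :, k],
// and Dot pairs it elementwise with y = {1,2,3,4}; for k = 0 that is
// (-5)*1 + (-2)*2 + 1*3 + 4*4 = 10, and k = 1, 2 give 20 and 30.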
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_2) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, nd4j::DataType::INT64);
NDArray y('c', {2,3}, {1,2,3,4,5,6}, nd4j::DataType::INT64);
NDArray exp('c', {2}, {10,73}, nd4j::DataType::FLOAT32);
NDArray z('c', {2}, {100,100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {0,2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for allocateDeviceMem function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_3) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, nd4j::DataType::INT64);
NDArray y('c', {3}, {1,2,3}, nd4j::DataType::INT64);
NDArray exp('c', {2,2}, {-22,-4,14,32}, nd4j::DataType::FLOAT32);
NDArray z('c', {2,2}, {100,100,100,100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for allocateDeviceMem function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execReduce3TAD_4) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, nd4j::DataType::DOUBLE);
NDArray y('c', {2,2,3}, {10,20,30,40,50,60,70,80,90,100,110,120}, nd4j::DataType::DOUBLE);
NDArray exp('c', {0}, {1820}, nd4j::DataType::FLOAT32);
NDArray z('c', {0}, {100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {0,1,2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for allocateDeviceMem function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execReduce3TAD(&lc, nd4j::reduce3::Dot,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStats_1) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, nd4j::DataType::INT64);
NDArray exp('c', {0}, {3.605551}, nd4j::DataType::FLOAT32);
NDArray z('c', {0}, {100}, nd4j::DataType::FLOAT32);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStats(&lc, nd4j::variance::SummaryStatsStandardDeviation,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
true);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStats_2) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-20,-1,0,1,2,3,4,5,6}, nd4j::DataType::DOUBLE);
NDArray exp('c', {2}, {3.405877, 9.715966}, nd4j::DataType::FLOAT32);
NDArray z('c', {2}, {100,100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {0,2};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for allocateDeviceMem function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStats(&lc, nd4j::variance::SummaryStatsStandardDeviation,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
true);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStats_3) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-20,-1,0,1,2,3,4,5,6}, nd4j::DataType::DOUBLE);
NDArray exp('c', {2}, {10.606602, 2.121320}, nd4j::DataType::FLOAT32);
NDArray z('c', {2}, {100,100}, nd4j::DataType::FLOAT32);
std::vector<int> dimensions = {1};
// evaluate xTad data
shape::TAD xTad;
xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
// prepare input arrays for allocateDeviceMem function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions
hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));// 1 -- xTadShapeInfo
hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStats(&lc, nd4j::variance::SummaryStatsStandardDeviation,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
(int*)devicePtrs[0], dimensions.size(),
(Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2],
true);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
////////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execSummaryStatsScalar_1) {
NDArray x('c', {2,2,3}, {-5,-4,-3,-2,-1,0,1,2,3,4,5,6}, nd4j::DataType::INT64);
NDArray exp('c', {0}, {3.605551}, nd4j::DataType::FLOAT32);
NDArray z('c', {0}, {100}, nd4j::DataType::FLOAT32);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
void* reductionPointer;
cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); ASSERT_EQ(0, cudaResult);
lc.setReductionPointer(reductionPointer);
// call cuda kernel which calculates result
NativeOpExecutioner::execSummaryStatsScalar(&lc, nd4j::variance::SummaryStatsStandardDeviation,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
true);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_1) {
NDArray z('c', {10}, {100,0,0,0,0,0,0,0,0,0}, nd4j::DataType::DOUBLE);
NDArray exp('c', {10}, {0.050942, -0.183229, -0.093921, 0.075469, 0.257166, -0.254838, 0.342227, -0.682188, -0.004345, 0.464633}, nd4j::DataType::DOUBLE);
std::vector<double> extraArguments = {0., 0.5};
nd4j::graph::RandomGenerator gen(119,5);
// prepare input arrays for allocateDeviceMem function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- extraArguments
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execRandom(&lc, nd4j::random::GaussianDistribution,
&gen,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
devicePtrs[0]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_2) {
NDArray x('c', {10}, {0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1}, nd4j::DataType::DOUBLE);
NDArray z('c', {2,5}, {100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {10}, {0., 0., 0.3, 0., 0.5, 0., 0.7, 0., 0., 1.}, nd4j::DataType::DOUBLE);
std::vector<double> extraArguments = {0.7};
nd4j::graph::RandomGenerator gen(119,5);
// prepare input arrays for allocateDeviceMem function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- extraArguments
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execRandom(&lc, nd4j::random::DropOut,
&gen,
nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
devicePtrs[0]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_3) {
NDArray z('c', {10}, {100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
NDArray exp('c', {10}, {2.373649, 2.239791, 1.887353, 2.488636, 2.068904, 2.281399, 1.828228, 2.228222, 2.490847, 1.669537}, nd4j::DataType::DOUBLE);
std::vector<double> extraArguments = {1.5, 2.5};
nd4j::graph::RandomGenerator gen(119,5);
// prepare input arrays for allocateDeviceMem function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- extraArguments
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execRandom(&lc, nd4j::random::UniformDistribution,
&gen,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
devicePtrs[0]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
//////////////////////////////////////////////////////////////////////////
TEST_F(CudaBasicsTests1, execRandom_4) {
NDArray z('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE);
NDArray exp('c', {10}, {2.373649, 2.239791, 1.887353, 2.488636, 2.068904, 2.281399, 1.828228, 2.228222, 2.490847, 1.669537}, nd4j::DataType::DOUBLE);
z.permutei({1,0});
std::vector<double> extraArguments = {1.5, 2.5};
nd4j::graph::RandomGenerator gen(119,5);
// prepare input arrays for allocateDeviceMem function
std::vector<std::pair<void*,size_t>> hostData;
hostData.emplace_back(extraArguments.data(), extraArguments.size() * sizeof(double)); // 0 -- extraArguments
std::vector<void*> devicePtrs(hostData.size(), nullptr);
// create cuda stream and LaunchContext
cudaError_t cudaResult;
cudaStream_t stream;
cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
LaunchContext lc(&stream);
// allocate required amount of global device memory and copy host data to it
cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
// call cuda kernel which calculates result
NativeOpExecutioner::execRandom(&lc, nd4j::random::UniformDistribution,
&gen,
nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
devicePtrs[0]);
cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
z.syncToHost();
// verify results
for (int e = 0; e < z.lengthOf(); e++)
ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
// free allocated global device memory
for(int i = 0; i < devicePtrs.size(); ++i) cudaFree(devicePtrs[i]);
// delete cuda stream
cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
|
2b809378aceb91c30f63670d6e12c54ccff8bd97.hip | // !!! This is a file automatically generated by hipify!!!
#include "FFT.h"
#include "hipfft.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define C(X) if((X)!=hipSuccess) return false
bool FFT(float* input, std::complex<float>* output, const unsigned long size, const unsigned long batch) {
hipfftReal *in, *in_h;
hipfftComplex *out, *out_h;
if (hipMalloc((void**)&in, size * batch * sizeof(hipfftReal)) != hipSuccess)
return false;
if (hipHostMalloc((void**)&in_h, size * batch * sizeof(hipfftReal)) != hipSuccess) {
hipFree(in);
return false;
}
if (hipMalloc((void**)&out, (size / 2 + 1) * batch * sizeof(hipfftComplex)) != hipSuccess) {
hipFree(in);
hipHostFree(in_h);
return false;
}
if (hipHostMalloc((void**)&out_h, (size / 2 + 1) * batch * sizeof(hipfftComplex)) != hipSuccess) {
hipFree(in);
hipHostFree(in_h);
hipFree(out);
return false;
}
auto p = in_h;
for (unsigned long i = 0; i < size* batch; ++i)
*p++ = (hipfftReal)*input++;
if (hipMemcpy(in, in_h, size * batch * sizeof(hipfftReal), hipMemcpyHostToDevice) != hipSuccess) {
hipFree(in);
hipHostFree(in_h);
hipFree(out);
hipHostFree(out_h);
return false;
}
hipfftHandle plan;
//hipfftPlan1d(&plan, size, HIPFFT_R2C, batch);
int n = size;
hipfftPlanMany(&plan, 1, &n,
NULL, 1, size,
NULL, 1, (size / 2 + 1), HIPFFT_R2C, batch);
hipfftExecR2C(plan, in, out);
hipDeviceSynchronize();
if (hipMemcpy(out_h, out, (size / 2 + 1) * batch * sizeof(hipfftComplex), hipMemcpyDeviceToHost) != hipSuccess) {
hipFree(in);
hipHostFree(in_h);
hipFree(out);
hipHostFree(out_h);
return false;
}
hipfftDestroy(plan);
for (unsigned long i = 0; i < (size / 2 + 1) * batch; ++i)
output[i] = std::complex<float>(out_h[i].x, out_h[i].y);
hipFree(in);
hipHostFree(in_h);
hipFree(out);
hipHostFree(out_h);
return true;
}
| 2b809378aceb91c30f63670d6e12c54ccff8bd97.cu | #include "FFT.h"
#include "cufft.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define C(X) if((X)!=cudaSuccess) return false
bool FFT(float* input, std::complex<float>* output, const unsigned long size, const unsigned long batch) {
cufftReal *in, *in_h;
cufftComplex *out, *out_h;
if (cudaMalloc((void**)&in, size * batch * sizeof(cufftReal)) != cudaSuccess)
return false;
if (cudaMallocHost((void**)&in_h, size * batch * sizeof(cufftReal)) != cudaSuccess) {
cudaFree(in);
return false;
}
if (cudaMalloc((void**)&out, (size / 2 + 1) * batch * sizeof(cufftComplex)) != cudaSuccess) {
cudaFree(in);
cudaFreeHost(in_h);
return false;
}
if (cudaMallocHost((void**)&out_h, (size / 2 + 1) * batch * sizeof(cufftComplex)) != cudaSuccess) {
cudaFree(in);
cudaFreeHost(in_h);
cudaFree(out);
return false;
}
auto p = in_h;
for (unsigned long i = 0; i < size* batch; ++i)
*p++ = (cufftReal)*input++;
if (cudaMemcpy(in, in_h, size * batch * sizeof(cufftReal), cudaMemcpyHostToDevice) != cudaSuccess) {
cudaFree(in);
cudaFreeHost(in_h);
cudaFree(out);
cudaFreeHost(out_h);
return false;
}
cufftHandle plan;
//cufftPlan1d(&plan, size, CUFFT_R2C, batch);
int n = size;
cufftPlanMany(&plan, 1, &n,
NULL, 1, size,
NULL, 1, (size / 2 + 1), CUFFT_R2C, batch);
cufftExecR2C(plan, in, out);
cudaDeviceSynchronize();
if (cudaMemcpy(out_h, out, (size / 2 + 1) * batch * sizeof(cufftComplex), cudaMemcpyDeviceToHost) != cudaSuccess) {
cudaFree(in);
cudaFreeHost(in_h);
cudaFree(out);
cudaFreeHost(out_h);
return false;
}
cufftDestroy(plan);
for (unsigned long i = 0; i < (size / 2 + 1) * batch; ++i)
output[i] = std::complex<float>(out_h[i].x, out_h[i].y);
cudaFree(in);
cudaFreeHost(in_h);
cudaFree(out);
cudaFreeHost(out_h);
return true;
}
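// Minimal usage sketch for the FFT() wrapper above. Everything below is illustrative:
// the FFT_USAGE_EXAMPLE guard, the tone frequencies and the sizes are made up, and the
// only assumption about FFT.h is the signature already used in this file.
#ifdef FFT_USAGE_EXAMPLE
#include <cmath>
#include <complex>
#include <cstdio>
#include <vector>
int main() {
    const unsigned long size = 1024, batch = 4;
    std::vector<float> input(size * batch);
    for (unsigned long b = 0; b < batch; ++b)
        for (unsigned long i = 0; i < size; ++i)
            input[b * size + i] = std::sin(2.0f * 3.14159265f * (float)((b + 1) * i) / (float)size); // one tone per batch
    std::vector<std::complex<float>> output((size / 2 + 1) * batch); // R2C output is size/2+1 bins per batch
    if (!FFT(input.data(), output.data(), size, batch)) {
        std::printf("FFT failed\n");
        return 1;
    }
    for (unsigned long b = 0; b < batch; ++b) // bin b+1 of batch b should carry most of the energy
        std::printf("batch %lu: |X[%lu]| = %f\n", b, b + 1, std::abs(output[b * (size / 2 + 1) + b + 1]));
    return 0;
}
#endif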
|
70904f7182aa492fa56d50a62d0dd88a56f543b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <transfer.h>
#include <quda_internal.h>
#include <quda_matrix.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <index_helper.cuh>
#include <color_spinor.h>
#include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <tune_quda.h>
namespace quda {
template <typename Float, int Ns, int Nc, QudaReconstructType gRecon>
struct WuppertalSmearingArg {
typedef typename colorspinor_mapper<Float,Ns,Nc>::type F;
typedef typename gauge_mapper<Float,gRecon>::type G;
F out; // output vector field
const F in; // input vector field
const G U; // the gauge field
const Float A; // A parameter
const Float B; // B parameter
const int parity; // only use this for single parity fields
const int nParity; // number of parities we're working on
const int nFace; // hard code to 1 for now
const int dim[5]; // full lattice dimensions
const int commDim[4]; // whether a given dimension is partitioned or not
const int volumeCB; // checkerboarded volume
WuppertalSmearingArg(ColorSpinorField &out, const ColorSpinorField &in, int parity, const GaugeField &U,
Float A, Float B)
: out(out), in(in), U(U), A(A), B(B), parity(parity), nParity(in.SiteSubset()), nFace(1),
dim{ (3-nParity) * in.X(0), in.X(1), in.X(2), in.X(3), 1 },
commDim{comm_dim_partitioned(0), comm_dim_partitioned(1), comm_dim_partitioned(2), comm_dim_partitioned(3)},
volumeCB(in.VolumeCB())
{
if (in.FieldOrder() != QUDA_FLOAT2_FIELD_ORDER || !U.isNative())
errorQuda("Unsupported field order colorspinor=%d gauge=%d combination\n", in.FieldOrder(), U.FieldOrder());
}
};
/**
Computes out = sum_mu U_mu(x)in(x+d) + U^\dagger_mu(x-d)in(x-d)
@param[out] out The out result field
@param[in] U The gauge field
@param[in] in The input field
@param[in] x_cb The checkerboarded site index
@param[in] parity The site parity
*/
template <typename Float, int Nc, typename Vector, typename Arg>
__device__ __host__ inline void computeNeighborSum(Vector &out, Arg &arg, int x_cb, int parity) {
typedef Matrix<complex<Float>,Nc> Link;
const int their_spinor_parity = (arg.nParity == 2) ? 1-parity : 0;
int coord[5];
getCoords(coord, x_cb, arg.dim, parity);
coord[4] = 0;
#pragma unroll
for (int dir=0; dir<3; dir++) { // loop over spatial directions
//Forward gather - compute fwd offset for vector fetch
const int fwd_idx = linkIndexP1(coord, arg.dim, dir);
if ( arg.commDim[dir] && (coord[dir] + arg.nFace >= arg.dim[dir]) ) {
const int ghost_idx = ghostFaceIndex<1>(coord, arg.dim, dir, arg.nFace);
const Link U = arg.U(dir, x_cb, parity);
const Vector in = arg.in.Ghost(dir, 1, ghost_idx, their_spinor_parity);
out += U * in;
} else {
const Link U = arg.U(dir, x_cb, parity);
const Vector in = arg.in(fwd_idx, their_spinor_parity);
out += U * in;
}
//Backward gather - compute back offset for spinor and gauge fetch
const int back_idx = linkIndexM1(coord, arg.dim, dir);
const int gauge_idx = back_idx;
if ( arg.commDim[dir] && (coord[dir] - arg.nFace < 0) ) {
const int ghost_idx = ghostFaceIndex<0>(coord, arg.dim, dir, arg.nFace);
const Link U = arg.U.Ghost(dir, ghost_idx, 1-parity);
const Vector in = arg.in.Ghost(dir, 0, ghost_idx, their_spinor_parity);
out += conj(U) * in;
} else {
const Link U = arg.U(dir, gauge_idx, 1-parity);
const Vector in = arg.in(back_idx, their_spinor_parity);
out += conj(U) * in;
}
}
}
//out(x) = A in(x) + B computeNeighborSum(out, x)
template <typename Float, int Ns, int Nc, typename Arg>
__device__ __host__ inline void computeWuppertalStep(Arg &arg, int x_cb, int parity)
{
typedef ColorSpinor<Float,Nc,Ns> Vector;
Vector out;
computeNeighborSum<Float,Nc>(out, arg, x_cb, parity);
Vector in;
arg.in.load((Float*)in.data, x_cb, parity);
out = arg.A*in + arg.B*out;
arg.out(x_cb, parity) = out;
}
// CPU kernel for applying a wuppertal smearing step to a vector
template <typename Float, int Ns, int Nc, typename Arg>
void wuppertalStepCPU(Arg arg)
{
for (int parity= 0; parity < arg.nParity; parity++) {
// for full fields then set parity from loop else use arg setting
parity = (arg.nParity == 2) ? parity : arg.parity;
for (int x_cb = 0; x_cb < arg.volumeCB; x_cb++) { // 4-d volume
computeWuppertalStep<Float,Ns,Nc>(arg, x_cb, parity);
} // 4-d volumeCB
} // parity
}
// GPU Kernel for applying a wuppertal smearing step to a vector
template <typename Float, int Ns, int Nc, typename Arg>
__global__ void wuppertalStepGPU(Arg arg)
{
int x_cb = blockIdx.x*blockDim.x + threadIdx.x;
// for full fields set parity from y thread index else use arg setting
int parity = blockDim.y*blockIdx.y + threadIdx.y;
if (x_cb >= arg.volumeCB) return;
if (parity >= arg.nParity) return;
parity = (arg.nParity == 2) ? parity : arg.parity;
computeWuppertalStep<Float,Ns,Nc>(arg, x_cb, parity);
}
template <typename Float, int Ns, int Nc, typename Arg>
class WuppertalSmearing : public TunableVectorY {
protected:
Arg &arg;
const ColorSpinorField &meta;
long long flops() const
{
return (2*3*Ns*Nc*(8*Nc-2) + 2*3*Nc*Ns )*arg.nParity*(long long)meta.VolumeCB();
}
long long bytes() const
{
return arg.out.Bytes() + (2*3+1)*arg.in.Bytes() + arg.nParity*2*3*arg.U.Bytes()*meta.VolumeCB();
}
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.volumeCB; }
unsigned int maxBlockSize() const { return deviceProp.maxThreadsPerBlock / arg.nParity; }
public:
WuppertalSmearing(Arg &arg, const ColorSpinorField &meta) : TunableVectorY(arg.nParity), arg(arg), meta(meta)
{
strcpy(aux, meta.AuxString());
strcat(aux, comm_dim_partitioned_string());
}
virtual ~WuppertalSmearing() { }
void apply(const hipStream_t &stream) {
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
wuppertalStepCPU<Float,Ns,Nc>(arg);
} else {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( wuppertalStepGPU<Float,Ns,Nc>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
};
template<typename Float, int Ns, int Nc, QudaReconstructType gRecon>
void wuppertalStep(ColorSpinorField &out, const ColorSpinorField &in, int parity,
const GaugeField& U, double A, double B)
{
WuppertalSmearingArg<Float,Ns,Nc,gRecon> arg(out, in, parity, U, A, B);
WuppertalSmearing<Float,Ns,Nc,WuppertalSmearingArg<Float,Ns,Nc,gRecon> > wuppertal(arg, in);
wuppertal.apply(0);
}
// template on the gauge reconstruction
template<typename Float, int Ns, int Nc>
void wuppertalStep(ColorSpinorField &out, const ColorSpinorField &in, int parity,
const GaugeField& U, double A, double B)
{
if (U.Reconstruct() == QUDA_RECONSTRUCT_NO) {
wuppertalStep<Float,Ns,Nc,QUDA_RECONSTRUCT_NO>(out, in, parity, U, A, B);
} else if(U.Reconstruct() == QUDA_RECONSTRUCT_12) {
wuppertalStep<Float,Ns,Nc,QUDA_RECONSTRUCT_12>(out, in, parity, U, A, B);
} else if(U.Reconstruct() == QUDA_RECONSTRUCT_8) {
wuppertalStep<Float,Ns,Nc,QUDA_RECONSTRUCT_8>(out, in, parity, U, A, B);
} else {
errorQuda("Reconstruction type %d of origin gauge field not supported", U.Reconstruct());
}
}
// template on the number of colors
template<typename Float, int Ns>
void wuppertalStep(ColorSpinorField &out, const ColorSpinorField &in, int parity,
const GaugeField& U, double A, double B)
{
if (out.Ncolor() != in.Ncolor()) {
errorQuda("Orign and destination fields must have the same number of colors\n");
}
if (out.Ncolor() == 3 ) {
wuppertalStep<Float,Ns,3>(out, in, parity, U, A, B);
} else {
errorQuda(" is not implemented for Ncolor!=3");
}
}
// template on the number of spins
template<typename Float>
void wuppertalStep(ColorSpinorField &out, const ColorSpinorField &in, int parity,
const GaugeField& U, double A, double B)
{
if(out.Nspin() != in.Nspin()) {
errorQuda("Orign and destination fields must have the same number of spins\n");
}
if (out.Nspin() == 4 ){
wuppertalStep<Float,4>(out, in, parity, U, A, B);
}else if (in.Nspin() == 1 ){
wuppertalStep<Float,1>(out, in, parity, U, A, B);
}else{
errorQuda("Nspin %d not supported", out.Nspin());
}
}
/**
Apply a generic Wuppertal smearing step
Computes out(x) = A*in(x) + B*\sum_mu (U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu))
@param[out] out The out result field
@param[in] in The in spinor field
@param[in] U The gauge field
@param[in] A The scaling factor for in(x)
@param[in] B The scaling factor for \sum_mu (U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu))
*/
// template on the precision
void wuppertalStep(ColorSpinorField &out, const ColorSpinorField &in, int parity,
const GaugeField& U, double A, double B)
{
if (in.V() == out.V()) {
errorQuda("Orign and destination fields must be different pointers");
}
// check precisions match
checkPrecision(out, in, U);
// check all locations match
checkLocation(out, in, U);
const int nFace = 1;
in.exchangeGhost((QudaParity)(1-parity), nFace, 0); // last parameter is dummy
if (out.Precision() == QUDA_SINGLE_PRECISION){
wuppertalStep<float>(out, in, parity, U, A, B);
} else if(out.Precision() == QUDA_DOUBLE_PRECISION) {
wuppertalStep<double>(out, in, parity, U, A, B);
} else {
errorQuda("Precision %d not supported", out.Precision());
}
in.bufferIndex = (1 - in.bufferIndex);
return;
}
/**
Apply a standard Wuppertal smearing step
Computes out(x) = 1/(1+6*alpha)*(in(x) + alpha*\sum_mu (U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu)))
@param[out] out The out result field
@param[in] in The in spinor field
@param[in] U The gauge field
@param[in] alpha The smearing parameter
*/
void wuppertalStep(ColorSpinorField &out, const ColorSpinorField &in, int parity, const GaugeField& U, double alpha)
{
wuppertalStep(out, in, parity, U, 1./(1.+6.*alpha), alpha/(1.+6.*alpha));
}
} // namespace quda
| 70904f7182aa492fa56d50a62d0dd88a56f543b0.cu | #include <transfer.h>
#include <quda_internal.h>
#include <quda_matrix.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <index_helper.cuh>
#include <color_spinor.h>
#include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <tune_quda.h>
namespace quda {
template <typename Float, int Ns, int Nc, QudaReconstructType gRecon>
struct WuppertalSmearingArg {
typedef typename colorspinor_mapper<Float,Ns,Nc>::type F;
typedef typename gauge_mapper<Float,gRecon>::type G;
F out; // output vector field
const F in; // input vector field
const G U; // the gauge field
const Float A; // A parameter
const Float B; // B parameter
const int parity; // only use this for single parity fields
const int nParity; // number of parities we're working on
const int nFace; // hard code to 1 for now
const int dim[5]; // full lattice dimensions
const int commDim[4]; // whether a given dimension is partitioned or not
const int volumeCB; // checkerboarded volume
WuppertalSmearingArg(ColorSpinorField &out, const ColorSpinorField &in, int parity, const GaugeField &U,
Float A, Float B)
: out(out), in(in), U(U), A(A), B(B), parity(parity), nParity(in.SiteSubset()), nFace(1),
dim{ (3-nParity) * in.X(0), in.X(1), in.X(2), in.X(3), 1 },
commDim{comm_dim_partitioned(0), comm_dim_partitioned(1), comm_dim_partitioned(2), comm_dim_partitioned(3)},
volumeCB(in.VolumeCB())
{
if (in.FieldOrder() != QUDA_FLOAT2_FIELD_ORDER || !U.isNative())
errorQuda("Unsupported field order colorspinor=%d gauge=%d combination\n", in.FieldOrder(), U.FieldOrder());
}
};
/**
Computes out = sum_mu U_mu(x)in(x+d) + U^\dagger_mu(x-d)in(x-d)
@param[out] out The out result field
@param[in] U The gauge field
@param[in] in The input field
@param[in] x_cb The checkerboarded site index
@param[in] parity The site parity
*/
template <typename Float, int Nc, typename Vector, typename Arg>
__device__ __host__ inline void computeNeighborSum(Vector &out, Arg &arg, int x_cb, int parity) {
typedef Matrix<complex<Float>,Nc> Link;
const int their_spinor_parity = (arg.nParity == 2) ? 1-parity : 0;
int coord[5];
getCoords(coord, x_cb, arg.dim, parity);
coord[4] = 0;
#pragma unroll
for (int dir=0; dir<3; dir++) { // loop over spatial directions
//Forward gather - compute fwd offset for vector fetch
const int fwd_idx = linkIndexP1(coord, arg.dim, dir);
if ( arg.commDim[dir] && (coord[dir] + arg.nFace >= arg.dim[dir]) ) {
const int ghost_idx = ghostFaceIndex<1>(coord, arg.dim, dir, arg.nFace);
const Link U = arg.U(dir, x_cb, parity);
const Vector in = arg.in.Ghost(dir, 1, ghost_idx, their_spinor_parity);
out += U * in;
} else {
const Link U = arg.U(dir, x_cb, parity);
const Vector in = arg.in(fwd_idx, their_spinor_parity);
out += U * in;
}
//Backward gather - compute back offset for spinor and gauge fetch
const int back_idx = linkIndexM1(coord, arg.dim, dir);
const int gauge_idx = back_idx;
if ( arg.commDim[dir] && (coord[dir] - arg.nFace < 0) ) {
const int ghost_idx = ghostFaceIndex<0>(coord, arg.dim, dir, arg.nFace);
const Link U = arg.U.Ghost(dir, ghost_idx, 1-parity);
const Vector in = arg.in.Ghost(dir, 0, ghost_idx, their_spinor_parity);
out += conj(U) * in;
} else {
const Link U = arg.U(dir, gauge_idx, 1-parity);
const Vector in = arg.in(back_idx, their_spinor_parity);
out += conj(U) * in;
}
}
}
//out(x) = A in(x) + B computeNeighborSum(out, x)
template <typename Float, int Ns, int Nc, typename Arg>
__device__ __host__ inline void computeWuppertalStep(Arg &arg, int x_cb, int parity)
{
typedef ColorSpinor<Float,Nc,Ns> Vector;
Vector out;
computeNeighborSum<Float,Nc>(out, arg, x_cb, parity);
Vector in;
arg.in.load((Float*)in.data, x_cb, parity);
out = arg.A*in + arg.B*out;
arg.out(x_cb, parity) = out;
}
// CPU kernel for applying a wuppertal smearing step to a vector
template <typename Float, int Ns, int Nc, typename Arg>
void wuppertalStepCPU(Arg arg)
{
for (int parity= 0; parity < arg.nParity; parity++) {
// for full fields then set parity from loop else use arg setting
parity = (arg.nParity == 2) ? parity : arg.parity;
for (int x_cb = 0; x_cb < arg.volumeCB; x_cb++) { // 4-d volume
computeWuppertalStep<Float,Ns,Nc>(arg, x_cb, parity);
} // 4-d volumeCB
} // parity
}
// GPU Kernel for applying a wuppertal smearing step to a vector
template <typename Float, int Ns, int Nc, typename Arg>
__global__ void wuppertalStepGPU(Arg arg)
{
int x_cb = blockIdx.x*blockDim.x + threadIdx.x;
// for full fields set parity from y thread index else use arg setting
int parity = blockDim.y*blockIdx.y + threadIdx.y;
if (x_cb >= arg.volumeCB) return;
if (parity >= arg.nParity) return;
parity = (arg.nParity == 2) ? parity : arg.parity;
computeWuppertalStep<Float,Ns,Nc>(arg, x_cb, parity);
}
template <typename Float, int Ns, int Nc, typename Arg>
class WuppertalSmearing : public TunableVectorY {
protected:
Arg &arg;
const ColorSpinorField &meta;
long long flops() const
{
return (2*3*Ns*Nc*(8*Nc-2) + 2*3*Nc*Ns )*arg.nParity*(long long)meta.VolumeCB();
}
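// Rough accounting behind flops(): each of the 2*3 link applications in computeNeighborSum
// (forward/backward in the three spatial directions) multiplies an Nc x Nc complex matrix into an
// Nc-component vector for every one of the Ns spin components; a complex matrix-vector product
// costs 8*Nc-2 real flops per output component (Nc complex multiplies plus Nc-1 complex adds),
// which gives the 2*3*Ns*Nc*(8*Nc-2) term. The smaller 2*3*Nc*Ns term presumably covers the
// remaining accumulations.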
long long bytes() const
{
return arg.out.Bytes() + (2*3+1)*arg.in.Bytes() + arg.nParity*2*3*arg.U.Bytes()*meta.VolumeCB();
}
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.volumeCB; }
unsigned int maxBlockSize() const { return deviceProp.maxThreadsPerBlock / arg.nParity; }
public:
WuppertalSmearing(Arg &arg, const ColorSpinorField &meta) : TunableVectorY(arg.nParity), arg(arg), meta(meta)
{
strcpy(aux, meta.AuxString());
strcat(aux, comm_dim_partitioned_string());
}
virtual ~WuppertalSmearing() { }
void apply(const cudaStream_t &stream) {
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
wuppertalStepCPU<Float,Ns,Nc>(arg);
} else {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
wuppertalStepGPU<Float,Ns,Nc> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
};
template<typename Float, int Ns, int Nc, QudaReconstructType gRecon>
void wuppertalStep(ColorSpinorField &out, const ColorSpinorField &in, int parity,
const GaugeField& U, double A, double B)
{
WuppertalSmearingArg<Float,Ns,Nc,gRecon> arg(out, in, parity, U, A, B);
WuppertalSmearing<Float,Ns,Nc,WuppertalSmearingArg<Float,Ns,Nc,gRecon> > wuppertal(arg, in);
wuppertal.apply(0);
}
// template on the gauge reconstruction
template<typename Float, int Ns, int Nc>
void wuppertalStep(ColorSpinorField &out, const ColorSpinorField &in, int parity,
const GaugeField& U, double A, double B)
{
if (U.Reconstruct() == QUDA_RECONSTRUCT_NO) {
wuppertalStep<Float,Ns,Nc,QUDA_RECONSTRUCT_NO>(out, in, parity, U, A, B);
} else if(U.Reconstruct() == QUDA_RECONSTRUCT_12) {
wuppertalStep<Float,Ns,Nc,QUDA_RECONSTRUCT_12>(out, in, parity, U, A, B);
} else if(U.Reconstruct() == QUDA_RECONSTRUCT_8) {
wuppertalStep<Float,Ns,Nc,QUDA_RECONSTRUCT_8>(out, in, parity, U, A, B);
} else {
errorQuda("Reconstruction type %d of origin gauge field not supported", U.Reconstruct());
}
}
// template on the number of colors
template<typename Float, int Ns>
void wuppertalStep(ColorSpinorField &out, const ColorSpinorField &in, int parity,
const GaugeField& U, double A, double B)
{
if (out.Ncolor() != in.Ncolor()) {
errorQuda("Orign and destination fields must have the same number of colors\n");
}
if (out.Ncolor() == 3 ) {
wuppertalStep<Float,Ns,3>(out, in, parity, U, A, B);
} else {
errorQuda(" is not implemented for Ncolor!=3");
}
}
// template on the number of spins
template<typename Float>
void wuppertalStep(ColorSpinorField &out, const ColorSpinorField &in, int parity,
const GaugeField& U, double A, double B)
{
if(out.Nspin() != in.Nspin()) {
errorQuda("Orign and destination fields must have the same number of spins\n");
}
if (out.Nspin() == 4 ){
wuppertalStep<Float,4>(out, in, parity, U, A, B);
}else if (in.Nspin() == 1 ){
wuppertalStep<Float,1>(out, in, parity, U, A, B);
}else{
errorQuda("Nspin %d not supported", out.Nspin());
}
}
/**
Apply a generic Wuppertal smearing step
Computes out(x) = A*in(x) + B*\sum_mu (U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu))
@param[out] out The out result field
@param[in] in The in spinor field
@param[in] U The gauge field
@param[in] A The scaling factor for in(x)
@param[in] B The scaling factor for \sum_mu (U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu))
*/
// template on the precision
void wuppertalStep(ColorSpinorField &out, const ColorSpinorField &in, int parity,
const GaugeField& U, double A, double B)
{
if (in.V() == out.V()) {
errorQuda("Orign and destination fields must be different pointers");
}
// check precisions match
checkPrecision(out, in, U);
// check all locations match
checkLocation(out, in, U);
const int nFace = 1;
in.exchangeGhost((QudaParity)(1-parity), nFace, 0); // last parameter is dummy
if (out.Precision() == QUDA_SINGLE_PRECISION){
wuppertalStep<float>(out, in, parity, U, A, B);
} else if(out.Precision() == QUDA_DOUBLE_PRECISION) {
wuppertalStep<double>(out, in, parity, U, A, B);
} else {
errorQuda("Precision %d not supported", out.Precision());
}
in.bufferIndex = (1 - in.bufferIndex);
return;
}
/**
Apply a standard Wuppertal smearing step
Computes out(x) = 1/(1+6*alpha)*(in(x) + alpha*\sum_mu (U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu)))
@param[out] out The out result field
@param[in] in The in spinor field
@param[in] U The gauge field
@param[in] alpha The smearing parameter
*/
void wuppertalStep(ColorSpinorField &out, const ColorSpinorField &in, int parity, const GaugeField& U, double alpha)
{
wuppertalStep(out, in, parity, U, 1./(1.+6.*alpha), alpha/(1.+6.*alpha));
}
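// Sanity check on the coefficients used above: with A = 1/(1+6*alpha) and B = alpha/(1+6*alpha),
// A + 6*B = (1 + 6*alpha)/(1 + 6*alpha) = 1, so a spatially constant field with unit gauge links
// is left unchanged by one smearing step, and alpha = 0 reduces the step to a plain copy.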
} // namespace quda
|
e10e02c345849a8b52c5562bb4c1cb50e55331ab.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2013, Los Alamos National Security, LLC
All rights reserved.
Copyright 2013. Los Alamos National Security, LLC. This software was produced under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National Laboratory (LANL),
which is operated by Los Alamos National Security, LLC for the U.S. Department of Energy. The U.S. Government has rights to use, reproduce, and distribute this software.
NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE.
If software is modified to produce derivative works, such modified software should be clearly marked, so as not to confuse it with the version available from LANL.
Additionally, redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
· Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
· Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
· Neither the name of Los Alamos National Security, LLC, Los Alamos National Laboratory, LANL, the U.S. Government, nor the names of its contributors may be used
to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY LOS ALAMOS NATIONAL SECURITY, LLC AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LOS ALAMOS NATIONAL SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <fstream>
#include <iostream>
#include <sstream>
#include <float.h>
#include "CoGLRender.h"
#define STRINGIZE(x) #x
#define STRINGIZE_VALUE_OF(x) STRINGIZE(x)
//------------------------------------------------------------------------------
// CoGLRender::CoGLRender()
//
// Constructor for CoGLRender class
//------------------------------------------------------------------------------
CoGLRender::CoGLRender()
{
}
//---------------------------------------------------------------------------
// CoGLRender::~CoGLRender()
//
// Destructor of CoGLRender class
//---------------------------------------------------------------------------
CoGLRender::~CoGLRender()
{
#ifdef USE_INTEROP
if (vbo_buffers[0])
{
for (int i=0; i<3; i++) hipGraphicsUnregisterResource(vbo_resources[i]);
for (int i=0; i<3; i++)
{
glBindBuffer(GL_ARRAY_BUFFER, vbo_buffers[i]);
glDeleteBuffers(1, &(vbo_buffers[i]));
vbo_buffers[i] = 0;
}
}
#else
vertices.clear(); normals.clear(); colors.clear();
#endif
}
//---------------------------------------------------------------------------
// CoGLRender::initialize()
//
// Initialize OpenGL settings, and call initialize_simulation
//---------------------------------------------------------------------------
void CoGLRender::initialize()
{
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glEnable(GL_DEPTH_TEST);
glShadeModel(GL_SMOOTH);
float white[] = { 0.5, 0.5, 0.5, 1.0 };
float black[] = { 0.0, 0.0, 0.0, 1.0 };
float light_pos[] = { 0.0, 0.0, 10.0, 1.0 };
glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, white);
glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 100);
glLightfv(GL_LIGHT0, GL_AMBIENT, white);
glLightfv(GL_LIGHT0, GL_DIFFUSE, white);
glLightfv(GL_LIGHT0, GL_SPECULAR, black);
glLightfv(GL_LIGHT0, GL_POSITION, light_pos);
glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, 1);
glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, 1);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_NORMALIZE);
glEnable(GL_COLOR_MATERIAL);
#ifdef USE_INTEROP
glewInit();
hipGLSetGLDevice(0);
// initialize contour buffer objects
glGenBuffers(3, vbo_buffers);
for (int i=0; i<3; i++)
{
unsigned int buffer_size = DIM3*VERTICES_PER_CELL*sizeof(float4);
glBindBuffer(GL_ARRAY_BUFFER, vbo_buffers[i]);
glBufferData(GL_ARRAY_BUFFER, buffer_size, 0, GL_DYNAMIC_DRAW);
}
glBindBuffer(GL_ARRAY_BUFFER, 0);
for (int i=0; i<3; i++) hipGraphicsGLRegisterBuffer(&(vbo_resources[i]), vbo_buffers[i], hipGraphicsMapFlagsWriteDiscard);
#endif
initialize_simulation();
rotate(0.2, -0.2);
max_camera_fov = 40.0; zoom_pct = 0.5;
}
//---------------------------------------------------------------------------
// CoGLRender::display()
//
// Render the structured grid, with colors based on the deviatoric strain
//---------------------------------------------------------------------------
void CoGLRender::display()
{
time_step();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(max_camera_fov*zoom_pct, 1.0, 1.0, 1000.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
gluLookAt(0, 0, 8.0f*DIM, 0, 0, 0, 0, 1, 0);
glPushMatrix();
glTranslatef(-DIM/2.0f, -DIM/2.0f, -DIM/2.0f);
qrot.getRotMat(rotation_matrix);
glMultMatrixf(rotation_matrix);
float center_x = DIM/2.0f; float center_y = DIM/2.0f; float center_z = DIM/2.0f;
GLfloat matrix[16];
glGetFloatv(GL_MODELVIEW_MATRIX, matrix);
float offset_x = matrix[0]*center_x + matrix[1]*center_y + matrix[2]*center_z;
float offset_y = matrix[4]*center_x + matrix[5]*center_y + matrix[6]*center_z;
float offset_z = matrix[8]*center_x + matrix[9]*center_y + matrix[10]*center_z;
offset_x = center_x - offset_x; offset_y = center_y - offset_y; offset_z = center_z - offset_z;
glTranslatef(-offset_x, -offset_y, -offset_z);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
#ifdef USE_INTEROP
glBindBuffer(GL_ARRAY_BUFFER, vbo_buffers[0]);
glVertexPointer(3, GL_FLOAT, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, vbo_buffers[1]);
glColorPointer(4, GL_FLOAT, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, vbo_buffers[2]);
glNormalPointer(GL_FLOAT, 0, 0);
glDrawArrays(GL_QUADS, 0, simulation->n_vertices);
glBindBuffer(GL_ARRAY_BUFFER, 0);
#else
if (cur_timestep < max_timesteps) colors.assign(simulation->colors_begin(), simulation->colors_end());
glNormalPointer(GL_FLOAT, 0, &normals[0]);
glColorPointer(4, GL_FLOAT, 0, &colors[0]);
glVertexPointer(3, GL_FLOAT, 0, &vertices[0]);
glDrawArrays(GL_QUADS, 0, simulation->n_vertices);
#endif
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
glPopMatrix();
}
//------------------------------------------------------------------------------
// CoGLRender::rotate()
//
// param float a_angle1 Rotation about first axis
// param float a_angle2 Rotation about second axis
//
// Rotate view based on input angles
//------------------------------------------------------------------------------
void CoGLRender::rotate(float a_angle1, float a_angle2)
{
Quaternion new_rot_x;
new_rot_x.setEulerAngles(a_angle1, 0.0, 0.0);
qrot.mul(new_rot_x);
Quaternion new_rot_y;
new_rot_y.setEulerAngles(0.0, 0.0, a_angle2);
qrot.mul(new_rot_y);
}
//------------------------------------------------------------------------------
// CoGLRender::zoom()
//
// param float a_delta Change to percentage of max FOV to use for field of view angle
//
// Increment the zoom level based on input parameter
//------------------------------------------------------------------------------
void CoGLRender::zoom(float a_delta)
{
zoom_pct += a_delta;
if (zoom_pct > 1.0) zoom_pct = 1.0; if (zoom_pct < 0.0) zoom_pct = 0.0;
}
//---------------------------------------------------------------------------
// CoGLRender::initialize_simulation()
//
// Initialize simulation parameters, read input data, and pass to CoGLSim class
//---------------------------------------------------------------------------
void CoGLRender::initialize_simulation()
{
simulation = new CoGLSim<double>(DIM);
#ifdef USE_INTEROP
for (int i=0; i<3; i++) simulation->vbo_resources[i] = vbo_resources[i];
#endif
simulation->create_mesh();
#ifndef USE_INTEROP
normals.assign(simulation->normals_begin(), simulation->normals_end());
vertices.assign(simulation->vertices_begin(), simulation->vertices_end());
colors.assign(simulation->colors_begin(), simulation->colors_end());
#endif
h_ux.resize(DIM3); h_uy.resize(DIM3); h_uz.resize(DIM3);
h_uxdt.resize(DIM3); h_uydt.resize(DIM3); h_uzdt.resize(DIM3);
h_uxx_applied.resize(DIM3);
max_timesteps=200000; temp=255.0; d2=1.0;
eta=10.0; delta=0.01; h=1.0;
shear_a=28.0*pow(10.0,10); bulk_a=14.0*pow(10.0,10);
ao=1.97*pow(10.0,10);
as = shear_a/ao; ac = bulk_a/ao;
printf("AS=%lf, AC=%lf\n", as, ac);
tau=(temp-270.)/(295.-270.);
eta0_xx=(3.795-3.756)/3.756;
eta0_yy=eta0_xx;
eta0_zz=(3.725-3.756)/3.756;
e1_0=(eta0_xx+eta0_yy+eta0_zz)/sqrt(3.);
e3_0=(eta0_xx+eta0_yy-2.0*eta0_zz)/sqrt(6.);
ao=1.97*pow(10.0,10);
eo=-0.5000*ac*e1_0/e3_0;
char fuxFilename[1024], fuyFilename[1024], fuzFilename[1024];
sprintf(fuxFilename, "%s/fort.31", STRINGIZE_VALUE_OF(DATA_DIRECTORY));
sprintf(fuyFilename, "%s/fort.32", STRINGIZE_VALUE_OF(DATA_DIRECTORY));
sprintf(fuzFilename, "%s/fort.33", STRINGIZE_VALUE_OF(DATA_DIRECTORY));
FILE* fux = fopen(fuxFilename, "r");
FILE* fuy = fopen(fuyFilename, "r");
FILE* fuz = fopen(fuzFilename, "r");
if (!fux || !fuy || !fuz) { printf("Initial conditions file not found\n"); exit(-1); }
for (unsigned int i=0; i<DIM; i++)
for (unsigned int j=0; j<DIM; j++)
for (unsigned l=0; l<DIM; l++)
{
fscanf(fux, "%lf ", &(h_ux[i*DIM2+j*DIM+l]));
fscanf(fuy, "%lf ", &(h_uy[i*DIM2+j*DIM+l]));
fscanf(fuz, "%lf ", &(h_uz[i*DIM2+j*DIM+l]));
}
fclose(fux); fclose(fuy); fclose(fuz);
thrust::fill(h_uxdt.begin(), h_uxdt.end(), 0.0);
thrust::fill(h_uydt.begin(), h_uydt.end(), 0.0);
thrust::fill(h_uzdt.begin(), h_uzdt.end(), 0.0);
thrust::fill(h_uxx_applied.begin(), h_uxx_applied.end(), 0.0);
simulation->initialize_simulation(h_ux, h_uy, h_uz, h_uxdt, h_uydt, h_uzdt, h_uxx_applied, ac, as, d2, delta, eo, eta, h, tau);
printf("Initial conditions set\n");
cur_timestep=0;
}
//---------------------------------------------------------------------------
// CoGLRender::time_step()
//
// Advance the simulation by calling the CoGLSim class, and report timings
//---------------------------------------------------------------------------
void CoGLRender::time_step()
{
if (cur_timestep == 0) gettimeofday(&begin, 0);
if (cur_timestep < max_timesteps)
{
simulation->advance_simulation();
simulation->color_mesh();
cur_timestep++;
}
if (cur_timestep == max_timesteps)
{
gettimeofday(&end, 0);
timersub(&end, &begin, &diff);
float seconds = diff.tv_sec + 1.0E-6*diff.tv_usec;
std::cout << "Time: " << seconds << std::endl;
}
}
//---------------------------------------------------------------------------
// CoGLRender::time_simulation()
//
// param int a_iters Number of iterations to time
//
// Time the simulation for a given number of iterations, without rendering
//---------------------------------------------------------------------------
void CoGLRender::time_simulation(int a_iters)
{
#ifdef USE_INTEROP
printf("You must disable interop in CMake configuration in order to run timing tests without rendering\n");
#else
initialize_simulation();
cur_timestep = 0;
max_timesteps = a_iters;
for (unsigned int i=0; i<max_timesteps; i++) time_step();
#endif
}
| e10e02c345849a8b52c5562bb4c1cb50e55331ab.cu | /*
Copyright (c) 2013, Los Alamos National Security, LLC
All rights reserved.
Copyright 2013. Los Alamos National Security, LLC. This software was produced under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National Laboratory (LANL),
which is operated by Los Alamos National Security, LLC for the U.S. Department of Energy. The U.S. Government has rights to use, reproduce, and distribute this software.
NEITHER THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE.
If software is modified to produce derivative works, such modified software should be clearly marked, so as not to confuse it with the version available from LANL.
Additionally, redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
· Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
· Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
· Neither the name of Los Alamos National Security, LLC, Los Alamos National Laboratory, LANL, the U.S. Government, nor the names of its contributors may be used
to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY LOS ALAMOS NATIONAL SECURITY, LLC AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LOS ALAMOS NATIONAL SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <fstream>
#include <iostream>
#include <sstream>
#include <float.h>
#include "CoGLRender.h"
#define STRINGIZE(x) #x
#define STRINGIZE_VALUE_OF(x) STRINGIZE(x)
//------------------------------------------------------------------------------
// CoGLRender::CoGLRender()
//
// Constructor for CoGLRender class
//------------------------------------------------------------------------------
CoGLRender::CoGLRender()
{
}
//---------------------------------------------------------------------------
// CoGLRender::~CoGLRender()
//
// Destructor of CoGLRender class
//---------------------------------------------------------------------------
CoGLRender::~CoGLRender()
{
#ifdef USE_INTEROP
if (vbo_buffers[0])
{
for (int i=0; i<3; i++) cudaGraphicsUnregisterResource(vbo_resources[i]);
for (int i=0; i<3; i++)
{
glBindBuffer(1, vbo_buffers[i]);
glDeleteBuffers(1, &(vbo_buffers[i]));
vbo_buffers[i] = 0;
}
}
#else
vertices.clear(); normals.clear(); colors.clear();
#endif
}
//---------------------------------------------------------------------------
// CoGLRender::initialize()
//
// Initialize OpenGL settings, and call initialize_simulation
//---------------------------------------------------------------------------
void CoGLRender::initialize()
{
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glEnable(GL_DEPTH_TEST);
glShadeModel(GL_SMOOTH);
float white[] = { 0.5, 0.5, 0.5, 1.0 };
float black[] = { 0.0, 0.0, 0.0, 1.0 };
float light_pos[] = { 0.0, 0.0, 10.0, 1.0 };
glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, white);
glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 100);
glLightfv(GL_LIGHT0, GL_AMBIENT, white);
glLightfv(GL_LIGHT0, GL_DIFFUSE, white);
glLightfv(GL_LIGHT0, GL_SPECULAR, black);
glLightfv(GL_LIGHT0, GL_POSITION, light_pos);
glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, 1);
glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, 1);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_NORMALIZE);
glEnable(GL_COLOR_MATERIAL);
#ifdef USE_INTEROP
glewInit();
cudaGLSetGLDevice(0);
// initialize contour buffer objects
glGenBuffers(3, vbo_buffers);
for (int i=0; i<3; i++)
{
unsigned int buffer_size = DIM3*VERTICES_PER_CELL*sizeof(float4);
glBindBuffer(GL_ARRAY_BUFFER, vbo_buffers[i]);
glBufferData(GL_ARRAY_BUFFER, buffer_size, 0, GL_DYNAMIC_DRAW);
}
glBindBuffer(GL_ARRAY_BUFFER, 0);
for (int i=0; i<3; i++) cudaGraphicsGLRegisterBuffer(&(vbo_resources[i]), vbo_buffers[i], cudaGraphicsMapFlagsWriteDiscard);
#endif
initialize_simulation();
rotate(0.2, -0.2);
max_camera_fov = 40.0; zoom_pct = 0.5;
}
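//---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): once a VBO has been
// registered with cudaGraphicsGLRegisterBuffer as above, device code can write
// into it by mapping the resource and querying its device pointer. The calls
// below are the standard CUDA runtime API; fill_vertices is a hypothetical
// kernel name.
//
//   float4 *d_ptr; size_t num_bytes;
//   cudaGraphicsMapResources(1, &vbo_resources[0], 0);
//   cudaGraphicsResourceGetMappedPointer((void **)&d_ptr, &num_bytes, vbo_resources[0]);
//   fill_vertices<<<grid, block>>>(d_ptr /*, ... */);
//   cudaGraphicsUnmapResources(1, &vbo_resources[0], 0);
//---------------------------------------------------------------------------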
//---------------------------------------------------------------------------
// CoGLRender::display()
//
// Render the structured grid, with colors based on the deviatoric strain
//---------------------------------------------------------------------------
void CoGLRender::display()
{
time_step();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(max_camera_fov*zoom_pct, 1.0, 1.0, 1000.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
gluLookAt(0, 0, 8.0f*DIM, 0, 0, 0, 0, 1, 0);
glPushMatrix();
glTranslatef(-DIM/2.0f, -DIM/2.0f, -DIM/2.0f);
qrot.getRotMat(rotation_matrix);
glMultMatrixf(rotation_matrix);
float center_x = DIM/2.0f; float center_y = DIM/2.0f; float center_z = DIM/2.0f;
GLfloat matrix[16];
glGetFloatv(GL_MODELVIEW_MATRIX, matrix);
float offset_x = matrix[0]*center_x + matrix[1]*center_y + matrix[2]*center_z;
float offset_y = matrix[4]*center_x + matrix[5]*center_y + matrix[6]*center_z;
float offset_z = matrix[8]*center_x + matrix[9]*center_y + matrix[10]*center_z;
offset_x = center_x - offset_x; offset_y = center_y - offset_y; offset_z = center_z - offset_z;
glTranslatef(-offset_x, -offset_y, -offset_z);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
#ifdef USE_INTEROP
glBindBuffer(GL_ARRAY_BUFFER, vbo_buffers[0]);
glVertexPointer(3, GL_FLOAT, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, vbo_buffers[1]);
glColorPointer(4, GL_FLOAT, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, vbo_buffers[2]);
glNormalPointer(GL_FLOAT, 0, 0);
glDrawArrays(GL_QUADS, 0, simulation->n_vertices);
glBindBuffer(GL_ARRAY_BUFFER, 0);
#else
if (cur_timestep < max_timesteps) colors.assign(simulation->colors_begin(), simulation->colors_end());
glNormalPointer(GL_FLOAT, 0, &normals[0]);
glColorPointer(4, GL_FLOAT, 0, &colors[0]);
glVertexPointer(3, GL_FLOAT, 0, &vertices[0]);
glDrawArrays(GL_QUADS, 0, simulation->n_vertices);
#endif
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
glPopMatrix();
}
//------------------------------------------------------------------------------
// CoGLRender::rotate()
//
// param float a_angle1 Rotation about first axis
// param float a_angle2 Rotation about second axis
//
// Rotate view based on input angles
//------------------------------------------------------------------------------
void CoGLRender::rotate(float a_angle1, float a_angle2)
{
Quaternion new_rot_x;
new_rot_x.setEulerAngles(a_angle1, 0.0, 0.0);
qrot.mul(new_rot_x);
Quaternion new_rot_y;
new_rot_y.setEulerAngles(0.0, 0.0, a_angle2);
qrot.mul(new_rot_y);
}
//------------------------------------------------------------------------------
// CoGLRender::zoom()
//
// param float a_delta Change to percentage of max FOV to use for field of view angle
//
// Increment the zoom level based on input parameter
//------------------------------------------------------------------------------
void CoGLRender::zoom(float a_delta)
{
zoom_pct += a_delta;
if (zoom_pct > 1.0) zoom_pct = 1.0; if (zoom_pct < 0.0) zoom_pct = 0.0;
}
//---------------------------------------------------------------------------
// CoGLRender::initialize_simulation()
//
// Initialize simulation parameters, read input data, and pass to CoGLSim class
//---------------------------------------------------------------------------
void CoGLRender::initialize_simulation()
{
simulation = new CoGLSim<double>(DIM);
#ifdef USE_INTEROP
for (int i=0; i<3; i++) simulation->vbo_resources[i] = vbo_resources[i];
#endif
simulation->create_mesh();
#ifndef USE_INTEROP
normals.assign(simulation->normals_begin(), simulation->normals_end());
vertices.assign(simulation->vertices_begin(), simulation->vertices_end());
colors.assign(simulation->colors_begin(), simulation->colors_end());
#endif
h_ux.resize(DIM3); h_uy.resize(DIM3); h_uz.resize(DIM3);
h_uxdt.resize(DIM3); h_uydt.resize(DIM3); h_uzdt.resize(DIM3);
h_uxx_applied.resize(DIM3);
max_timesteps=200000; temp=255.0; d2=1.0;
eta=10.0; delta=0.01; h=1.0;
shear_a=28.0*pow(10.0,10); bulk_a=14.0*pow(10.0,10);
ao=1.97*pow(10.0,10);
as = shear_a/ao; ac = bulk_a/ao;
printf("AS=%lf, AC=%lf\n", as, ac);
tau=(temp-270.)/(295.-270.);
eta0_xx=(3.795-3.756)/3.756;
eta0_yy=eta0_xx;
eta0_zz=(3.725-3.756)/3.756;
e1_0=(eta0_xx+eta0_yy+eta0_zz)/sqrt(3.);
e3_0=(eta0_xx+eta0_yy-2.0*eta0_zz)/sqrt(6.);
ao=1.97*pow(10.0,10);
eo=-0.5000*ac*e1_0/e3_0;
char fuxFilename[1024], fuyFilename[1024], fuzFilename[1024];
sprintf(fuxFilename, "%s/fort.31", STRINGIZE_VALUE_OF(DATA_DIRECTORY));
sprintf(fuyFilename, "%s/fort.32", STRINGIZE_VALUE_OF(DATA_DIRECTORY));
sprintf(fuzFilename, "%s/fort.33", STRINGIZE_VALUE_OF(DATA_DIRECTORY));
FILE* fux = fopen(fuxFilename, "r");
FILE* fuy = fopen(fuyFilename, "r");
FILE* fuz = fopen(fuzFilename, "r");
if ((!fux)) { printf("Initial conditions file not found\n"); exit(-1); }
for (unsigned int i=0; i<DIM; i++)
for (unsigned int j=0; j<DIM; j++)
for (unsigned l=0; l<DIM; l++)
{
fscanf(fux, "%lf ", &(h_ux[i*DIM2+j*DIM+l]));
fscanf(fuy, "%lf ", &(h_uy[i*DIM2+j*DIM+l]));
fscanf(fuz, "%lf ", &(h_uz[i*DIM2+j*DIM+l]));
}
fclose(fux); fclose(fuy); fclose(fuz);
thrust::fill(h_uxdt.begin(), h_uxdt.end(), 0.0);
thrust::fill(h_uydt.begin(), h_uydt.end(), 0.0);
thrust::fill(h_uzdt.begin(), h_uzdt.end(), 0.0);
thrust::fill(h_uxx_applied.begin(), h_uxx_applied.end(), 0.0);
simulation->initialize_simulation(h_ux, h_uy, h_uz, h_uxdt, h_uydt, h_uzdt, h_uxx_applied, ac, as, d2, delta, eo, eta, h, tau);
printf("Initial conditions set\n");
cur_timestep=0;
}
//---------------------------------------------------------------------------
// CoGLRender::time_step()
//
// Advance the simulation by calling the CoGLSim class, and report timings
//---------------------------------------------------------------------------
void CoGLRender::time_step()
{
if (cur_timestep == 0) gettimeofday(&begin, 0);
if (cur_timestep < max_timesteps)
{
simulation->advance_simulation();
simulation->color_mesh();
cur_timestep++;
}
if (cur_timestep == max_timesteps)
{
gettimeofday(&end, 0);
timersub(&end, &begin, &diff);
float seconds = diff.tv_sec + 1.0E-6*diff.tv_usec;
std::cout << "Time: " << seconds << std::endl;
}
}
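//---------------------------------------------------------------------------
// Note (illustrative sketch, not used by this class): the timing above is
// host-side wall-clock time via gettimeofday/timersub. A device-side
// alternative would bracket the work with CUDA events, e.g.
//
//   cudaEvent_t t0, t1; float ms;
//   cudaEventCreate(&t0); cudaEventCreate(&t1);
//   cudaEventRecord(t0);
//   simulation->advance_simulation();
//   cudaEventRecord(t1); cudaEventSynchronize(t1);
//   cudaEventElapsedTime(&ms, t0, t1);   // elapsed milliseconds
//---------------------------------------------------------------------------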
//---------------------------------------------------------------------------
// CoGLRender::time_simulation()
//
// param int a_iters Number of iterations to time
//
// Time the simulation for a given number of iterations, without rendering
//---------------------------------------------------------------------------
void CoGLRender::time_simulation(int a_iters)
{
#ifdef USE_INTEROP
printf("You must disable interop in CMake configuration in order to run timing tests without rendering\n");
#else
initialize_simulation();
cur_timestep = 0;
max_timesteps = a_iters;
for (unsigned int i=0; i<max_timesteps; i++) time_step();
#endif
}
|
fd52047a06dabb57268629bc656b19f27e83b650.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_19.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
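// Note on the state-variable layout (illustrative, inferred from the code in this
// file): hipMallocPitch allocates NEQ rows of num_volumes reals, each row padded to
// `pitch` bytes, so state i of cell `tid` lives at
//   *((real*)((char*)sv + pitch * i) + tid)
// which is the addressing pattern used by the kernels below. A hypothetical helper
// making this explicit could look like:
//   __device__ inline real &state(real *sv, size_t pitch, int i, int tid) {
//       return *((real*)((char*)sv + pitch * i) + tid);
//   }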
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.4901203553897,0.00131177121243054,0.777786288577117,0.777647224146045,0.000176819820661720,0.484277066680072,0.00295670386892032,0.999998321645759,1.95882583997698e-08,1.91086959570826e-05,0.999769802784257,1.00742294542800,0.999998504302701,3.74218001174359e-05,1.41921088197810,10.0015161419689,139.208342414277};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.1722612334159,0.000259112383736700,0.000154841929841439,0.000361218133317334,0.277128455649609,0.153642408006870,0.209381667465666,4.20509839372909,0.0199270314805181,1.58059649007092,1098.43907813844,0.000639220600349527,0.0905927390261824,0.0181442296796367,0.00430751059648478,1.23911116806789e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| fd52047a06dabb57268629bc656b19f27e83b650.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_19.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.4901203553897,0.00131177121243054,0.777786288577117,0.777647224146045,0.000176819820661720,0.484277066680072,0.00295670386892032,0.999998321645759,1.95882583997698e-08,1.91086959570826e-05,0.999769802784257,1.00742294542800,0.999998504302701,3.74218001174359e-05,1.41921088197810,10.0015161419689,139.208342414277};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.1722612334159,0.000259112383736700,0.000154841929841439,0.000361218133317334,0.277128455649609,0.153642408006870,0.209381667465666,4.20509839372909,0.0199270314805181,1.58059649007092,1098.43907813844,0.000639220600349527,0.0905927390261824,0.0181442296796367,0.00430751059648478,1.23911116806789e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
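// Note (illustrative summary of the update scheme above, no additional code path):
// each gating variable g with voltage-dependent steady state g_inf and time constant
// tau_g is advanced with the exponential (Rush-Larsen style) step
//   g_new = g_inf - (g_inf - g_old) * exp(-dt / tau_g)
// which is the form used for rDY_[1]..rDY_[10] and for sfca/sg above, while the
// membrane potential rDY_[0] = svolt + dt*(-sItot) is a plain forward-Euler step.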
|
4c52249908da21f480c224c42cf82f320b6f2eb8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add_i32 (int* left_op, int* right_op, int* output, int len) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
output[idx] = left_op[idx] + right_op[idx];
}
} | 4c52249908da21f480c224c42cf82f320b6f2eb8.cu | #include "includes.h"
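// Illustrative launch sketch (not part of the original file): a typical host-side
// call covers `len` elements with one thread each, e.g.
//   int threads = 256;
//   int blocks  = (len + threads - 1) / threads;
//   add_i32<<<blocks, threads>>>(d_left, d_right, d_out, len);  // d_* are hypothetical device pointers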
__global__ void add_i32 (int* left_op, int* right_op, int* output, int len) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
output[idx] = left_op[idx] + right_op[idx];
}
} |
782a1dceb92f78bf2de3935426945600f5dbc519.hip | // !!! This is a file automatically generated by hipify!!!
/*
============================================================================
Name : sorting_segments.cu
Author : Rafael Schmid
Version :
Copyright : Your copyright notice
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
COMPILE USING THE FOLLOWING COMMAND:
nvcc segmented_sort.cu -o segmented_sort -std=c++11 --expt-extended-lambda -I"/home/schmid/Dropbox/Unicamp/workspace/sorting_segments/moderngpu-master/src"
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm> // std::sort
#include <hip/hip_runtime.h>
#include <iostream>
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
void print(uint* host_data, uint n) {
std::cout << "\n";
for (uint i = 0; i < n; i++) {
std::cout << host_data[i] << " ";
}
std::cout << "\n";
}
void printSeg(uint* host_data, uint num_seg, uint num_ele) {
std::cout << "\n";
for (uint i = 0; i < num_seg; i++) {
std::cout << host_data[i] << " ";
}
std::cout << num_ele << " ";
std::cout << "\n";
}
void segmented_sorting(uint* vec, uint* seg, int number_of_segments) {
for(int i = 0; i < number_of_segments; i++) {
std::stable_sort (&vec[seg[i]], &vec[seg[i+1]]);
}
}
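// Note (illustrative): `seg` is read with num_of_segments + 1 entries, i.e. it is a
// CSR-style offset array where segment i spans vec[seg[i]] .. vec[seg[i+1]-1]. For a
// made-up input, seg = {0, 3, 7} with 7 elements describes two segments, vec[0..2]
// and vec[3..6], each sorted independently by the std::stable_sort call above.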
int main(int argc, char** argv) {
uint num_of_segments;
uint num_of_elements;
uint i;
scanf("%d", &num_of_segments);
uint mem_size_seg = sizeof(uint) * (num_of_segments + 1);
uint *h_seg = (uint *) malloc(mem_size_seg);
for (i = 0; i < num_of_segments + 1; i++)
scanf("%d", &h_seg[i]);
scanf("%d", &num_of_elements);
uint mem_size_vec = sizeof(uint) * num_of_elements;
uint *h_vec_aux = (uint *) malloc(mem_size_vec);
for (i = 0; i < num_of_elements; i++)
scanf("%d", &h_vec_aux[i]);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
uint *h_vec = (uint *) malloc(mem_size_vec);
for (uint j = 0; j < EXECUTIONS; j++) {
for (i = 0; i < num_of_elements; i++)
h_vec[i] = h_vec_aux[i];
hipEventRecord(start);
segmented_sorting(h_vec, h_seg, num_of_segments);
hipEventRecord(stop);
if (ELAPSED_TIME == 1) {
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
}
}
if (ELAPSED_TIME != 1) {
print(h_vec, num_of_elements);
}
free(h_seg);
free(h_vec);
free(h_vec_aux);
return 0;
}
/***
* SEGMENTED SORT WORKING
*
*
uint n = atoi(argv[1]);
uint m = atoi(argv[2]);
uint num_segments = n / m;
mgpu::standard_context_t context;
rand_key<uint> func(m);
mgpu::mem_t<uint> segs = mgpu::fill_function(func, num_segments, context);
//mgpu::mem_t<uint> segs = mgpu::fill_random(0, n - 1, num_segments, true, context);
std::vector<uint> segs_host = mgpu::from_mem(segs);
mgpu::mem_t<uint> data = mgpu::fill_random(0, pow(2, NUMBER_BITS_SIZE), n,
false, context);
mgpu::mem_t<uint> values(n, context);
std::vector<uint> data_host = mgpu::from_mem(data);
// print(segs_host); print(data_host);
mgpu::segmented_sort(data.data(), values.data(), n, segs.data(),
num_segments, mgpu::less_t<uint>(), context);
std::vector<uint> sorted = from_mem(data);
std::vector<uint> indices_host = from_mem(values);
std::cout << "\n";
//print(segs_host);
// print(data_host); print(indices_host);
*
*/
| 782a1dceb92f78bf2de3935426945600f5dbc519.cu | /*
============================================================================
Name : sorting_segments.cu
Author : Rafael Schmid
Version :
Copyright : Your copyright notice
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
COMPILE USING THE FOLLOWING COMMAND:
nvcc segmented_sort.cu -o segmented_sort -std=c++11 --expt-extended-lambda -I"/home/schmid/Dropbox/Unicamp/workspace/sorting_segments/moderngpu-master/src"
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm> // std::sort
#include <cuda.h>
#include <iostream>
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
void print(uint* host_data, uint n) {
std::cout << "\n";
for (uint i = 0; i < n; i++) {
std::cout << host_data[i] << " ";
}
std::cout << "\n";
}
void printSeg(uint* host_data, uint num_seg, uint num_ele) {
std::cout << "\n";
for (uint i = 0; i < num_seg; i++) {
std::cout << host_data[i] << " ";
}
std::cout << num_ele << " ";
std::cout << "\n";
}
void segmented_sorting(uint* vec, uint* seg, int number_of_segments) {
for(int i = 0; i < number_of_segments; i++) {
std::stable_sort (&vec[seg[i]], &vec[seg[i+1]]);
}
}
int main(int argc, char** argv) {
uint num_of_segments;
uint num_of_elements;
uint i;
scanf("%d", &num_of_segments);
uint mem_size_seg = sizeof(uint) * (num_of_segments + 1);
uint *h_seg = (uint *) malloc(mem_size_seg);
for (i = 0; i < num_of_segments + 1; i++)
scanf("%d", &h_seg[i]);
scanf("%d", &num_of_elements);
uint mem_size_vec = sizeof(uint) * num_of_elements;
uint *h_vec_aux = (uint *) malloc(mem_size_vec);
for (i = 0; i < num_of_elements; i++)
scanf("%d", &h_vec_aux[i]);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
uint *h_vec = (uint *) malloc(mem_size_vec);
for (uint j = 0; j < EXECUTIONS; j++) {
for (i = 0; i < num_of_elements; i++)
h_vec[i] = h_vec_aux[i];
cudaEventRecord(start);
segmented_sorting(h_vec, h_seg, num_of_segments);
cudaEventRecord(stop);
if (ELAPSED_TIME == 1) {
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
}
}
if (ELAPSED_TIME != 1) {
print(h_vec, num_of_elements);
}
free(h_seg);
free(h_vec);
free(h_vec_aux);
return 0;
}
/***
* SEGMENTED SORT WORKING
*
*
uint n = atoi(argv[1]);
uint m = atoi(argv[2]);
uint num_segments = n / m;
mgpu::standard_context_t context;
rand_key<uint> func(m);
mgpu::mem_t<uint> segs = mgpu::fill_function(func, num_segments, context);
//mgpu::mem_t<uint> segs = mgpu::fill_random(0, n - 1, num_segments, true, context);
std::vector<uint> segs_host = mgpu::from_mem(segs);
mgpu::mem_t<uint> data = mgpu::fill_random(0, pow(2, NUMBER_BITS_SIZE), n,
false, context);
mgpu::mem_t<uint> values(n, context);
std::vector<uint> data_host = mgpu::from_mem(data);
// print(segs_host); print(data_host);
mgpu::segmented_sort(data.data(), values.data(), n, segs.data(),
num_segments, mgpu::less_t<uint>(), context);
std::vector<uint> sorted = from_mem(data);
std::vector<uint> indices_host = from_mem(values);
std::cout << "\n";
//print(segs_host);
// print(data_host); print(indices_host);
*
*/
|
133e24d37e11d59bd5a733597225d2a9e13f37c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#include <stdlib.h>
__global__
void MatAdd(int* A, int* B, int* C, int nx, int ny)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int idx = iy * nx + ix;
C[idx] = A[idx] + B[idx];
}
void GenerateMatrix(int* m, int size)
{
for (int i=0; i<size; ++i)
m[i] = rand()% 10;
}
int main( void ) {
// size of matrix
int nx = 4; // columns
int ny = 5; // rows
int size = nx*ny;
size_t sizeBytes = size * sizeof(int);
int* h_A = (int*)malloc(sizeBytes);
int* h_B = (int*)malloc(sizeBytes);
int* h_C = (int*)malloc(sizeBytes);
GenerateMatrix(h_A, size);
GenerateMatrix(h_B, size);
int* d_A;
int* d_B;
int* d_C;
hipMalloc((void**)&d_A, sizeBytes);
hipMalloc((void**)&d_B, sizeBytes);
hipMalloc((void**)&d_C, sizeBytes);
hipMemcpy(d_A, h_A, sizeBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, sizeBytes, hipMemcpyHostToDevice);
int BlockPerGrid = ny; // rows
int ThreadsPerBlock = nx; // columns
hipLaunchKernelGGL(( MatAdd), dim3(BlockPerGrid), dim3(ThreadsPerBlock) , 0, 0, d_A, d_B, d_C, nx, ny);
hipMemcpy(h_C, d_C, sizeBytes, hipMemcpyDeviceToHost);
// display the results
for (int i=0; i<size; i++) {
printf( "%d + %d = %d ", h_A[i], h_B[i], h_C[i] );
if (i%size == 0) printf("\n");
}
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return 0;
}
| 133e24d37e11d59bd5a733597225d2a9e13f37c8.cu | #include "stdio.h"
#include <stdlib.h>
__global__
void MatAdd(int* A, int* B, int* C, int nx, int ny)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int idx = iy * nx + ix;
C[idx] = A[idx] + B[idx];
}
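// Note (illustrative): the kernel computes a 2D index (ix, iy), so an equivalent
// launch could use dim3 block dimensions, e.g.
//   dim3 block(nx, ny);
//   MatAdd<<<1, block>>>(d_A, d_B, d_C, nx, ny);
// The 1D launch used in main() below (ny blocks of nx threads) covers the same
// nx*ny elements because blockIdx.y and threadIdx.y are then always zero, so
// idx = iy*nx + ix degenerates to idx = ix, which still spans 0 .. nx*ny-1.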
void GenerateMatrix(int* m, int size)
{
for (int i=0; i<size; ++i)
m[i] = rand()% 10;
}
int main( void ) {
// size of matrix
int nx = 4; // columns
int ny = 5; // rows
int size = nx*ny;
size_t sizeBytes = size * sizeof(int);
int* h_A = (int*)malloc(sizeBytes);
int* h_B = (int*)malloc(sizeBytes);
int* h_C = (int*)malloc(sizeBytes);
GenerateMatrix(h_A, size);
GenerateMatrix(h_B, size);
int* d_A;
int* d_B;
int* d_C;
cudaMalloc((void**)&d_A, sizeBytes);
cudaMalloc((void**)&d_B, sizeBytes);
cudaMalloc((void**)&d_C, sizeBytes);
cudaMemcpy(d_A, h_A, sizeBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, sizeBytes, cudaMemcpyHostToDevice);
int BlockPerGrid = ny; // rows
int ThreadsPerBlock = nx; // columns
MatAdd<<< BlockPerGrid, ThreadsPerBlock >>>(d_A, d_B, d_C, nx, ny);
cudaMemcpy(h_C, d_C, sizeBytes, cudaMemcpyDeviceToHost);
// display the results
for (int i=0; i<size; i++) {
printf( "%d + %d = %d ", h_A[i], h_B[i], h_C[i] );
if (i%size == 0) printf("\n");
}
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return 0;
}
|
431f0c558a725323bbb0ba404a00d4ca8668e1ee.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <opencv2/opencv.hpp>
#include <vector>
#include <sstream>
#include <opencv2/core/cuda.hpp>
using namespace cv;
using namespace std;
__global__ static void test_kernel(unsigned char *img1,
const int row,
const int col
)
{
const int tid=threadIdx.x;
double scale=1.2f;
for(int level=0;level<10;level++)
{
for(int i=tid;i<row/scale;i+=256)
{
float fy=(float)((i+0.5)*scale-0.5);
int sy=(int)fy;
fy-=sy;
sy=(sy<row-2)?sy:row-2;
sy=(sy<0)?0:sy;
short cbufy[2];
cbufy[0]=(short)((1.f-fy)*2048+0.5);
cbufy[1]=2048-cbufy[0];
for(int j=0;j<col/scale;j++)
{
float fx=(float)((j+ 0.5)*scale-0.5);
int sx=(int)fx;
fx-=sx;
if(sx<0)
{
fx=0,sx=0;
}
if(sx>=col-1)
{
fx=0,sx=col-2;
}
short cbufx[2];
cbufx[0]=(short)((1.f-fx)*2048+0.5);
cbufx[1]=2048-cbufx[0];
unsigned char temp=
(img1[sy*col+sx]*cbufx[0]*cbufy[0]+
img1[(sy+1)*col+1*sx]*cbufx[0]*cbufy[1]+
img1[sy*col+(sx+1)]*cbufx[1]*cbufy[0]+
img1[(sy+1)*col+(sx+1)]*cbufx[1]*cbufy[1])>>22;
}
}
__syncthreads();
}
}
void startgpu(cv::Mat image,unsigned char *img1_gpu)
{
hipMalloc((void**)&img1_gpu,sizeof(unsigned char)*image.rows*image.cols);
hipMemcpy(img1_gpu,image.data,sizeof(unsigned char)*image.rows*image.cols,hipMemcpyHostToDevice);
}
void endgpu(unsigned char *img1_gpu)
{
hipFree(img1_gpu);
}
void addgpu(int img_row,int img_col,unsigned char *img1_gpu)
{
hipLaunchKernelGGL(( test_kernel), dim3(1),dim3(1), 0, 0, img1_gpu,img_row,img_col);
hipError_t error=hipGetLastError();
} | 431f0c558a725323bbb0ba404a00d4ca8668e1ee.cu | #include <cuda_runtime.h>
#include <opencv2/opencv.hpp>
#include <vector>
#include <sstream>
#include <opencv2/core/cuda.hpp>
using namespace cv;
using namespace std;
__global__ static void test_kernel(unsigned char *img1,
const int row,
const int col
)
{
const int tid=threadIdx.x;
double scale=1.2f;
for(int level=0;level<10;level++)
{
for(int i=tid;i<row/scale;i+=256)
{
float fy=(float)((i+0.5)*scale-0.5);
int sy=(int)fy;
fy-=sy;
sy=(sy<row-2)?sy:row-2;
sy=(sy<0)?0:sy;
short cbufy[2];
cbufy[0]=(short)((1.f-fy)*2048+0.5);
cbufy[1]=2048-cbufy[0];
for(int j=0;j<col/scale;j++)
{
float fx=(float)((j+ 0.5)*scale-0.5);
int sx=(int)fx;
fx-=sx;
if(sx<0)
{
fx=0,sx=0;
}
if(sx>=col-1)
{
fx=0,sx=col-2;
}
short cbufx[2];
cbufx[0]=(short)((1.f-fx)*2048+0.5);
cbufx[1]=2048-cbufx[0];
unsigned char temp=
(img1[sy*col+sx]*cbufx[0]*cbufy[0]+
img1[(sy+1)*col+1*sx]*cbufx[0]*cbufy[1]+
img1[sy*col+(sx+1)]*cbufx[1]*cbufy[0]+
img1[(sy+1)*col+(sx+1)]*cbufx[1]*cbufy[1])>>22;
}
}
__syncthreads();
}
}
void startgpu(cv::Mat image,unsigned char *img1_gpu)
{
cudaMalloc((void**)&img1_gpu,sizeof(unsigned char)*image.rows*image.cols);
cudaMemcpy(img1_gpu,image.data,sizeof(unsigned char)*image.rows*image.cols,cudaMemcpyHostToDevice);
}
void endgpu(unsigned char *img1_gpu)
{
cudaFree(img1_gpu);
}
void addgpu(int img_row,int img_col,unsigned char *img1_gpu)
{
test_kernel<<<1,1>>>(img1_gpu,img_row,img_col);
cudaError_t error=cudaGetLastError();
} |
a4de8b619e4e83ee4f5cf0880e4cf46602291bd3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
//! Function to add the elements of two arrays
__global__ void add(int n, float* x, float* y)
{
for (int i = 0; i < n; ++i)
y[i] = x[i] + y[i];
}
int main(int argc, char* argv[])
{
int N = 1 << 24; //! 16M elements
float* x, *y;
//! Allocates the Unified memory : accessible from both CPU and GPU
hipMallocManaged(&x, N * sizeof(float));
hipMallocManaged(&y, N * sizeof(float));
//! initialize x and y arrays on the host
for (int i = 0; i < N; ++i)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
//! Run kernel on 16M elements on the GPU
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y);
//! Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
//! Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; ++i)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
std::cout << "Max error : " << maxError << std::endl;
std::cout << "Max error : " << maxError << std::endl;
hipFree(x);
hipFree(y);
return 0;
} | a4de8b619e4e83ee4f5cf0880e4cf46602291bd3.cu | #include <iostream>
#include <math.h>
//! Function to add the elements of two arrays
__global__ void add(int n, float* x, float* y)
{
for (int i = 0; i < n; ++i)
y[i] = x[i] + y[i];
}
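//! Note (illustrative): launched with <<<1, 1>>> below, this kernel runs the whole
//! loop in a single GPU thread. A common parallel variant (sketch only) would use a
//! grid-stride loop instead:
//!   int i = blockIdx.x * blockDim.x + threadIdx.x;
//!   for (; i < n; i += blockDim.x * gridDim.x) y[i] = x[i] + y[i];
//! launched e.g. as add<<<(N + 255) / 256, 256>>>(N, x, y);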
int main(int argc, char* argv[])
{
int N = 1 << 24; //! 16M elements
float* x, *y;
//! Allocates the Unified memory : accessible from both CPU and GPU
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
//! initialize x and y arrays on the host
for (int i = 0; i < N; ++i)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
//! Run kernel on 16M elements on the GPU
add<<<1, 1>>>(N, x, y);
//! Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
//! Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; ++i)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
std::cout << "Max error : " << maxError << std::endl;
std::cout << "Max error : " << maxError << std::endl;
cudaFree(x);
cudaFree(y);
return 0;
} |
766613ac6db3d1cb6ed831e0562f9f515b346395.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kfusion.h"
#undef isnan
#undef isfinite
#include <iostream>
#include <TooN/TooN.h>
#include <TooN/se3.h>
#include <TooN/GR_SVD.h>
static const float INVALID = -2; // this is used to mark invalid entries in normal or vertex maps
using namespace std;
__global__ void initVolume( Volume volume, const float2 val ){
uint3 pos = make_uint3(thr2pos2());
for(pos.z = 0; pos.z < volume.size.z; ++pos.z)
volume.set(pos, val);
}
__global__ void raycast( Image<float3> pos3D, Image<float3> normal, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep){
const uint2 pos = thr2pos2();
const float4 hit = raycast( volume, pos, view, nearPlane, farPlane, step, largestep );
if(hit.w > 0){
pos3D[pos] = make_float3(hit);
float3 surfNorm = volume.grad(make_float3(hit));
if(length(surfNorm) == 0){
normal[pos].x = INVALID;
} else {
normal[pos] = normalize(surfNorm);
}
} else {
pos3D[pos] = make_float3(0);
normal[pos] = make_float3(0);
}
}
__forceinline__ __device__ float sq( const float x ){
return x*x;
}
__global__ void integrate( Volume vol, const Image<float> depth, const Matrix4 invTrack, const Matrix4 K, const float mu, const float maxweight){
uint3 pix = make_uint3(thr2pos2());
float3 pos = invTrack * vol.pos(pix);
const float3 delta = rotate(invTrack, make_float3(0,0, vol.dim.z / vol.size.z));
for(pix.z = 0; pix.z < vol.size.z; ++pix.z, pos += delta){
if(pos.z < 0.0001f) // some near plane constraint
continue;
const float3 cameraX = K * pos;
const float2 pixel = make_float2(cameraX.x/cameraX.z + 0.5f, cameraX.y/cameraX.z + 0.5f);
if(pixel.x < 0 || pixel.x > depth.size.x-1 || pixel.y < 0 || pixel.y > depth.size.y-1)
continue;
const uint2 px = make_uint2(pixel.x, pixel.y);
if(depth[px] == 0)
continue;
const float diff = (depth[px] - cameraX.z) * sqrt(1+sq(pos.x/pos.z) + sq(pos.y/pos.z));
if(diff > -mu){
const float sdf = fminf(1.f, diff/mu);
float2 data = vol[pix];
data.x = clamp((data.y*data.x + sdf)/(data.y + 1), -1.f, 1.f);
data.y = fminf(data.y+1, maxweight);
vol.set(pix, data);
}
}
}
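// Note (illustrative summary of the update above): each voxel stores a truncated
// signed distance x and a weight y; a new depth frame is fused with the running
// weighted average
//   x_new = clamp((y*x + sdf) / (y + 1), -1, 1),   y_new = min(y + 1, maxweight)
// where sdf = min(1, diff/mu) truncates the measured distance to the band mu.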
__global__ void depth2vertex( Image<float3> vertex, const Image<float> depth, const Matrix4 invK ){
const uint2 pixel = thr2pos2();
if(pixel.x >= depth.size.x || pixel.y >= depth.size.y )
return;
if(depth[pixel] > 0){
vertex[pixel] = depth[pixel] * (rotate(invK, make_float3(pixel.x, pixel.y, 1.f)));
} else {
vertex[pixel] = make_float3(0);
}
}
__global__ void vertex2normal( Image<float3> normal, const Image<float3> vertex ){
const uint2 pixel = thr2pos2();
if(pixel.x >= vertex.size.x || pixel.y >= vertex.size.y )
return;
const float3 left = vertex[make_uint2(max(int(pixel.x)-1,0), pixel.y)];
const float3 right = vertex[make_uint2(min(pixel.x+1,vertex.size.x-1), pixel.y)];
const float3 up = vertex[make_uint2(pixel.x, max(int(pixel.y)-1,0))];
const float3 down = vertex[make_uint2(pixel.x, min(pixel.y+1,vertex.size.y-1))];
if(left.z == 0 || right.z == 0 || up.z == 0 || down.z == 0) {
normal[pixel].x = INVALID;
return;
}
const float3 dxv = right - left;
const float3 dyv = down - up;
normal[pixel] = normalize(cross(dyv, dxv)); // switched dx and dy to get factor -1
}
__forceinline__ __device__ float raw2depth( float d ){
return clamp( 1.0f / (d * -0.0030711016f + 3.3309495161f), 0.f, 30.f);
}
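// Note (illustrative): raw2depth converts a raw Kinect-style disparity value into
// metres using the commonly quoted calibration 1 / (d * -0.0030711016 + 3.3309495161),
// clamped here to [0, 30] m; raw2cooked and raw2cookedHalfSampled below apply it per pixel.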
__global__ void raw2cooked( Image<float> depth, const Image<ushort> in ){
depth.el() = raw2depth(in.el());
}
__global__ void raw2cookedHalfSampled( Image<float> depth, const Image<ushort> in ){
const uint2 pixel = thr2pos2();
depth[pixel] = raw2depth(in[pixel * 2]);
}
//column pass using coalesced global memory reads
__global__ void bilateral_filter(Image<float> out, const Image<float> in, const Image<float> gaussian, const float e_d, const int r) {
const uint2 pos = thr2pos2();
if(in[pos] == 0){
out[pos] = 0;
return;
}
float sum = 0.0f;
float t = 0.0f;
const float center = in[pos];
for(int i = -r; i <= r; ++i) {
for(int j = -r; j <= r; ++j) {
const float curPix = in[make_uint2(clamp(pos.x + i, 0u, in.size.x-1), clamp(pos.y + j, 0u, in.size.y-1))];
if(curPix > 0){
const float mod = (curPix - center) * (curPix - center);
const float factor = gaussian[make_uint2(i + r, 0)] * gaussian[make_uint2(j + r, 0)] * __expf(-mod / (2 * e_d * e_d));
t += factor * curPix;
sum += factor;
}
}
}
out[pos] = t / sum;
}
// filter and halfsample
__global__ void halfSampleRobust( Image<float> out, const Image<float> in, const float e_d, const int r){
const uint2 pixel = thr2pos2();
const uint2 centerPixel = 2 * pixel;
if(pixel.x >= out.size.x || pixel.y >= out.size.y )
return;
float sum = 0.0f;
float t = 0.0f;
const float center = in[centerPixel];
for(int i = -r + 1; i <= r; ++i){
for(int j = -r + 1; j <= r; ++j){
float current = in[make_uint2(clamp(make_int2(centerPixel.x + j, centerPixel.y + i), make_int2(0), make_int2(in.size.x - 1, in.size.y - 1)))]; // TODO simplify this!
if(fabsf(current - center) < e_d){
sum += 1.0f;
t += current;
}
}
}
out[pixel] = t / sum;
}
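// builds the 1D spatial Gaussian lookup table (length 2*radius + 1) that the
// bilateral filter above samples for its spatial weights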
__global__ void generate_gaussian(Image<float> out, float delta, int radius) {
int x = threadIdx.x - radius;
out[make_uint2(threadIdx.x,0)] = __expf(-(x * x) / (2 * delta * delta));
}
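// projective data association for point-to-plane ICP; result codes:
// 1 = valid correspondence, -1 = invalid input normal, -2 = projection falls outside
// the reference image, -3 = invalid reference normal, -4 = distance threshold exceeded,
// -5 = normal angle threshold exceeded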
__global__ void track( Image<TrackData> output, const Image<float3> inVertex, const Image<float3> inNormal , const Image<float3> refVertex, const Image<float3> refNormal, const Matrix4 Ttrack, const Matrix4 view, const float dist_threshold, const float normal_threshold ) {
const uint2 pixel = thr2pos2();
if(pixel.x >= inVertex.size.x || pixel.y >= inVertex.size.y )
return;
TrackData & row = output[pixel];
if(inNormal[pixel].x == INVALID ){
row.result = -1;
return;
}
const float3 projectedVertex = Ttrack * inVertex[pixel];
const float3 projectedPos = view * projectedVertex;
const float2 projPixel = make_float2( projectedPos.x / projectedPos.z + 0.5f, projectedPos.y / projectedPos.z + 0.5f);
if(projPixel.x < 0 || projPixel.x > refVertex.size.x-1 || projPixel.y < 0 || projPixel.y > refVertex.size.y-1 ){
row.result = -2;
return;
}
const uint2 refPixel = make_uint2(projPixel.x, projPixel.y);
const float3 referenceNormal = refNormal[refPixel];
if(referenceNormal.x == INVALID){
row.result = -3;
return;
}
const float3 diff = refVertex[refPixel] - projectedVertex;
const float3 projectedNormal = rotate(Ttrack, inNormal[pixel]);
if(length(diff) > dist_threshold ){
row.result = -4;
return;
}
if(dot(projectedNormal, referenceNormal) < normal_threshold){
row.result = -5;
return;
}
row.result = 1;
row.error = dot(referenceNormal, diff);
((float3 *)row.J)[0] = referenceNormal;
((float3 *)row.J)[1] = cross(projectedVertex, referenceNormal);
}
__global__ void reduce( float * out, const Image<TrackData> J, const uint2 size){
__shared__ float S[112][32]; // this is for the final accumulation
const uint sline = threadIdx.x;
float sums[32];
float * jtj = sums + 7;
float * info = sums + 28;
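// per-thread accumulator layout: sums[0] = squared error, sums[1..6] = J^T e,
// sums[7..27] = upper triangle of the 6x6 J^T J (21 values),
// sums[28..31] = pixel counters (valid, distance-rejected, normal-rejected, no correspondence)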
for(uint i = 0; i < 32; ++i)
sums[i] = 0;
for(uint y = blockIdx.x; y < size.y; y += gridDim.x){
for(uint x = sline; x < size.x; x += blockDim.x ){
const TrackData & row = J[make_uint2(x, y)];
if(row.result < 1){
info[1] += row.result == -4 ? 1 : 0;
info[2] += row.result == -5 ? 1 : 0;
info[3] += row.result == -2 || row.result == -3 ? 1 : 0;
continue;
}
// Error part
sums[0] += row.error * row.error;
// JTe part
for(int i = 0; i < 6; ++i)
sums[i+1] += row.error * row.J[i];
// JTJ part, unfortunately the double loop is not unrolled well...
jtj[0] += row.J[0] * row.J[0];
jtj[1] += row.J[0] * row.J[1];
jtj[2] += row.J[0] * row.J[2];
jtj[3] += row.J[0] * row.J[3];
jtj[4] += row.J[0] * row.J[4];
jtj[5] += row.J[0] * row.J[5];
jtj[6] += row.J[1] * row.J[1];
jtj[7] += row.J[1] * row.J[2];
jtj[8] += row.J[1] * row.J[3];
jtj[9] += row.J[1] * row.J[4];
jtj[10] += row.J[1] * row.J[5];
jtj[11] += row.J[2] * row.J[2];
jtj[12] += row.J[2] * row.J[3];
jtj[13] += row.J[2] * row.J[4];
jtj[14] += row.J[2] * row.J[5];
jtj[15] += row.J[3] * row.J[3];
jtj[16] += row.J[3] * row.J[4];
jtj[17] += row.J[3] * row.J[5];
jtj[18] += row.J[4] * row.J[4];
jtj[19] += row.J[4] * row.J[5];
jtj[20] += row.J[5] * row.J[5];
// extra info here
info[0] += 1;
}
}
for(int i = 0; i < 32; ++i) // copy over to shared memory
S[sline][i] = sums[i];
__syncthreads(); // wait for everyone to finish
if(sline < 32){ // sum up columns and copy to global memory in the final 32 threads
for(unsigned i = 1; i < blockDim.x; ++i)
S[0][sline] += S[i][sline];
out[sline+blockIdx.x*32] = S[0][sline];
}
}
__global__ void trackAndReduce( float * out, const Image<float3> inVertex, const Image<float3> inNormal , const Image<float3> refVertex, const Image<float3> refNormal, const Matrix4 Ttrack, const Matrix4 view, const float dist_threshold, const float normal_threshold ){
__shared__ float S[112][32]; // this is for the final accumulation
const uint sline = threadIdx.x;
float sums[32];
float * jtj = sums + 7;
float * info = sums + 28;
for(uint i = 0; i < 32; ++i)
sums[i] = 0;
float J[6];
for(uint y = blockIdx.x; y < inVertex.size.y; y += gridDim.x){
for(uint x = sline; x < inVertex.size.x; x += blockDim.x ){
const uint2 pixel = make_uint2(x,y);
if(inNormal[pixel].x == INVALID){
continue;
}
const float3 projectedVertex = Ttrack * inVertex[pixel];
const float3 projectedPos = view * projectedVertex;
const float2 projPixel = make_float2( projectedPos.x / projectedPos.z + 0.5f, projectedPos.y / projectedPos.z + 0.5f);
if(projPixel.x < 0 || projPixel.x > refVertex.size.x-1 || projPixel.y < 0 || projPixel.y > refVertex.size.y-1 ){
info[3] += 1;
continue;
}
const uint2 refPixel = make_uint2(projPixel.x, projPixel.y);
if(refNormal[refPixel].x == INVALID){
info[3] += 1;
continue;
}
const float3 referenceNormal = refNormal[refPixel];
const float3 diff = refVertex[refPixel] - projectedVertex;
const float3 projectedNormal = rotate(Ttrack, inNormal[pixel]);
if(length(diff) > dist_threshold ){
info[1] += 1;
continue;
}
if(dot(projectedNormal, referenceNormal) < normal_threshold){
info[2] += 1;
continue;
}
const float error = dot(referenceNormal, diff);
((float3 *)J)[0] = referenceNormal;
((float3 *)J)[1] = cross(projectedVertex, referenceNormal);
// Error part
sums[0] += error * error;
// JTe part
for(int i = 0; i < 6; ++i)
sums[i+1] += error * J[i];
// JTJ part
jtj[0] += J[0] * J[0];
jtj[1] += J[0] * J[1];
jtj[2] += J[0] * J[2];
jtj[3] += J[0] * J[3];
jtj[4] += J[0] * J[4];
jtj[5] += J[0] * J[5];
jtj[6] += J[1] * J[1];
jtj[7] += J[1] * J[2];
jtj[8] += J[1] * J[3];
jtj[9] += J[1] * J[4];
jtj[10] += J[1] * J[5];
jtj[11] += J[2] * J[2];
jtj[12] += J[2] * J[3];
jtj[13] += J[2] * J[4];
jtj[14] += J[2] * J[5];
jtj[15] += J[3] * J[3];
jtj[16] += J[3] * J[4];
jtj[17] += J[3] * J[5];
jtj[18] += J[4] * J[4];
jtj[19] += J[4] * J[5];
jtj[20] += J[5] * J[5];
// extra info here
info[0] += 1;
}
}
for(int i = 0; i < 32; ++i) // copy over to shared memory
S[sline][i] = sums[i];
__syncthreads(); // wait for everyone to finish
if(sline < 32){ // sum up columns and copy to global memory in the final 32 threads
for(unsigned i = 1; i < blockDim.x; ++i)
S[0][sline] += S[i][sline];
out[sline+blockIdx.x*32] = S[0][sline];
}
}
void KFusion::Init( const KFusionConfig & config ) {
configuration = config;
hipSetDeviceFlags(hipDeviceMapHost);
integration.init(config.volumeSize, config.volumeDimensions);
reduction.alloc(config.inputSize);
vertex.alloc(config.inputSize);
normal.alloc(config.inputSize);
rawDepth.alloc(config.inputSize);
inputDepth.resize(config.iterations.size());
inputVertex.resize(config.iterations.size());
inputNormal.resize(config.iterations.size());
for(int i = 0; i < config.iterations.size(); ++i){
inputDepth[i].alloc(config.inputSize >> i);
inputVertex[i].alloc(config.inputSize >> i);
inputNormal[i].alloc(config.inputSize >> i);
}
gaussian.alloc(make_uint2(config.radius * 2 + 1, 1));
output.alloc(make_uint2(32,8));
//generate gaussian array
hipLaunchKernelGGL(( generate_gaussian), dim3(1), dim3(gaussian.size.x), 0, 0, gaussian, config.delta, config.radius);
Reset();
}
void KFusion::Reset(){
dim3 block(32,16);
dim3 grid = divup(dim3(integration.size.x, integration.size.y), block);
hipLaunchKernelGGL(( initVolume), dim3(grid), dim3(block), 0, 0, integration, make_float2(1.0f, 0.0f));
}
void KFusion::Clear(){
integration.release();
}
void KFusion::setPose( const Matrix4 & p ){
pose = p;
}
void KFusion::setKinectDeviceDepth( const Image<uint16_t> & in ){
if(configuration.inputSize.x == in.size.x)
hipLaunchKernelGGL(( raw2cooked), dim3(divup(rawDepth.size, configuration.imageBlock)), dim3(configuration.imageBlock), 0, 0, rawDepth, in );
else if(configuration.inputSize.x == in.size.x / 2 )
hipLaunchKernelGGL(( raw2cookedHalfSampled), dim3(divup(rawDepth.size, configuration.imageBlock)), dim3(configuration.imageBlock), 0, 0, rawDepth, in );
else
assert(false);
}
Matrix4 operator*( const Matrix4 & A, const Matrix4 & B){
Matrix4 R;
TooN::wrapMatrix<4,4>(&R.data[0].x) = TooN::wrapMatrix<4,4>(&A.data[0].x) * TooN::wrapMatrix<4,4>(&B.data[0].x);
return R;
}
template<typename P>
inline Matrix4 toMatrix4( const TooN::SE3<P> & p){
const TooN::Matrix<4, 4, float> I = TooN::Identity;
Matrix4 R;
TooN::wrapMatrix<4,4>(&R.data[0].x) = p * I;
return R;
}
Matrix4 inverse( const Matrix4 & A ){
static TooN::Matrix<4, 4, float> I = TooN::Identity;
TooN::Matrix<4,4,float> temp = TooN::wrapMatrix<4,4>(&A.data[0].x);
Matrix4 R;
TooN::wrapMatrix<4,4>(&R.data[0].x) = TooN::gaussian_elimination(temp , I );
return R;
}
std::ostream & operator<<( std::ostream & out, const Matrix4 & m ){
for(unsigned i = 0; i < 4; ++i)
out << m.data[i].x << " " << m.data[i].y << " " << m.data[i].z << " " << m.data[i].w << "\n";
return out;
}
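// rebuilds the symmetric 6x6 J^T J matrix from its 21 packed upper-triangular entries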
template <typename P, typename A>
TooN::Matrix<6> makeJTJ( const TooN::Vector<21, P, A> & v ){
TooN::Matrix<6> C = TooN::Zeros;
C[0] = v.template slice<0,6>();
C[1].template slice<1,5>() = v.template slice<6,5>();
C[2].template slice<2,4>() = v.template slice<11,4>();
C[3].template slice<3,3>() = v.template slice<15,3>();
C[4].template slice<4,2>() = v.template slice<18,2>();
C[5][5] = v[20];
for(int r = 1; r < 6; ++r)
for(int c = 0; c < r; ++c)
C[r][c] = C[c][r];
return C;
}
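// splits the reduced 27-vector into J^T e (first 6 values) and packed J^T J (remaining 21)
// and solves the 6-DOF normal equations by SVD back-substitution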
template <typename T, typename A>
TooN::Vector<6> solve( const TooN::Vector<27, T, A> & vals ){
const TooN::Vector<6> b = vals.template slice<0,6>();
const TooN::Matrix<6> C = makeJTJ(vals.template slice<6,21>());
TooN::GR_SVD<6,6> svd(C);
return svd.backsub(b, 1e6);
}
bool KFusion::Track() {
const Matrix4 invK = getInverseCameraMatrix(configuration.camera);
vector<dim3> grids;
for(int i = 0; i < configuration.iterations.size(); ++i)
grids.push_back(divup(configuration.inputSize >> i, configuration.imageBlock));
// raycast integration volume into the depth, vertex, normal buffers
hipLaunchKernelGGL(( raycast), dim3(divup(configuration.inputSize, configuration.raycastBlock)), dim3(configuration.raycastBlock), 0, 0, vertex, normal, integration, pose * invK, configuration.nearPlane, configuration.farPlane, configuration.stepSize(), 0.75 * configuration.mu);
// filter the input depth map
hipLaunchKernelGGL(( bilateral_filter), dim3(grids[0]), dim3(configuration.imageBlock), 0, 0, inputDepth[0], rawDepth, gaussian, configuration.e_delta, configuration.radius);
// half sample the input depth maps into the pyramid levels
for(int i = 1; i < configuration.iterations.size(); ++i)
hipLaunchKernelGGL(( halfSampleRobust), dim3(grids[i]), dim3(configuration.imageBlock), 0, 0, inputDepth[i], inputDepth[i-1], configuration.e_delta * 3, 1);
// prepare the 3D information from the input depth maps
for(int i = 0; i < configuration.iterations.size(); ++i){
hipLaunchKernelGGL(( depth2vertex), dim3(grids[i]), dim3(configuration.imageBlock), 0, 0, inputVertex[i], inputDepth[i], getInverseCameraMatrix(configuration.camera / (1 << i))); // inverse camera matrix depends on level
hipLaunchKernelGGL(( vertex2normal), dim3(grids[i]), dim3(configuration.imageBlock), 0, 0, inputNormal[i], inputVertex[i] );
}
const Matrix4 oldPose = pose;
const Matrix4 projectReference = getCameraMatrix(configuration.camera) * inverse(pose);
TooN::Matrix<8, 32, float, TooN::Reference::RowMajor> values(output.data());
for(int level = configuration.iterations.size()-1; level >= 0; --level){
for(int i = 0; i < configuration.iterations[level]; ++i){
if(configuration.combinedTrackAndReduce){
hipLaunchKernelGGL(( trackAndReduce), dim3(8), dim3(112), 0, 0, output.getDeviceImage().data(), inputVertex[level], inputNormal[level], vertex, normal, pose, projectReference, configuration.dist_threshold, configuration.normal_threshold );
} else {
hipLaunchKernelGGL(( track), dim3(grids[level]), dim3(configuration.imageBlock), 0, 0, reduction, inputVertex[level], inputNormal[level], vertex, normal, pose, projectReference, configuration.dist_threshold, configuration.normal_threshold);
hipLaunchKernelGGL(( reduce), dim3(8), dim3(112), 0, 0, output.getDeviceImage().data(), reduction, inputVertex[level].size ); // compute the linear system to solve
}
hipDeviceSynchronize(); // important due to async nature of kernel call
for(int j = 1; j < 8; ++j)
values[0] += values[j];
TooN::Vector<6> x = solve(values[0].slice<1,27>());
TooN::SE3<> delta(x);
pose = toMatrix4( delta ) * pose;
if(norm(x) < 1e-5)
break;
}
}
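// reject the pose update if the RMS point-to-plane error over the valid pixels exceeds 2 cm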
if(sqrt(values(0,0) / values(0,28)) > 2e-2){
pose = oldPose;
return false;
}
return true;
}
void KFusion::Integrate() {
hipLaunchKernelGGL(( integrate), dim3(divup(dim3(integration.size.x, integration.size.y), configuration.imageBlock)), dim3(configuration.imageBlock), 0, 0, integration, rawDepth, inverse(pose), getCameraMatrix(configuration.camera), configuration.mu, configuration.maxweight );
}
int printCUDAError() {
hipError_t error = hipGetLastError();
if(error)
std::cout << hipGetErrorString(error) << std::endl;
return error;
}
| 766613ac6db3d1cb6ed831e0562f9f515b346395.cu | #include "kfusion.h"
#undef isnan
#undef isfinite
#include <iostream>
#include <TooN/TooN.h>
#include <TooN/se3.h>
#include <TooN/GR_SVD.h>
static const float INVALID = -2; // this is used to mark invalid entries in normal or vertex maps
using namespace std;
__global__ void initVolume( Volume volume, const float2 val ){
uint3 pos = make_uint3(thr2pos2());
for(pos.z = 0; pos.z < volume.size.z; ++pos.z)
volume.set(pos, val);
}
__global__ void raycast( Image<float3> pos3D, Image<float3> normal, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep){
const uint2 pos = thr2pos2();
const float4 hit = raycast( volume, pos, view, nearPlane, farPlane, step, largestep );
if(hit.w > 0){
pos3D[pos] = make_float3(hit);
float3 surfNorm = volume.grad(make_float3(hit));
if(length(surfNorm) == 0){
normal[pos].x = INVALID;
} else {
normal[pos] = normalize(surfNorm);
}
} else {
pos3D[pos] = make_float3(0);
normal[pos] = make_float3(0);
}
}
__forceinline__ __device__ float sq( const float x ){
return x*x;
}
__global__ void integrate( Volume vol, const Image<float> depth, const Matrix4 invTrack, const Matrix4 K, const float mu, const float maxweight){
uint3 pix = make_uint3(thr2pos2());
float3 pos = invTrack * vol.pos(pix);
const float3 delta = rotate(invTrack, make_float3(0,0, vol.dim.z / vol.size.z));
for(pix.z = 0; pix.z < vol.size.z; ++pix.z, pos += delta){
if(pos.z < 0.0001f) // some near plane constraint
continue;
const float3 cameraX = K * pos;
const float2 pixel = make_float2(cameraX.x/cameraX.z + 0.5f, cameraX.y/cameraX.z + 0.5f);
if(pixel.x < 0 || pixel.x > depth.size.x-1 || pixel.y < 0 || pixel.y > depth.size.y-1)
continue;
const uint2 px = make_uint2(pixel.x, pixel.y);
if(depth[px] == 0)
continue;
const float diff = (depth[px] - cameraX.z) * sqrt(1+sq(pos.x/pos.z) + sq(pos.y/pos.z));
if(diff > -mu){
const float sdf = fminf(1.f, diff/mu);
float2 data = vol[pix];
data.x = clamp((data.y*data.x + sdf)/(data.y + 1), -1.f, 1.f);
data.y = fminf(data.y+1, maxweight);
vol.set(pix, data);
}
}
}
__global__ void depth2vertex( Image<float3> vertex, const Image<float> depth, const Matrix4 invK ){
const uint2 pixel = thr2pos2();
if(pixel.x >= depth.size.x || pixel.y >= depth.size.y )
return;
if(depth[pixel] > 0){
vertex[pixel] = depth[pixel] * (rotate(invK, make_float3(pixel.x, pixel.y, 1.f)));
} else {
vertex[pixel] = make_float3(0);
}
}
__global__ void vertex2normal( Image<float3> normal, const Image<float3> vertex ){
const uint2 pixel = thr2pos2();
if(pixel.x >= vertex.size.x || pixel.y >= vertex.size.y )
return;
const float3 left = vertex[make_uint2(max(int(pixel.x)-1,0), pixel.y)];
const float3 right = vertex[make_uint2(min(pixel.x+1,vertex.size.x-1), pixel.y)];
const float3 up = vertex[make_uint2(pixel.x, max(int(pixel.y)-1,0))];
const float3 down = vertex[make_uint2(pixel.x, min(pixel.y+1,vertex.size.y-1))];
if(left.z == 0 || right.z == 0 || up.z == 0 || down.z == 0) {
normal[pixel].x = INVALID;
return;
}
const float3 dxv = right - left;
const float3 dyv = down - up;
normal[pixel] = normalize(cross(dyv, dxv)); // switched dx and dy to get factor -1
}
__forceinline__ __device__ float raw2depth( float d ){
return clamp( 1.0f / (d * -0.0030711016f + 3.3309495161f), 0.f, 30.f);
}
__global__ void raw2cooked( Image<float> depth, const Image<ushort> in ){
depth.el() = raw2depth(in.el());
}
__global__ void raw2cookedHalfSampled( Image<float> depth, const Image<ushort> in ){
const uint2 pixel = thr2pos2();
depth[pixel] = raw2depth(in[pixel * 2]);
}
// bilateral filter of the raw depth map (combined spatial and range Gaussian weights)
__global__ void bilateral_filter(Image<float> out, const Image<float> in, const Image<float> gaussian, const float e_d, const int r) {
const uint2 pos = thr2pos2();
if(in[pos] == 0){
out[pos] = 0;
return;
}
float sum = 0.0f;
float t = 0.0f;
const float center = in[pos];
for(int i = -r; i <= r; ++i) {
for(int j = -r; j <= r; ++j) {
const float curPix = in[make_uint2(clamp(pos.x + i, 0u, in.size.x-1), clamp(pos.y + j, 0u, in.size.y-1))];
if(curPix > 0){
const float mod = (curPix - center) * (curPix - center);
const float factor = gaussian[make_uint2(i + r, 0)] * gaussian[make_uint2(j + r, 0)] * __expf(-mod / (2 * e_d * e_d));
t += factor * curPix;
sum += factor;
}
}
}
out[pos] = t / sum;
}
// filter and halfsample
__global__ void halfSampleRobust( Image<float> out, const Image<float> in, const float e_d, const int r){
const uint2 pixel = thr2pos2();
const uint2 centerPixel = 2 * pixel;
if(pixel.x >= out.size.x || pixel.y >= out.size.y )
return;
float sum = 0.0f;
float t = 0.0f;
const float center = in[centerPixel];
for(int i = -r + 1; i <= r; ++i){
for(int j = -r + 1; j <= r; ++j){
float current = in[make_uint2(clamp(make_int2(centerPixel.x + j, centerPixel.y + i), make_int2(0), make_int2(in.size.x - 1, in.size.y - 1)))]; // TODO simplify this!
if(fabsf(current - center) < e_d){
sum += 1.0f;
t += current;
}
}
}
out[pixel] = t / sum;
}
__global__ void generate_gaussian(Image<float> out, float delta, int radius) {
int x = threadIdx.x - radius;
out[make_uint2(threadIdx.x,0)] = __expf(-(x * x) / (2 * delta * delta));
}
__global__ void track( Image<TrackData> output, const Image<float3> inVertex, const Image<float3> inNormal , const Image<float3> refVertex, const Image<float3> refNormal, const Matrix4 Ttrack, const Matrix4 view, const float dist_threshold, const float normal_threshold ) {
const uint2 pixel = thr2pos2();
if(pixel.x >= inVertex.size.x || pixel.y >= inVertex.size.y )
return;
TrackData & row = output[pixel];
if(inNormal[pixel].x == INVALID ){
row.result = -1;
return;
}
const float3 projectedVertex = Ttrack * inVertex[pixel];
const float3 projectedPos = view * projectedVertex;
const float2 projPixel = make_float2( projectedPos.x / projectedPos.z + 0.5f, projectedPos.y / projectedPos.z + 0.5f);
if(projPixel.x < 0 || projPixel.x > refVertex.size.x-1 || projPixel.y < 0 || projPixel.y > refVertex.size.y-1 ){
row.result = -2;
return;
}
const uint2 refPixel = make_uint2(projPixel.x, projPixel.y);
const float3 referenceNormal = refNormal[refPixel];
if(referenceNormal.x == INVALID){
row.result = -3;
return;
}
const float3 diff = refVertex[refPixel] - projectedVertex;
const float3 projectedNormal = rotate(Ttrack, inNormal[pixel]);
if(length(diff) > dist_threshold ){
row.result = -4;
return;
}
if(dot(projectedNormal, referenceNormal) < normal_threshold){
row.result = -5;
return;
}
row.result = 1;
row.error = dot(referenceNormal, diff);
((float3 *)row.J)[0] = referenceNormal;
((float3 *)row.J)[1] = cross(projectedVertex, referenceNormal);
}
__global__ void reduce( float * out, const Image<TrackData> J, const uint2 size){
__shared__ float S[112][32]; // this is for the final accumulation
const uint sline = threadIdx.x;
float sums[32];
float * jtj = sums + 7;
float * info = sums + 28;
for(uint i = 0; i < 32; ++i)
sums[i] = 0;
for(uint y = blockIdx.x; y < size.y; y += gridDim.x){
for(uint x = sline; x < size.x; x += blockDim.x ){
const TrackData & row = J[make_uint2(x, y)];
if(row.result < 1){
info[1] += row.result == -4 ? 1 : 0;
info[2] += row.result == -5 ? 1 : 0;
info[3] += row.result == -2 || row.result == -3 ? 1 : 0;
continue;
}
// Error part
sums[0] += row.error * row.error;
// JTe part
for(int i = 0; i < 6; ++i)
sums[i+1] += row.error * row.J[i];
// JTJ part, unfortunately the double loop is not unrolled well...
jtj[0] += row.J[0] * row.J[0];
jtj[1] += row.J[0] * row.J[1];
jtj[2] += row.J[0] * row.J[2];
jtj[3] += row.J[0] * row.J[3];
jtj[4] += row.J[0] * row.J[4];
jtj[5] += row.J[0] * row.J[5];
jtj[6] += row.J[1] * row.J[1];
jtj[7] += row.J[1] * row.J[2];
jtj[8] += row.J[1] * row.J[3];
jtj[9] += row.J[1] * row.J[4];
jtj[10] += row.J[1] * row.J[5];
jtj[11] += row.J[2] * row.J[2];
jtj[12] += row.J[2] * row.J[3];
jtj[13] += row.J[2] * row.J[4];
jtj[14] += row.J[2] * row.J[5];
jtj[15] += row.J[3] * row.J[3];
jtj[16] += row.J[3] * row.J[4];
jtj[17] += row.J[3] * row.J[5];
jtj[18] += row.J[4] * row.J[4];
jtj[19] += row.J[4] * row.J[5];
jtj[20] += row.J[5] * row.J[5];
// extra info here
info[0] += 1;
}
}
for(int i = 0; i < 32; ++i) // copy over to shared memory
S[sline][i] = sums[i];
__syncthreads(); // wait for everyone to finish
if(sline < 32){ // sum up columns and copy to global memory in the final 32 threads
for(unsigned i = 1; i < blockDim.x; ++i)
S[0][sline] += S[i][sline];
out[sline+blockIdx.x*32] = S[0][sline];
}
}
__global__ void trackAndReduce( float * out, const Image<float3> inVertex, const Image<float3> inNormal , const Image<float3> refVertex, const Image<float3> refNormal, const Matrix4 Ttrack, const Matrix4 view, const float dist_threshold, const float normal_threshold ){
__shared__ float S[112][32]; // this is for the final accumulation
const uint sline = threadIdx.x;
float sums[32];
float * jtj = sums + 7;
float * info = sums + 28;
for(uint i = 0; i < 32; ++i)
sums[i] = 0;
float J[6];
for(uint y = blockIdx.x; y < inVertex.size.y; y += gridDim.x){
for(uint x = sline; x < inVertex.size.x; x += blockDim.x ){
const uint2 pixel = make_uint2(x,y);
if(inNormal[pixel].x == INVALID){
continue;
}
const float3 projectedVertex = Ttrack * inVertex[pixel];
const float3 projectedPos = view * projectedVertex;
const float2 projPixel = make_float2( projectedPos.x / projectedPos.z + 0.5f, projectedPos.y / projectedPos.z + 0.5f);
if(projPixel.x < 0 || projPixel.x > refVertex.size.x-1 || projPixel.y < 0 || projPixel.y > refVertex.size.y-1 ){
info[3] += 1;
continue;
}
const uint2 refPixel = make_uint2(projPixel.x, projPixel.y);
if(refNormal[refPixel].x == INVALID){
info[3] += 1;
continue;
}
const float3 referenceNormal = refNormal[refPixel];
const float3 diff = refVertex[refPixel] - projectedVertex;
const float3 projectedNormal = rotate(Ttrack, inNormal[pixel]);
if(length(diff) > dist_threshold ){
info[1] += 1;
continue;
}
if(dot(projectedNormal, referenceNormal) < normal_threshold){
info[2] += 1;
continue;
}
const float error = dot(referenceNormal, diff);
((float3 *)J)[0] = referenceNormal;
((float3 *)J)[1] = cross(projectedVertex, referenceNormal);
// Error part
sums[0] += error * error;
// JTe part
for(int i = 0; i < 6; ++i)
sums[i+1] += error * J[i];
// JTJ part
jtj[0] += J[0] * J[0];
jtj[1] += J[0] * J[1];
jtj[2] += J[0] * J[2];
jtj[3] += J[0] * J[3];
jtj[4] += J[0] * J[4];
jtj[5] += J[0] * J[5];
jtj[6] += J[1] * J[1];
jtj[7] += J[1] * J[2];
jtj[8] += J[1] * J[3];
jtj[9] += J[1] * J[4];
jtj[10] += J[1] * J[5];
jtj[11] += J[2] * J[2];
jtj[12] += J[2] * J[3];
jtj[13] += J[2] * J[4];
jtj[14] += J[2] * J[5];
jtj[15] += J[3] * J[3];
jtj[16] += J[3] * J[4];
jtj[17] += J[3] * J[5];
jtj[18] += J[4] * J[4];
jtj[19] += J[4] * J[5];
jtj[20] += J[5] * J[5];
// extra info here
info[0] += 1;
}
}
for(int i = 0; i < 32; ++i) // copy over to shared memory
S[sline][i] = sums[i];
__syncthreads(); // wait for everyone to finish
if(sline < 32){ // sum up columns and copy to global memory in the final 32 threads
for(unsigned i = 1; i < blockDim.x; ++i)
S[0][sline] += S[i][sline];
out[sline+blockIdx.x*32] = S[0][sline];
}
}
void KFusion::Init( const KFusionConfig & config ) {
configuration = config;
cudaSetDeviceFlags(cudaDeviceMapHost);
integration.init(config.volumeSize, config.volumeDimensions);
reduction.alloc(config.inputSize);
vertex.alloc(config.inputSize);
normal.alloc(config.inputSize);
rawDepth.alloc(config.inputSize);
inputDepth.resize(config.iterations.size());
inputVertex.resize(config.iterations.size());
inputNormal.resize(config.iterations.size());
for(int i = 0; i < config.iterations.size(); ++i){
inputDepth[i].alloc(config.inputSize >> i);
inputVertex[i].alloc(config.inputSize >> i);
inputNormal[i].alloc(config.inputSize >> i);
}
gaussian.alloc(make_uint2(config.radius * 2 + 1, 1));
output.alloc(make_uint2(32,8));
//generate gaussian array
generate_gaussian<<< 1, gaussian.size.x>>>(gaussian, config.delta, config.radius);
Reset();
}
void KFusion::Reset(){
dim3 block(32,16);
dim3 grid = divup(dim3(integration.size.x, integration.size.y), block);
initVolume<<<grid, block>>>(integration, make_float2(1.0f, 0.0f));
}
void KFusion::Clear(){
integration.release();
}
void KFusion::setPose( const Matrix4 & p ){
pose = p;
}
void KFusion::setKinectDeviceDepth( const Image<uint16_t> & in ){
if(configuration.inputSize.x == in.size.x)
raw2cooked<<<divup(rawDepth.size, configuration.imageBlock), configuration.imageBlock>>>( rawDepth, in );
else if(configuration.inputSize.x == in.size.x / 2 )
raw2cookedHalfSampled<<<divup(rawDepth.size, configuration.imageBlock), configuration.imageBlock>>>( rawDepth, in );
else
assert(false);
}
Matrix4 operator*( const Matrix4 & A, const Matrix4 & B){
Matrix4 R;
TooN::wrapMatrix<4,4>(&R.data[0].x) = TooN::wrapMatrix<4,4>(&A.data[0].x) * TooN::wrapMatrix<4,4>(&B.data[0].x);
return R;
}
template<typename P>
inline Matrix4 toMatrix4( const TooN::SE3<P> & p){
const TooN::Matrix<4, 4, float> I = TooN::Identity;
Matrix4 R;
TooN::wrapMatrix<4,4>(&R.data[0].x) = p * I;
return R;
}
Matrix4 inverse( const Matrix4 & A ){
static TooN::Matrix<4, 4, float> I = TooN::Identity;
TooN::Matrix<4,4,float> temp = TooN::wrapMatrix<4,4>(&A.data[0].x);
Matrix4 R;
TooN::wrapMatrix<4,4>(&R.data[0].x) = TooN::gaussian_elimination(temp , I );
return R;
}
std::ostream & operator<<( std::ostream & out, const Matrix4 & m ){
for(unsigned i = 0; i < 4; ++i)
out << m.data[i].x << " " << m.data[i].y << " " << m.data[i].z << " " << m.data[i].w << "\n";
return out;
}
template <typename P, typename A>
TooN::Matrix<6> makeJTJ( const TooN::Vector<21, P, A> & v ){
TooN::Matrix<6> C = TooN::Zeros;
C[0] = v.template slice<0,6>();
C[1].template slice<1,5>() = v.template slice<6,5>();
C[2].template slice<2,4>() = v.template slice<11,4>();
C[3].template slice<3,3>() = v.template slice<15,3>();
C[4].template slice<4,2>() = v.template slice<18,2>();
C[5][5] = v[20];
for(int r = 1; r < 6; ++r)
for(int c = 0; c < r; ++c)
C[r][c] = C[c][r];
return C;
}
template <typename T, typename A>
TooN::Vector<6> solve( const TooN::Vector<27, T, A> & vals ){
const TooN::Vector<6> b = vals.template slice<0,6>();
const TooN::Matrix<6> C = makeJTJ(vals.template slice<6,21>());
TooN::GR_SVD<6,6> svd(C);
return svd.backsub(b, 1e6);
}
bool KFusion::Track() {
const Matrix4 invK = getInverseCameraMatrix(configuration.camera);
vector<dim3> grids;
for(int i = 0; i < configuration.iterations.size(); ++i)
grids.push_back(divup(configuration.inputSize >> i, configuration.imageBlock));
// raycast integration volume into the depth, vertex, normal buffers
raycast<<<divup(configuration.inputSize, configuration.raycastBlock), configuration.raycastBlock>>>(vertex, normal, integration, pose * invK, configuration.nearPlane, configuration.farPlane, configuration.stepSize(), 0.75 * configuration.mu);
// filter the input depth map
bilateral_filter<<<grids[0], configuration.imageBlock>>>(inputDepth[0], rawDepth, gaussian, configuration.e_delta, configuration.radius);
// half sample the input depth maps into the pyramid levels
for(int i = 1; i < configuration.iterations.size(); ++i)
halfSampleRobust<<<grids[i], configuration.imageBlock>>>(inputDepth[i], inputDepth[i-1], configuration.e_delta * 3, 1);
// prepare the 3D information from the input depth maps
for(int i = 0; i < configuration.iterations.size(); ++i){
depth2vertex<<<grids[i], configuration.imageBlock>>>( inputVertex[i], inputDepth[i], getInverseCameraMatrix(configuration.camera / (1 << i))); // inverse camera matrix depends on level
vertex2normal<<<grids[i], configuration.imageBlock>>>( inputNormal[i], inputVertex[i] );
}
const Matrix4 oldPose = pose;
const Matrix4 projectReference = getCameraMatrix(configuration.camera) * inverse(pose);
TooN::Matrix<8, 32, float, TooN::Reference::RowMajor> values(output.data());
for(int level = configuration.iterations.size()-1; level >= 0; --level){
for(int i = 0; i < configuration.iterations[level]; ++i){
if(configuration.combinedTrackAndReduce){
trackAndReduce<<<8, 112>>>( output.getDeviceImage().data(), inputVertex[level], inputNormal[level], vertex, normal, pose, projectReference, configuration.dist_threshold, configuration.normal_threshold );
} else {
track<<<grids[level], configuration.imageBlock>>>( reduction, inputVertex[level], inputNormal[level], vertex, normal, pose, projectReference, configuration.dist_threshold, configuration.normal_threshold);
reduce<<<8, 112>>>( output.getDeviceImage().data(), reduction, inputVertex[level].size ); // compute the linear system to solve
}
cudaDeviceSynchronize(); // important due to async nature of kernel call
for(int j = 1; j < 8; ++j)
values[0] += values[j];
TooN::Vector<6> x = solve(values[0].slice<1,27>());
TooN::SE3<> delta(x);
pose = toMatrix4( delta ) * pose;
if(norm(x) < 1e-5)
break;
}
}
if(sqrt(values(0,0) / values(0,28)) > 2e-2){
pose = oldPose;
return false;
}
return true;
}
void KFusion::Integrate() {
integrate<<<divup(dim3(integration.size.x, integration.size.y), configuration.imageBlock), configuration.imageBlock>>>( integration, rawDepth, inverse(pose), getCameraMatrix(configuration.camera), configuration.mu, configuration.maxweight );
}
int printCUDAError() {
cudaError_t error = cudaGetLastError();
if(error)
std::cout << cudaGetErrorString(error) << std::endl;
return error;
}
|
cd0dc5f51342da871d05b0d89255fb8f5ac76b61.hip | // !!! This is a file automatically generated by hipify!!!
/* This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include "model.h"
__device__ static Nodes nodes;
__device__ static uint population;
__device__ static int lookahead;
__device__ static int mean;
char malloc_nodes(uint n_nodes) {
hipError_t err;
Nodes h_nodes;
err = hipMalloc(&(h_nodes.cr_state),
sizeof(hiprandState_t) * n_nodes);
if (err != hipSuccess) { return 0; }
hipMemcpyToSymbol(nodes, &h_nodes, sizeof(Nodes));
return 1;
}
void free_nodes() {
Nodes h_nodes;
hipMemcpyFromSymbol(&h_nodes, nodes, sizeof(Nodes));
hipFree(h_nodes.cr_state);
}
__device__
void set_model_params(int params[], uint n_params) {
population = params[0];
lookahead = params[1];
mean = params[2];
}
__device__
int get_lookahead() {
return lookahead;
}
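// seeds each node's RNG and spreads the initial event population round-robin across
// the nodes; every initial event is self-addressed with increasing timestamps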
__device__
void init_node(uint nid) {
hiprand_init(nid, 0, 0, &(nodes.cr_state[nid]));
uint n_events = population/ g_n_nodes;
if (nid < population % g_n_nodes) { n_events += 1; }
for (uint i = 0; i < n_events; i++) {
Event event;
event.type = 1;
event.sender = nid;
event.receiver = nid;
event.timestamp = i;
append_event_to_queue(&event);
}
}
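// on every event the node schedules one follow-up event at a uniformly random node,
// lookahead plus an exponentially distributed delay in the future (a PHOLD-like workload);
// if the queue append fails the RNG state is rolled back and an error code is returned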
__device__ // private
char handle_event_type_1(Event *event) {
uint nid = event->receiver;
hiprandState_t *cr_state = &(nodes.cr_state[nid]);
State old_state;
old_state.cr_state = *cr_state;
Event new_event;
new_event.type = 1;
new_event.sender = nid;
new_event.receiver = random(cr_state, g_n_nodes);
new_event.timestamp = event->timestamp + lookahead +
random_exp(cr_state, mean);
char res = append_event_to_queue(&new_event);
if (res == 0) {
nodes.cr_state[nid] = old_state.cr_state;
return 11;
}
return 1;
}
__device__
char handle_event(Event *event) {
uint type = event->type;
if (type == 1) {
return handle_event_type_1(event);
} else {
return 0;
}
}
__device__
void collect_statistics(uint nid) {
return;
}
__device__
void print_statistics() {
printf("STATISTICS NOT AVAILABLE\n");
}
| cd0dc5f51342da871d05b0d89255fb8f5ac76b61.cu | /* This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include "model.h"
__device__ static Nodes nodes;
__device__ static uint population;
__device__ static int lookahead;
__device__ static int mean;
char malloc_nodes(uint n_nodes) {
cudaError_t err;
Nodes h_nodes;
err = cudaMalloc(&(h_nodes.cr_state),
sizeof(curandState_t) * n_nodes);
if (err != cudaSuccess) { return 0; }
cudaMemcpyToSymbol(nodes, &h_nodes, sizeof(Nodes));
return 1;
}
void free_nodes() {
Nodes h_nodes;
cudaMemcpyFromSymbol(&h_nodes, nodes, sizeof(Nodes));
cudaFree(h_nodes.cr_state);
}
__device__
void set_model_params(int params[], uint n_params) {
population = params[0];
lookahead = params[1];
mean = params[2];
}
__device__
int get_lookahead() {
return lookahead;
}
__device__
void init_node(uint nid) {
curand_init(nid, 0, 0, &(nodes.cr_state[nid]));
uint n_events = population/ g_n_nodes;
if (nid < population % g_n_nodes) { n_events += 1; }
for (uint i = 0; i < n_events; i++) {
Event event;
event.type = 1;
event.sender = nid;
event.receiver = nid;
event.timestamp = i;
append_event_to_queue(&event);
}
}
__device__ // private
char handle_event_type_1(Event *event) {
uint nid = event->receiver;
curandState_t *cr_state = &(nodes.cr_state[nid]);
State old_state;
old_state.cr_state = *cr_state;
Event new_event;
new_event.type = 1;
new_event.sender = nid;
new_event.receiver = random(cr_state, g_n_nodes);
new_event.timestamp = event->timestamp + lookahead +
random_exp(cr_state, mean);
char res = append_event_to_queue(&new_event);
if (res == 0) {
nodes.cr_state[nid] = old_state.cr_state;
return 11;
}
return 1;
}
__device__
char handle_event(Event *event) {
uint type = event->type;
if (type == 1) {
return handle_event_type_1(event);
} else {
return 0;
}
}
__device__
void collect_statistics(uint nid) {
return;
}
__device__
void print_statistics() {
printf("STATISTICS NOT AVAILABLE\n");
}
|
603cebd4e51a667a7bc2f2cb19d9cce9dcb45f33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* GridTools
*
* Copyright (c) 2014-2019, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "../../tools/triplet.hpp"
#include "gtest/gtest.h"
#include <gridtools/common/gt_assert.hpp>
#include <gridtools/storage/data_store.hpp>
#include <gridtools/storage/storage_cuda/cuda_storage.hpp>
#include <gridtools/storage/storage_cuda/cuda_storage_info.hpp>
#include <gridtools/storage/storage_cuda/data_view_helpers.hpp>
using namespace gridtools;
const int c_x = 3 /* < 32 for this test */, c_y = 5, c_z = 7;
template <typename View>
__global__ void mul2(View s) {
bool correct_dims = (s.template total_length<0>() == c_x) && (s.template total_length<1>() == c_y) &&
(s.template total_length<2>() == c_z);
bool correct_size = (s.padded_total_length() == 32 * c_y * c_z);
s(0, 0, 0) *= (2 * correct_dims * correct_size);
s(1, 0, 0) *= (2 * correct_dims * correct_size);
}
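// exercises host/device view consistency: writes through a host view, doubles the values
// on the device (mul2 also folds the extent checks into the result), and verifies that the
// data and the valid/consistent flags behave correctly across the sync() calls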
TEST(DataViewTest, Simple) {
typedef cuda_storage_info<0, layout_map<2, 1, 0>> storage_info_t;
typedef data_store<cuda_storage<double>, storage_info_t> data_store_t;
// create and allocate a data_store
constexpr storage_info_t si(c_x, c_y, c_z);
data_store_t ds(si);
// create a rw view and fill with some data
data_view<data_store_t> dv = make_host_view(ds);
GT_STATIC_ASSERT((is_data_view<decltype(dv)>::value), "is_data_view check failed");
dv(0, 0, 0) = 50;
dv(1, 0, 0) = 60;
// check if interface works
ASSERT_TRUE((si.length<0>() == dv.length<0>()));
ASSERT_TRUE((si.length<1>() == dv.length<1>()));
ASSERT_TRUE((si.length<2>() == dv.length<2>()));
ASSERT_TRUE((si.total_length<0>() == dv.total_length<0>()));
ASSERT_TRUE((si.total_length<1>() == dv.total_length<1>()));
ASSERT_TRUE((si.total_length<2>() == dv.total_length<2>()));
ASSERT_TRUE((si.begin<0>() == dv.begin<0>()));
ASSERT_TRUE((si.begin<1>() == dv.begin<1>()));
ASSERT_TRUE((si.begin<2>() == dv.begin<2>()));
ASSERT_TRUE((si.total_begin<0>() == dv.total_begin<0>()));
ASSERT_TRUE((si.total_begin<1>() == dv.total_begin<1>()));
ASSERT_TRUE((si.total_begin<2>() == dv.total_begin<2>()));
ASSERT_TRUE((si.end<0>() == dv.end<0>()));
ASSERT_TRUE((si.end<1>() == dv.end<1>()));
ASSERT_TRUE((si.end<2>() == dv.end<2>()));
ASSERT_TRUE((si.total_end<0>() == dv.total_end<0>()));
ASSERT_TRUE((si.total_end<1>() == dv.total_end<1>()));
ASSERT_TRUE((si.total_end<2>() == dv.total_end<2>()));
ASSERT_TRUE((si.padded_total_length() == dv.padded_total_length()));
// check if the user protections are working
#if defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ <= 9
// CUDA10 does not like to evaluate storage_info functions constexpr as a member (m_gpu_ptr) is mutable
static_assert(si.index(1, 0, 0) == 1, "constexpr index method call failed");
#endif
ASSERT_TRUE(si.index(1, 0, 1) == c_y * 32 + 1);
// check if data is there
EXPECT_EQ(50, dv(0, 0, 0));
EXPECT_EQ(dv(1, 0, 0), 60);
// create a ro view
data_view<data_store_t, access_mode::read_only> dvro = make_host_view<access_mode::read_only>(ds);
// check if data is the same
EXPECT_EQ(50, dvro(0, 0, 0));
EXPECT_EQ(dvro(1, 0, 0), 60);
// views are valid (ds <--> dv and ds <--> dvro)
EXPECT_TRUE(check_consistency(ds, dv));
EXPECT_TRUE(check_consistency(ds, dvro));
EXPECT_TRUE(dv.valid());
EXPECT_TRUE(dvro.valid());
// sync, create a device view and call kernel
ds.sync();
auto devv = make_device_view(ds);
GT_STATIC_ASSERT((is_data_view<decltype(devv)>::value), "is_data_view check failed");
EXPECT_TRUE(check_consistency(ds, devv));
EXPECT_FALSE(check_consistency(ds, dv));
EXPECT_FALSE(check_consistency(ds, dvro));
EXPECT_TRUE(devv.valid());
EXPECT_FALSE(dv.valid());
EXPECT_FALSE(dvro.valid());
hipLaunchKernelGGL(( mul2), dim3(1), dim3(1), 0, 0, devv);
// sync and check if read only host view is valid
ds.sync();
EXPECT_FALSE(check_consistency(ds, devv));
EXPECT_FALSE(check_consistency(ds, dv));
EXPECT_TRUE(check_consistency(ds, dvro));
EXPECT_FALSE(devv.valid());
EXPECT_FALSE(dv.valid());
EXPECT_TRUE(dvro.valid());
// check if data is the same
EXPECT_EQ(100, dvro(0, 0, 0));
EXPECT_EQ(dvro(1, 0, 0), 120);
// create and allocate a second storage
data_store_t ds_tmp(si);
// again create a view
data_view<data_store_t> dv_tmp = make_host_view<access_mode::read_write>(ds_tmp);
// the combination ds_tmp <--> dv/dvro is not a valid view
EXPECT_FALSE(check_consistency(ds, dv_tmp));
EXPECT_FALSE(check_consistency(ds_tmp, devv));
EXPECT_FALSE(check_consistency(ds_tmp, dvro));
EXPECT_TRUE(check_consistency(ds_tmp, dv_tmp));
EXPECT_TRUE(dv_tmp.valid());
EXPECT_FALSE(devv.valid());
EXPECT_TRUE(dvro.valid());
EXPECT_TRUE(dv_tmp.valid());
// destroy a storage, this should also invalidate the views
ds.reset();
EXPECT_FALSE(check_consistency(ds, dv));
EXPECT_FALSE(check_consistency(ds, dvro));
}
TEST(DataViewTest, ZeroSize) {
typedef cuda_storage_info<0, layout_map<0>> storage_info_t;
typedef data_store<cuda_storage<double>, storage_info_t> data_store_t;
// create and allocate a data_store
data_store_t ds;
make_host_view<access_mode::read_only>(ds);
make_device_view<access_mode::read_only>(ds);
}
TEST(DataViewTest, Looping) {
typedef cuda_storage_info<0, layout_map<0, 1, 2>, halo<1, 2, 3>> storage_info_t;
storage_info_t si(2 + 2, 2 + 4, 2 + 6);
typedef data_store<cuda_storage<triplet>, storage_info_t> data_store_t;
data_store_t ds(si, [](int i, int j, int k) { return triplet(i, j, k); }, "ds");
auto view = make_host_view<access_mode::read_write>(ds);
for (int i = view.begin<0>(); i <= view.end<0>(); ++i) {
for (int j = view.begin<1>(); j <= view.end<1>(); ++j) {
for (int k = view.begin<2>(); k <= view.end<2>(); ++k) {
EXPECT_EQ(view(i, j, k), triplet(i, j, k));
}
}
}
for (int i = view.total_begin<0>(); i <= view.total_end<0>(); ++i) {
for (int j = view.total_begin<1>(); j <= view.total_end<1>(); ++j) {
for (int k = view.total_begin<2>(); k <= view.total_end<2>(); ++k) {
EXPECT_EQ(view(i, j, k), triplet(i, j, k));
}
}
}
}
TEST(DataViewTest, TargetView) {
typedef cuda_storage_info<0, layout_map<0, 1, 2>, halo<1, 2, 3>> storage_info_t;
storage_info_t si(2 + 2, 2 + 4, 2 + 6);
typedef data_store<cuda_storage<triplet>, storage_info_t> data_store_t;
data_store_t ds(si, [](int i, int j, int k) { return triplet(i, j, k); }, "ds");
auto target_view = make_target_view<access_mode::read_only>(ds);
auto device_view = make_device_view<access_mode::read_only>(ds);
ASSERT_EQ(advanced::get_raw_pointer_of(device_view), advanced::get_raw_pointer_of(target_view));
}
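// dereferencing a device view element from host code is expected to throw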
TEST(DataViewTest, CheckMemorySpace) {
typedef cuda_storage_info<0, layout_map<0, 1, 2>, halo<1, 2, 3>> storage_info_t;
storage_info_t si(2 + 2 * 1, 2 + 2 * 3, 2 + 2 * 3);
typedef data_store<cuda_storage<int>, storage_info_t> data_store_t;
data_store_t ds(si, -1, "ds");
auto view = make_device_view<access_mode::read_write>(ds);
EXPECT_THROW(view(0, 0, 1), std::runtime_error);
}
| 603cebd4e51a667a7bc2f2cb19d9cce9dcb45f33.cu | /*
* GridTools
*
* Copyright (c) 2014-2019, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "../../tools/triplet.hpp"
#include "gtest/gtest.h"
#include <gridtools/common/gt_assert.hpp>
#include <gridtools/storage/data_store.hpp>
#include <gridtools/storage/storage_cuda/cuda_storage.hpp>
#include <gridtools/storage/storage_cuda/cuda_storage_info.hpp>
#include <gridtools/storage/storage_cuda/data_view_helpers.hpp>
using namespace gridtools;
const int c_x = 3 /* < 32 for this test */, c_y = 5, c_z = 7;
template <typename View>
__global__ void mul2(View s) {
bool correct_dims = (s.template total_length<0>() == c_x) && (s.template total_length<1>() == c_y) &&
(s.template total_length<2>() == c_z);
bool correct_size = (s.padded_total_length() == 32 * c_y * c_z);
s(0, 0, 0) *= (2 * correct_dims * correct_size);
s(1, 0, 0) *= (2 * correct_dims * correct_size);
}
TEST(DataViewTest, Simple) {
typedef cuda_storage_info<0, layout_map<2, 1, 0>> storage_info_t;
typedef data_store<cuda_storage<double>, storage_info_t> data_store_t;
// create and allocate a data_store
constexpr storage_info_t si(c_x, c_y, c_z);
data_store_t ds(si);
// create a rw view and fill with some data
data_view<data_store_t> dv = make_host_view(ds);
GT_STATIC_ASSERT((is_data_view<decltype(dv)>::value), "is_data_view check failed");
dv(0, 0, 0) = 50;
dv(1, 0, 0) = 60;
// check if interface works
ASSERT_TRUE((si.length<0>() == dv.length<0>()));
ASSERT_TRUE((si.length<1>() == dv.length<1>()));
ASSERT_TRUE((si.length<2>() == dv.length<2>()));
ASSERT_TRUE((si.total_length<0>() == dv.total_length<0>()));
ASSERT_TRUE((si.total_length<1>() == dv.total_length<1>()));
ASSERT_TRUE((si.total_length<2>() == dv.total_length<2>()));
ASSERT_TRUE((si.begin<0>() == dv.begin<0>()));
ASSERT_TRUE((si.begin<1>() == dv.begin<1>()));
ASSERT_TRUE((si.begin<2>() == dv.begin<2>()));
ASSERT_TRUE((si.total_begin<0>() == dv.total_begin<0>()));
ASSERT_TRUE((si.total_begin<1>() == dv.total_begin<1>()));
ASSERT_TRUE((si.total_begin<2>() == dv.total_begin<2>()));
ASSERT_TRUE((si.end<0>() == dv.end<0>()));
ASSERT_TRUE((si.end<1>() == dv.end<1>()));
ASSERT_TRUE((si.end<2>() == dv.end<2>()));
ASSERT_TRUE((si.total_end<0>() == dv.total_end<0>()));
ASSERT_TRUE((si.total_end<1>() == dv.total_end<1>()));
ASSERT_TRUE((si.total_end<2>() == dv.total_end<2>()));
ASSERT_TRUE((si.padded_total_length() == dv.padded_total_length()));
// check if the user protections are working
#if defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ <= 9
// CUDA10 does not like to evaluate storage_info functions constexpr as a member (m_gpu_ptr) is mutable
static_assert(si.index(1, 0, 0) == 1, "constexpr index method call failed");
#endif
ASSERT_TRUE(si.index(1, 0, 1) == c_y * 32 + 1);
// check if data is there
EXPECT_EQ(50, dv(0, 0, 0));
EXPECT_EQ(dv(1, 0, 0), 60);
// create a ro view
data_view<data_store_t, access_mode::read_only> dvro = make_host_view<access_mode::read_only>(ds);
// check if data is the same
EXPECT_EQ(50, dvro(0, 0, 0));
EXPECT_EQ(dvro(1, 0, 0), 60);
// views are valid (ds <--> dv and ds <--> dvro)
EXPECT_TRUE(check_consistency(ds, dv));
EXPECT_TRUE(check_consistency(ds, dvro));
EXPECT_TRUE(dv.valid());
EXPECT_TRUE(dvro.valid());
// sync, create a device view and call kernel
ds.sync();
auto devv = make_device_view(ds);
GT_STATIC_ASSERT((is_data_view<decltype(devv)>::value), "is_data_view check failed");
EXPECT_TRUE(check_consistency(ds, devv));
EXPECT_FALSE(check_consistency(ds, dv));
EXPECT_FALSE(check_consistency(ds, dvro));
EXPECT_TRUE(devv.valid());
EXPECT_FALSE(dv.valid());
EXPECT_FALSE(dvro.valid());
mul2<<<1, 1>>>(devv);
// sync and check if read only host view is valid
ds.sync();
EXPECT_FALSE(check_consistency(ds, devv));
EXPECT_FALSE(check_consistency(ds, dv));
EXPECT_TRUE(check_consistency(ds, dvro));
EXPECT_FALSE(devv.valid());
EXPECT_FALSE(dv.valid());
EXPECT_TRUE(dvro.valid());
// check if data is the same
EXPECT_EQ(100, dvro(0, 0, 0));
EXPECT_EQ(dvro(1, 0, 0), 120);
// create and allocate a second storage
data_store_t ds_tmp(si);
// again create a view
data_view<data_store_t> dv_tmp = make_host_view<access_mode::read_write>(ds_tmp);
// the combination ds_tmp <--> dv/dvro is not a valid view
EXPECT_FALSE(check_consistency(ds, dv_tmp));
EXPECT_FALSE(check_consistency(ds_tmp, devv));
EXPECT_FALSE(check_consistency(ds_tmp, dvro));
EXPECT_TRUE(check_consistency(ds_tmp, dv_tmp));
EXPECT_TRUE(dv_tmp.valid());
EXPECT_FALSE(devv.valid());
EXPECT_TRUE(dvro.valid());
EXPECT_TRUE(dv_tmp.valid());
// destroy a storage, this should also invalidate the views
ds.reset();
EXPECT_FALSE(check_consistency(ds, dv));
EXPECT_FALSE(check_consistency(ds, dvro));
}
TEST(DataViewTest, ZeroSize) {
typedef cuda_storage_info<0, layout_map<0>> storage_info_t;
typedef data_store<cuda_storage<double>, storage_info_t> data_store_t;
// create and allocate a data_store
data_store_t ds;
make_host_view<access_mode::read_only>(ds);
make_device_view<access_mode::read_only>(ds);
}
TEST(DataViewTest, Looping) {
typedef cuda_storage_info<0, layout_map<0, 1, 2>, halo<1, 2, 3>> storage_info_t;
storage_info_t si(2 + 2, 2 + 4, 2 + 6);
typedef data_store<cuda_storage<triplet>, storage_info_t> data_store_t;
data_store_t ds(si, [](int i, int j, int k) { return triplet(i, j, k); }, "ds");
auto view = make_host_view<access_mode::read_write>(ds);
for (int i = view.begin<0>(); i <= view.end<0>(); ++i) {
for (int j = view.begin<1>(); j <= view.end<1>(); ++j) {
for (int k = view.begin<2>(); k <= view.end<2>(); ++k) {
EXPECT_EQ(view(i, j, k), triplet(i, j, k));
}
}
}
for (int i = view.total_begin<0>(); i <= view.total_end<0>(); ++i) {
for (int j = view.total_begin<1>(); j <= view.total_end<1>(); ++j) {
for (int k = view.total_begin<2>(); k <= view.total_end<2>(); ++k) {
EXPECT_EQ(view(i, j, k), triplet(i, j, k));
}
}
}
}
TEST(DataViewTest, TargetView) {
typedef cuda_storage_info<0, layout_map<0, 1, 2>, halo<1, 2, 3>> storage_info_t;
storage_info_t si(2 + 2, 2 + 4, 2 + 6);
typedef data_store<cuda_storage<triplet>, storage_info_t> data_store_t;
data_store_t ds(si, [](int i, int j, int k) { return triplet(i, j, k); }, "ds");
auto target_view = make_target_view<access_mode::read_only>(ds);
auto device_view = make_device_view<access_mode::read_only>(ds);
ASSERT_EQ(advanced::get_raw_pointer_of(device_view), advanced::get_raw_pointer_of(target_view));
}
TEST(DataViewTest, CheckMemorySpace) {
typedef cuda_storage_info<0, layout_map<0, 1, 2>, halo<1, 2, 3>> storage_info_t;
storage_info_t si(2 + 2 * 1, 2 + 2 * 3, 2 + 2 * 3);
typedef data_store<cuda_storage<int>, storage_info_t> data_store_t;
data_store_t ds(si, -1, "ds");
auto view = make_device_view<access_mode::read_write>(ds);
EXPECT_THROW(view(0, 0, 1), std::runtime_error);
}
|
095a5ca65696df501cf37a871870906b01cf67fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cutil_math.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/sequence.h>
#include <thrust/count.h>
#define PI 3.14159f
#define MAXP 2096552
#define MAXPI 8192
#define MAXN 96
#define MAXG 4193104
#define THREADS 256
struct pair {
int key;
int value;
};
struct grid {
float oX, oY, oZ;
float size;
int nX, nY, nZ;
};
struct simulation {
float minX, maxX;
float minY, maxY;
float minZ, maxZ;
float dt;
int tsn;
int ssi;
int nsi;
};
struct load {
float minX, maxX;
float minY, maxY;
float minZ, maxZ;
float gx, gy, gz;
float w;
};
struct fix {
float minX, maxX;
float minY, maxY;
float minZ, maxZ;
float vx, vy, vz;
float tau;
};
struct outlet {
float oX, oY, oZ;
float nX, nY, nZ;
float R;
};
struct inlet {
float oX, oY, oZ;
float nX, nY, nZ;
float R;
int Material;
float Mass, Smooth;
float Velocity;
float Density, Energy;
float Distance;
};
struct model {
int pn;
int* Material;
float* Mass;
float* Smooth;
float* PosX;
float* PosY;
float* PosZ;
float* VelX;
float* VelY;
float* VelZ;
float* Density;
float* Energy;
float* Pressure;
float* Sound;
float* VelDotX;
float* VelDotY;
float* VelDotZ;
float* DensityDot;
float* EnergyDot;
float* PosX0;
float* PosY0;
float* PosZ0;
float* VelX0;
float* VelY0;
float* VelZ0;
float* Density0;
float* Energy0;
int* List;
int* Hash;
int* Index;
int* SetStart;
int* SetStop;
int* IntDummy;
float* FloatDummy;
};
// Host Variables
int hMatType[10];
float hMatProp[10][10];
struct simulation hRun;
struct grid hGrid;
struct load hLoad[10];
struct fix hFix[10];
struct outlet hOut[10];
struct inlet hIn[10];
// Device Variables
__device__ int dMatType[10];
__device__ float dMatProp[10][10];
__device__ struct simulation dRun;
__device__ struct grid dGrid;
__device__ struct load dLoad[10];
__device__ struct fix dFix[10];
__device__ struct outlet dOut[10];
__device__ struct inlet dIn[10];
__host__ __device__ float kernelWendland(float r, float h) {
float q, alpha, w;
/**
* \brief Wendland kernel
*
* \date Feb 8, 2011
* \author Luca Massidda
*/
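// W(q) = alpha * (1 - q/2)^4 * (1 + 2q) for q = r/h < 2, zero otherwise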
q = r / h;
// for 3D
alpha = 15.0f / (16.0f * PI * h * h * h);
// for 2D
//alpha = 7.0f / (4.0f * PI * h * h);
w = 0.0f;
if (q < 2) {
w = powf((1.0f - 0.5f*q),4);
w *= 1.0f + 2.0f*q;
w *= alpha;
}
return w;
}
__host__ __device__ float kernelDerivWendland(float r, float h) {
float q, alpha, dwdr;
/**
* \brief Wendland kernel derivative
*
* \date Feb 8, 2011
* \author Luca Massidda
*/
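// dW/dr = (alpha/h) * (5/8) * q * (q - 2)^3 for q = r/h < 2, zero otherwise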
q = r / h;
// for 3D
alpha = 15.0f / (16.0f * PI * h * h * h);
// for 2D
//alpha = 7.0f / (4.0f * PI * h * h);
dwdr = 0.0f;
if (q < 2) {
dwdr = 5.0f / 8.0f * q * powf((q - 2.0f), 3) ;
dwdr *= alpha / h;
}
return dwdr;
}
// Global code
void balanceMassMomentumHost(const int pn, const int* List,
const int* Material, const float* Mass, const float* Smooth,
const float* PosX, const float* PosY, const float* PosZ,
const float* VelX, const float* VelY, const float* VelZ,
const float* Density, const float* Pressure, const float* Sound,
float* DensityDot, float* VelDotX, float* VelDotY, float* VelDotZ) {
/**
* \brief Iterate over particles
*
* \date Jan 6, 2011
* \author Luca Massidda
*/
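// host reference implementation: for each particle, accumulate the continuity-equation
// density rate plus the momentum rate from the pressure gradient, an artificial-viscosity
// style shock correction and a kernel-based repulsion between particles of different materials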
int ip, il, jp;
float iDensityDot;
float iVelDotX, iVelDotY, iVelDotZ;
float iSmooth, jMass;
float dx, dy, dz, dr, dvr, dwdr, f, w;
for (ip = 0; ip < pn; ip++) {
iDensityDot = 0.0f;
iVelDotX = 0.0f;
iVelDotY = 0.0f;
iVelDotZ = 0.0f;
iSmooth = Smooth[ip];
for (il = 0; il < MAXN; il++) {
jp = List[ip * MAXN + il];
jMass = Mass[jp];
dx = PosX[ip] - PosX[jp];
dy = PosY[ip] - PosY[jp];
dz = PosZ[ip] - PosZ[jp];
dr = sqrtf(dx * dx + dy * dy + dz * dz);
if (dr < (0.01f * iSmooth)) dr = 100.0f * iSmooth;
w = kernelWendland(dr, iSmooth);
dwdr = kernelDerivWendland(dr, iSmooth);
dvr = 0.0f;
dvr += (PosX[ip] - PosX[jp]) * (VelX[ip] - VelX[jp]);
dvr += (PosY[ip] - PosY[jp]) * (VelY[ip] - VelY[jp]);
dvr += (PosZ[ip] - PosZ[jp]) * (VelZ[ip] - VelZ[jp]);
iDensityDot += jMass * dvr * dwdr / dr;
// Calculate interparticle pressure action
f = -(Pressure[ip] / powf((Density[ip] + hMatProp[Material[ip]][0]), 2)
+ Pressure[jp] / powf((Density[jp] + hMatProp[Material[jp]][0]), 2));
iVelDotX += jMass * f * dwdr * (PosX[ip] - PosX[jp]) / dr;
iVelDotY += jMass * f * dwdr * (PosY[ip] - PosY[jp]) / dr;
iVelDotZ += jMass * f * dwdr * (PosZ[ip] - PosZ[jp]) / dr;
// Calculate shock correction for mass
f = Density[ip] - Density[jp];
f *= 2.0f * Sound[ip];
f /= Density[ip] + Density[jp] + hMatProp[Material[ip]][0] + hMatProp[Material[jp]][0];
iDensityDot += jMass * f * dwdr;
// Calculate shock correction for momentum
if (dvr < 0.0f) f = dvr;
else f = 0.0f;
f *= iSmooth / (dr * dr + 0.01f * iSmooth * iSmooth);
f *= 2.0f * Sound[ip];
f /= Density[ip] + Density[jp] + hMatProp[Material[ip]][0] + hMatProp[Material[jp]][0];
f *= 0.03f;
iVelDotX += jMass * f * dwdr * (PosX[ip] - PosX[jp]) / dr;
iVelDotY += jMass * f * dwdr * (PosY[ip] - PosY[jp]) / dr;
iVelDotZ += jMass * f * dwdr * (PosZ[ip] - PosZ[jp]) / dr;
// Calculate boundary repulsion
if (Material[ip] != Material[jp]) {
f = w * powf(Smooth[jp] * Sound[jp], 2);
iVelDotX += jMass / (Mass[ip] + jMass) * f * (PosX[ip] - PosX[jp]) / dr;
iVelDotY += jMass / (Mass[ip] + jMass) * f * (PosY[ip] - PosY[jp]) / dr;
iVelDotZ += jMass / (Mass[ip] + jMass) * f * (PosZ[ip] - PosZ[jp]) / dr;
}
}
DensityDot[ip] += iDensityDot;
VelDotX[ip] += iVelDotX;
VelDotY[ip] += iVelDotY;
VelDotZ[ip] += iVelDotZ;
}
}
__global__ void balanceMassMomentumDevice(const int pn, const int* List,
const int* Material, const float* Mass, const float* Smooth,
const float* PosX, const float* PosY, const float* PosZ,
const float* VelX, const float* VelY, const float* VelZ,
const float* Density, const float* Pressure, const float* Sound,
float* DensityDot, float* VelDotX, float* VelDotY, float* VelDotZ) {
/**
* \brief Iterate over particles
*
* \date Jan 6, 2011
* \author Luca Massidda
*/
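// GPU version of the balance above, one thread per particle; pressure and shock terms are
// applied only between particles of the same material, the repulsion term only between
// particles of different materials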
int ip, il, jp;
float iDensityDot;
float iVelDotX, iVelDotY, iVelDotZ;
float iSmooth, jMass;
volatile float dx, dy, dz, dr, dvr, dwdr, f, phi, w;
ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip < pn) {
iDensityDot = 0.0f;
iVelDotX = 0.0f;
iVelDotY = 0.0f;
iVelDotZ = 0.0f;
iSmooth = Smooth[ip];
for (il = 0; il < MAXN; il++) {
jp = List[ip * MAXN + il];
jMass = Mass[jp];
dx = PosX[ip] - PosX[jp];
dy = PosY[ip] - PosY[jp];
dz = PosZ[ip] - PosZ[jp];
dr = sqrtf(dx * dx + dy * dy + dz * dz);
if (dr < (0.01f * iSmooth)) dr = 100.0f * iSmooth;
w = kernelWendland(dr, iSmooth);
dwdr = kernelDerivWendland(dr, iSmooth);
if (Material[ip] == Material[jp]) {
dvr = 0.0f;
dvr += (PosX[ip] - PosX[jp]) * (VelX[ip] - VelX[jp]);
dvr += (PosY[ip] - PosY[jp]) * (VelY[ip] - VelY[jp]);
dvr += (PosZ[ip] - PosZ[jp]) * (VelZ[ip] - VelZ[jp]);
iDensityDot += jMass * dvr * dwdr / dr;
// Calculate interparticle pressure action
f = -(Pressure[ip] / powf((Density[ip] + dMatProp[Material[ip]][0]), 2)
+ Pressure[jp] / powf((Density[jp] + dMatProp[Material[jp]][0]), 2));
f *= jMass * dwdr;
iVelDotX += f * (PosX[ip] - PosX[jp]) / dr;
iVelDotY += f * (PosY[ip] - PosY[jp]) / dr;
iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr;
// Calculate shock correction for mass
f = Density[ip] - Density[jp];
f *= 2.0f * Sound[ip];
f /= Density[ip] + Density[jp] + dMatProp[Material[ip]][0] + dMatProp[Material[jp]][0];
iDensityDot += jMass * f * dwdr;
// Calculate shock correction for momentum
phi = fabs(dvr * iSmooth / (dr * dr + 0.01f * iSmooth * iSmooth));
f = -1.0f*(Sound[ip] * phi + 0.75f * phi * phi) / (Density[ip] + dMatProp[Material[ip]][0]);
f *= jMass * dwdr;
if (dvr >= 0.0f) f = 0.0f;
iVelDotX += f * (PosX[ip] - PosX[jp]) / dr;
iVelDotY += f * (PosY[ip] - PosY[jp]) / dr;
iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr;
}
// Calculate boundary repulsion
if (Material[ip] != Material[jp]) {
f = w * powf(Smooth[jp] * Sound[jp], 2);
iVelDotX += f * (PosX[ip] - PosX[jp]) / dr;
iVelDotY += f * (PosY[ip] - PosY[jp]) / dr;
iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr;
}
}
DensityDot[ip] += iDensityDot;
VelDotX[ip] += iVelDotX;
VelDotY[ip] += iVelDotY;
VelDotZ[ip] += iVelDotZ;
}
}
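/* Minimal usage sketch for the kernel above (hedged): one thread per particle,
 * with the block size THREADS (e.g. 256) being an assumption for illustration only.
 *
 * int blocks = (dm->pn + THREADS - 1) / THREADS;
 * hipLaunchKernelGGL(balanceMassMomentumDevice, dim3(blocks), dim3(THREADS), 0, 0,
 *     dm->pn, dm->List, dm->Material, dm->Mass, dm->Smooth,
 *     dm->PosX, dm->PosY, dm->PosZ, dm->VelX, dm->VelY, dm->VelZ,
 *     dm->Density, dm->Pressure, dm->Sound,
 *     dm->DensityDot, dm->VelDotX, dm->VelDotY, dm->VelDotZ);
 */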
void balanceEnergyHost(const int pn, const int* Material,
const float* Pressure, const float* Density,
const float* DensityDot, float* EnergyDot) {
/**
* \brief Compute the energy rate of each particle
*
* \date Jan 9, 2011
* \author Luca Massidda
*/
int ip;
float iPressure, iDensity, iDensityDot;
float iEnergyDot;
for (ip = 0; ip < pn; ip++) {
iPressure = Pressure[ip];
iDensity = Density[ip];
iDensityDot = DensityDot[ip];
iEnergyDot = iPressure * iDensityDot;
iEnergyDot *= powf(iDensity + hMatProp[Material[ip]][0], -2.0f);
EnergyDot[ip] += iEnergyDot;
}
}
__global__ void balanceEnergyDevice(const int pn, const int* Material,
const float* Pressure, const float* Density,
const float* DensityDot, float* EnergyDot) {
/**
* \brief Compute the energy rate of each particle
*
* \date Jan 9, 2011
* \author Luca Massidda
*/
volatile int ip;
float iPressure, iDensity, iDensityDot;
float iEnergyDot;
ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip < pn) {
iPressure = Pressure[ip];
iDensity = Density[ip];
iDensityDot = DensityDot[ip];
//iEnergyDot = (iPressure * iDensityDot) / (iDensity * iDensity);
iEnergyDot = iPressure * iDensityDot;
iEnergyDot *= powf(iDensity + dMatProp[Material[ip]][0], -2.0f);
EnergyDot[ip] += iEnergyDot;
}
}
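/* Worked form of the update above: with Density storing the deviation rho' from the
 * reference density rho0 = MatProp[Material][0], the rate implemented is
 * de/dt = p * (drho/dt) / (rho0 + rho')^2,
 * i.e. the usual de/dt = (p / rho^2) drho/dt written in terms of the stored deviation. */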
__host__ __device__ float pressureGas(float* properties, float rho, float u) {
/**
* \brief Ideal gas Equation Of State
*
* p = (k -1) rho u
* c = (k(k -1) u)^0.5
*
* rho0 = properties[0]
* k = properties[1]
* pshift = properties[2]
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float p;
p = (properties[1] - 1.0f) * (rho + properties[0]) * u;
p += properties[2];
return p;
}
__host__ __device__ float pressurePoly(float* properties, float rho, float u) {
/**
* \brief Mie-Gruneisen polynomial Equation Of State
*
* p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression
* p = t1 mu + t2 mu^2 + b0 rho0 u in tension
*
* rho0 = properties[0];
* a1 = properties[1];
* a2 = properties[2];
* a3 = properties[3];
* b0 = properties[4];
* b1 = properties[5];
* t1 = properties[6];
* t2 = properties[7];
* pmin = properties[8];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float mu;
float p;
mu = rho / properties[0];
if (mu < 0)
p = (properties[6] * mu + properties[7] * mu*mu)
+ (properties[4] * properties[0] * u);
else
p = (properties[1] * mu + properties[2] * mu*mu
+ properties[3] * mu*mu*mu)
+ ((properties[4] + properties[5] * mu)
* properties[0] * u);
//if (p < properties[8]) p = properties[8];
return p;
}
__host__ __device__ float pressureShock(float* properties, float rho, float u) {
/**
* \brief Mie-Gruneisen Shock Hugoniot Equation Of State
*
* mu = rho / rho0 -1
* g = g * rho0 / rho
* ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2
* uh = 1/2 ph/rho0 * (mu / (1 + mu))
* p = ph + g * rho * (u - uh)
*
* rho0 = properties[0];
* c0 = properties[1];
* g0 = properties[2];
* s0 = properties[3];
* pmin = properties[4];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float mu;
float p, ph;
mu = rho / properties[0];
ph = (properties[0] * powf(properties[1], 2) * mu*(1.0f +mu))
/ powf((1.0f - (properties[3] -1.0f) * mu), 2);
p = ph + properties[2] * properties[0]
* (u - (0.5f * ph / properties[0] * (mu / (1.0f + mu))));
if (p < properties[4]) p = properties[4];
return p;
}
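/* Note on the implementation above: since g = g0 * rho0 / rho, the product g * rho in
 * p = ph + g * rho * (u - uh) reduces to g0 * rho0, which is why the code multiplies
 * properties[2] * properties[0] directly. */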
__host__ __device__ float pressureTait(float* properties, float rho, float u) {
/**
* \brief Tait Equation Of State
*
* p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0);
* c = c0;
*
* rho0 = properties[0];
* c0 = properties[1];
* pmin = properties[2];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float p;
//p = properties[0] * powf(properties[1], 2) / 7.0f
// * (powf((rho / properties[0]), 7) - 1.0f);
p = properties[0] * powf(properties[1], 2) / 7.0f
* (powf(rho / properties[0] +1.0f, 7) - 1.0f);
//if (p < properties[2]) p = properties[2];
return p;
}
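/* Rough numerical check of the Tait form above (hedged, using rho0 = 1000 and c0 = 50
 * as in initFree below): a density deviation rho' = 10 kg/m^3 gives
 * p = rho0 * c0^2 / 7 * ((1 + rho'/rho0)^7 - 1) ~= 2.6e4 Pa,
 * close to the linearized estimate rho' * c0^2 = 2.5e4 Pa. */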
__host__ __device__ float pressureESS(float* properties, float rho, float u) {
/**
* \brief Simple Equation Of State
*
* mu = rho / rho0 -1
* p = rho0 c0^2 mu + g0 * rho0 * u
*
* rho0 = properties[0];
* c0 = properties[1];
* g0 = properties[2];
* pmin = properties[3];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float mu;
float p;
mu = rho / properties[0];
p = mu * properties[0] * powf(properties[1], 2) + u * properties[0] * properties[2];
if (p < properties[3]) p = properties[3];
return p;
}
__host__ __device__ float soundGas(float* properties ,float rho, float u) {
/**
* \brief Ideal gas Equation Of State
*
* p = (k -1) rho u
* c = (k(k -1) u)^0.5
*
* k = properties[1]
* pshift = properties[2]
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float c;
c = sqrtf(properties[1] * (properties[1] - 1.0f) * u);
return c;
}
__host__ __device__ float soundPoly(float* properties , float rho, float u) {
/**
* \brief Mie-Gruneisen polynomial Equation Of State
*
* p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression
* p = t1 mu + t2 mu^2 + b0 rho0 u in tension
*
* rho0 = properties[0];
* a1 = properties[1];
* a2 = properties[2];
* a3 = properties[3];
* b0 = properties[4];
* b1 = properties[5];
* t1 = properties[6];
* t2 = properties[7];
* pmin = properties[8];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float c;
c = sqrtf(properties[1] / (rho + properties[0]));
return c;
}
__host__ __device__ float soundShock(float* properties, float rho, float u) {
/**
* \brief Mie-Gruneisen Shock Hugoniot Equation Of State
*
* mu = rho / rho0 -1
* g = g * rho0 / rho
* ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2
* uh = 1/2 ph/rho0 * (mu / (1 + mu))
* p = ph + g * rho * (u - uh)
*
* rho0 = properties[0];
* c0 = properties[1];
* g0 = properties[2];
* s0 = properties[3];
* pmin = properties[4];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float c;
c = properties[1];
return c;
}
__host__ __device__ float soundTait(float* properties, float rho, float u) {
/**
* \brief Tait Equation Of State
*
* p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0);
* c = c0;
*
* rho0 = properties[0];
* c0 = properties[1];
* pmin = properties[2];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float c;
c = properties[1];
return c;
}
__host__ __device__ float soundESS(float* properties, float rho, float u) {
/**
* \brief Simple Equation Of State
*
* mu = rho / rho0 -1
* p = rho0 c0^2 mu + g0 * rho0 * u
*
* rho0 = properties[0];
* c0 = properties[1];
* g0 = properties[2];
* pmin = properties[3];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float c;
c = properties[1];
return c;
}
__host__ __device__ float densityPoly(float* properties , float rho) {
/**
* \brief Mie-Gruneisen polynomial Equation Of State
*
* p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression
* p = t1 mu + t2 mu^2 + b0 rho0 u in tension
*
* rho0 = properties[0];
* a1 = properties[1];
* a2 = properties[2];
* a3 = properties[3];
* b0 = properties[4];
* b1 = properties[5];
* t1 = properties[6];
* t2 = properties[7];
* pmin = properties[8];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float rho0;
rho0 = properties[0];
if (rho < -0.1f * rho0) rho = -0.1f*rho0;
return rho;
}
__host__ __device__ float densityShock(float* properties, float rho) {
/**
* \brief Mie-Gruneisen Shock Hugoniot Equation Of State
*
* mu = rho / rho0 -1
* g = g * rho0 / rho
* ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2
* uh = 1/2 ph/rho0 * (mu / (1 + mu))
* p = ph + g * rho * (u - uh)
*
* rho0 = properties[0];
* c0 = properties[1];
* g0 = properties[2];
* s0 = properties[3];
* pmin = properties[4];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float c0, pmin, rhomin;
c0 = properties[1];
pmin = properties[4];
rhomin = pmin / (c0 * c0);
//if (p < properties[4]) p = properties[4];
//if (rho < -0.1f * rho0) rho = -0.1f*rho0;
if (rho < rhomin) rho = rhomin;
return rho;
}
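/* The clamp above follows from linearizing the EOS around the reference state:
 * p ~= c0^2 * rho' for small deviations, so enforcing p >= pmin corresponds to
 * rho' >= pmin / c0^2, which is the rhomin used here. */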
__host__ __device__ float densityTait(float* properties, float rho) {
/**
* \brief Tait Equation Of State
*
* p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0);
* c = c0;
*
* rho0 = properties[0];
* c0 = properties[1];
* pmin = properties[2];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float rho0;
rho0 = properties[0];
if (rho < -0.1f * rho0) rho = -0.1f*rho0;
return rho;
}
void updateParticlesHost(const int pn, const float alpha,
const int* Material,
const float* VelDotX, const float* VelDotY, const float* VelDotZ,
const float* DensityDot, const float* EnergyDot,
const float* PosX0, const float* PosY0, const float* PosZ0,
const float* VelX0, const float* VelY0, const float* VelZ0,
const float* Density0, const float* Energy0,
float* PosX, float* PosY, float* PosZ,
float* VelX, float* VelY, float* VelZ,
float* Density, float* Energy, float* Pressure, float* Sound) {
/**
* \brief Update particles
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
int ip;
int iMaterial;
for (ip = 0; ip < pn; ip++) if (Material[ip] != 0) {
PosX[ip] = PosX0[ip] + alpha * (PosX[ip] + hRun.dt * VelX[ip] - PosX0[ip]);
PosY[ip] = PosY0[ip] + alpha * (PosY[ip] + hRun.dt * VelY[ip] - PosY0[ip]);
PosZ[ip] = PosZ0[ip] + alpha * (PosZ[ip] + hRun.dt * VelZ[ip] - PosZ0[ip]);
VelX[ip] = VelX0[ip] + alpha * (VelX[ip] + hRun.dt * VelDotX[ip] - VelX0[ip]);
VelY[ip] = VelY0[ip] + alpha * (VelY[ip] + hRun.dt * VelDotY[ip] - VelY0[ip]);
VelZ[ip] = VelZ0[ip] + alpha * (VelZ[ip] + hRun.dt * VelDotZ[ip] - VelZ0[ip]);
Density[ip] = Density0[ip] + alpha * (Density[ip] + hRun.dt * DensityDot[ip] - Density0[ip]);
Energy[ip] = Energy0[ip] + alpha * (Energy[ip] + hRun.dt * EnergyDot[ip] - Energy0[ip]);
iMaterial = Material[ip];
if (hMatType[iMaterial] == 0) {
VelX[ip] = VelX0[ip];
VelY[ip] = VelY0[ip];
VelZ[ip] = VelZ0[ip];
Density[ip] = Density0[ip];
Energy[ip] = Energy0[ip];
}
switch (hMatType[iMaterial]) {
case (0) : // BOUNDARY
Density[ip] = densityTait(hMatProp[iMaterial], Density[ip]);
Pressure[ip] = 0.0f*pressureTait(hMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundTait(hMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (1) : // IDEAL GAS EOS
Pressure[ip] = pressureGas(hMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundGas(hMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (2) : // MIE-GRUNEISEN POLYNOMIAL EOS
Density[ip] = densityPoly(hMatProp[iMaterial], Density[ip]);
Pressure[ip] = pressurePoly(hMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundPoly(hMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (3) : // MIE-GRUNEISEN SHOCK EOS
Density[ip] = densityShock(hMatProp[iMaterial], Density[ip]);
Pressure[ip] = pressureShock(hMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundShock(hMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (4) : // TAIT EOS
Density[ip] = densityTait(hMatProp[iMaterial], Density[ip]);
Pressure[ip] = pressureTait(hMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundTait(hMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (5) : // ESS EOS
Pressure[ip] = pressureESS(hMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundESS(hMatProp[iMaterial], Density[ip], Energy[ip]);
break;
default :
Pressure[ip] = 0.0f;
}
}
}
__global__ void updateParticlesDevice(const int pn, const float alpha,
const int* Material,
const float* VelDotX, const float* VelDotY, const float* VelDotZ,
const float* DensityDot, const float* EnergyDot,
const float* PosX0, const float* PosY0, const float* PosZ0,
const float* VelX0, const float* VelY0, const float* VelZ0,
const float* Density0, const float* Energy0,
float* PosX, float* PosY, float* PosZ,
float* VelX, float* VelY, float* VelZ,
float* Density, float* Energy, float* Pressure, float* Sound) {
/**
* \brief Update particles
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
int ip;
int iMaterial;
ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip < pn) {
PosX[ip] = PosX0[ip] + alpha * (PosX[ip] + dRun.dt * VelX[ip] - PosX0[ip]);
PosY[ip] = PosY0[ip] + alpha * (PosY[ip] + dRun.dt * VelY[ip] - PosY0[ip]);
PosZ[ip] = PosZ0[ip] + alpha * (PosZ[ip] + dRun.dt * VelZ[ip] - PosZ0[ip]);
VelX[ip] = VelX0[ip] + alpha * (VelX[ip] + dRun.dt * VelDotX[ip] - VelX0[ip]);
VelY[ip] = VelY0[ip] + alpha * (VelY[ip] + dRun.dt * VelDotY[ip] - VelY0[ip]);
VelZ[ip] = VelZ0[ip] + alpha * (VelZ[ip] + dRun.dt * VelDotZ[ip] - VelZ0[ip]);
Density[ip] = Density0[ip] + alpha * (Density[ip] + dRun.dt * DensityDot[ip] - Density0[ip]);
Energy[ip] = Energy0[ip] + alpha * (Energy[ip] + dRun.dt * EnergyDot[ip] - Energy0[ip]);
iMaterial = Material[ip];
if (dMatType[iMaterial] == 0) {
VelX[ip] = VelX0[ip];
VelY[ip] = VelY0[ip];
VelZ[ip] = VelZ0[ip];
Density[ip] = Density0[ip];
Energy[ip] = Energy0[ip];
}
switch (dMatType[iMaterial]) {
case (0) : // BOUNDARY
Density[ip] = densityTait(dMatProp[iMaterial], Density[ip]);
Pressure[ip] = 0.0f*pressureTait(dMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundTait(dMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (1) : // IDEAL GAS EOS
Pressure[ip] = pressureGas(dMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundGas(dMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (2) : // MIE-GRUNEISEN POLYNOMIAL EOS
Density[ip] = densityPoly(dMatProp[iMaterial], Density[ip]);
Pressure[ip] = pressurePoly(dMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundPoly(dMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (3) : // MIE-GRUNEISEN SHOCK EOS
//Density[ip] = densityShock(dMatProp[iMaterial], Density[ip]);
Pressure[ip] = pressureShock(dMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundShock(dMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (4) : // TAIT EOS
Density[ip] = densityTait(dMatProp[iMaterial], Density[ip]);
Pressure[ip] = pressureTait(dMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundTait(dMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (5) : // ESS EOS
Pressure[ip] = pressureESS(dMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundESS(dMatProp[iMaterial], Density[ip], Energy[ip]);
break;
default :
Pressure[ip] = 0.0f;
}
}
}
void updateLoadsHost(const int pn, const int* Material,
const float* PosX, const float* PosY, const float* PosZ,
const float* VelX, const float* VelY, const float* VelZ,
float* VelDotX, float* VelDotY, float* VelDotZ,
float* EnergyDot) {
int ip, i;
for (ip = 0; ip < pn; ip++) {
if (Material[ip] > 0) {
for (i = 0; i < 10; i++) {
if ((PosX[ip] > hLoad[i].minX) &&
(PosX[ip] < hLoad[i].maxX) &&
(PosY[ip] > hLoad[i].minY) &&
(PosY[ip] < hLoad[i].maxY) &&
(PosZ[ip] > hLoad[i].minZ) &&
(PosZ[ip] < hLoad[i].maxZ)) {
VelDotX[ip] += hLoad[i].gx;
VelDotY[ip] += hLoad[i].gy;
VelDotZ[ip] += hLoad[i].gz;
EnergyDot[ip] += hLoad[i].w;
}
if ((PosX[ip] > hFix[i].minX) &&
(PosX[ip] < hFix[i].maxX) &&
(PosY[ip] > hFix[i].minY) &&
(PosY[ip] < hFix[i].maxY) &&
(PosZ[ip] > hFix[i].minZ) &&
(PosZ[ip] < hFix[i].maxZ)) {
VelDotX[ip] += (hFix[i].vx - VelX[ip]) / hFix[i].tau;
VelDotY[ip] += (hFix[i].vy - VelY[ip]) / hFix[i].tau;
VelDotZ[ip] += (hFix[i].vz - VelZ[ip]) / hFix[i].tau;
}
}
}
}
}
__global__ void updateLoadsDevice(const int pn, const int* Material,
const float* PosX, const float* PosY, const float* PosZ,
const float* VelX, const float* VelY, const float* VelZ,
float* VelDotX, float* VelDotY, float* VelDotZ,
float* EnergyDot, const float time) {
int ip, i;
float x1, x2, x3, f;
ip = threadIdx.x + blockDim.x * blockIdx.x;
if ((ip < pn) && (Material[ip] > 0)) {
for (i = 0; i < 10; i++) {
if ((PosX[ip] > dLoad[i].minX) &&
(PosX[ip] < dLoad[i].maxX) &&
(PosY[ip] > dLoad[i].minY) &&
(PosY[ip] < dLoad[i].maxY) &&
(PosZ[ip] > dLoad[i].minZ) &&
(PosZ[ip] < dLoad[i].maxZ)) {
VelDotX[ip] += dLoad[i].gx;
VelDotY[ip] += dLoad[i].gy;
VelDotZ[ip] += dLoad[i].gz;
EnergyDot[ip] += dLoad[i].w;
}
if ((PosX[ip] > dFix[i].minX) &&
(PosX[ip] < dFix[i].maxX) &&
(PosY[ip] > dFix[i].minY) &&
(PosY[ip] < dFix[i].maxY) &&
(PosZ[ip] > dFix[i].minZ) &&
(PosZ[ip] < dFix[i].maxZ)) {
VelDotX[ip] += (dFix[i].vx - VelX[ip]) / dFix[i].tau;
VelDotY[ip] += (dFix[i].vy - VelY[ip]) / dFix[i].tau;
VelDotZ[ip] += (dFix[i].vz - VelZ[ip]) / dFix[i].tau;
}
}
x1 = 100.0f * PosZ[ip];
x2 = 100.0f * (PosY[ip] -0.04f);
x3 = 100.0f * (PosX[ip] +0.10f);
x3 -= x2 * 3.732f;
if (x3 < 0.0f) x3 = 0.0f;
f = expf(-0.5f * (powf(x1 / 5.0f, 2) + powf(x2 / 1.5f, 2)));
f *= 0.00130948f;
f *= expf(-x3 / 15.5814f);
f *= 1.0f - expf(-0.654066f - x3 / 6.72606f);
f *= 2.0e12 / 10388.0f;
f *= 50.0f;
if (fmodf(time, 1.0f/20.0f) > 0.001f) f = 0.0f;
//EnergyDot[ip] += f;
}
}
// Host code
int initHost(struct model *hm) {
hm->Material = (int *) malloc(MAXP * sizeof(int));
hm->Mass = (float *) malloc(MAXP * sizeof(float));
hm->Smooth = (float *) malloc(MAXP * sizeof(float));
hm->PosX = (float *) malloc(MAXP * sizeof(float));
hm->PosY = (float *) malloc(MAXP * sizeof(float));
hm->PosZ = (float *) malloc(MAXP * sizeof(float));
hm->VelX = (float *) malloc(MAXP * sizeof(float));
hm->VelY = (float *) malloc(MAXP * sizeof(float));
hm->VelZ = (float *) malloc(MAXP * sizeof(float));
hm->Density = (float *) malloc(MAXP * sizeof(float));
hm->Energy = (float *) malloc(MAXP * sizeof(float));
hm->Pressure = (float *) malloc(MAXP * sizeof(float));
hm->Sound = (float *) malloc(MAXP * sizeof(float));
hm->VelDotX = (float *) malloc(MAXP * sizeof(float));
hm->VelDotY = (float *) malloc(MAXP * sizeof(float));
hm->VelDotZ = (float *) malloc(MAXP * sizeof(float));
hm->DensityDot = (float *) malloc(MAXP * sizeof(float));
hm->EnergyDot = (float *) malloc(MAXP * sizeof(float));
hm->PosX0 = (float *) malloc(MAXP * sizeof(float));
hm->PosY0 = (float *) malloc(MAXP * sizeof(float));
hm->PosZ0 = (float *) malloc(MAXP * sizeof(float));
hm->VelX0 = (float *) malloc(MAXP * sizeof(float));
hm->VelY0 = (float *) malloc(MAXP * sizeof(float));
hm->VelZ0 = (float *) malloc(MAXP * sizeof(float));
hm->Density0 = (float *) malloc(MAXP * sizeof(float));
hm->Energy0 = (float *) malloc(MAXP * sizeof(float));
hm->Hash = (int *) malloc(MAXP * sizeof(int));
hm->Index = (int *) malloc(MAXP * sizeof(int));
hm->List = (int *) malloc(MAXP * MAXN * sizeof(int));
hm->IntDummy = (int *) malloc(MAXP * sizeof(int));
hm->FloatDummy = (float *) malloc(MAXP * sizeof(float));
hm->SetStart = (int *) malloc(MAXG * sizeof(int));
hm->SetStop = (int *) malloc(MAXG * sizeof(int));
return 0;
}
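/* Cleanup sketch (hedged, assuming no matching routine exists elsewhere): each malloc
 * above should eventually be paired with a free, e.g.
 *
 * free(hm->Material);
 * free(hm->Mass);
 * ... and so on for the remaining host arrays.
 */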
int initDevice(struct model *dm) {
size_t available, total;
hipMalloc((void**) &(dm->Material), (MAXP * sizeof(int)));
hipMalloc((void**) &(dm->Mass), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->Smooth), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->PosX), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->PosY), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->PosZ), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->VelX), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->VelY), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->VelZ), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->Density), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->Energy), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->Pressure), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->Sound), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->VelDotX), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->VelDotY), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->VelDotZ), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->DensityDot), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->EnergyDot), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->PosX0), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->PosY0), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->PosZ0), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->VelX0), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->VelY0), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->VelZ0), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->Density0), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->Energy0), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->Hash), (MAXP * sizeof(int)));
hipMalloc((void**) &(dm->Index), (MAXP * sizeof(int)));
hipMalloc((void**) &(dm->List), (MAXP * MAXN * sizeof(int)));
hipMalloc((void**) &(dm->IntDummy), (MAXP * sizeof(int)));
hipMalloc((void**) &(dm->FloatDummy), (MAXP * sizeof(float)));
hipMalloc((void**) &(dm->SetStart), (MAXG * sizeof(int)));
hipMalloc((void**) &(dm->SetStop), (MAXG * sizeof(int)));
hipMemGetInfo(&available, &total);
printf("Available memory %d of %d MB\n", available/1024/1024, total/1024/1024);
return 0;
}
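/* Error-checking sketch (hedged): the allocations above could be checked individually,
 * for example
 *
 * hipError_t err = hipMalloc((void**) &(dm->Material), (MAXP * sizeof(int)));
 * if (err != hipSuccess) {
 *     printf("hipMalloc failed: %s\n", hipGetErrorString(err));
 *     return 1;
 * }
 */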
int copyHostToDevice(struct model *hm, struct model *dm) {
dm->pn = hm->pn;
hipMemcpy(dm->Material, hm->Material, (MAXP * sizeof(int)), hipMemcpyHostToDevice);
hipMemcpy(dm->Mass, hm->Mass, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->Smooth, hm->Smooth, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->PosX, hm->PosX, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->PosY, hm->PosY, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->PosZ, hm->PosZ, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->VelX, hm->VelX, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->VelY, hm->VelY, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->VelZ, hm->VelZ, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->Density, hm->Density, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->Energy, hm->Energy, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->Pressure, hm->Pressure, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->Sound, hm->Sound, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->VelDotX, hm->VelDotX, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->VelDotY, hm->VelDotY, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->VelDotZ, hm->VelDotZ, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->DensityDot, hm->DensityDot, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->EnergyDot, hm->EnergyDot, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->PosX0, hm->PosX0, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->PosY0, hm->PosY0, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->PosZ0, hm->PosZ0, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->VelX0, hm->VelX0, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->VelY0, hm->VelY0, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->VelZ0, hm->VelZ0, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->Density0, hm->Density0, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->Energy0, hm->Energy0, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->List, hm->List, (MAXP * MAXN * sizeof(int)), hipMemcpyHostToDevice);
hipMemcpy(dm->Hash, hm->Hash, (MAXP * sizeof(int)), hipMemcpyHostToDevice);
hipMemcpy(dm->Index, hm->Index, (MAXP * sizeof(int)), hipMemcpyHostToDevice);
hipMemcpy(dm->IntDummy, hm->IntDummy, (MAXP * sizeof(int)), hipMemcpyHostToDevice);
hipMemcpy(dm->FloatDummy, hm->FloatDummy, (MAXP * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->SetStart, hm->SetStart, (MAXG * sizeof(int)), hipMemcpyHostToDevice);
hipMemcpy(dm->SetStop, hm->SetStop, (MAXG * sizeof(int)), hipMemcpyHostToDevice);
hipMemcpy(&dGrid, &hGrid, (sizeof(struct grid)), hipMemcpyHostToDevice);
hipMemcpyToSymbol(dMatType, hMatType, 10 * sizeof(int));
hipMemcpyToSymbol(dMatProp, hMatProp, 100 * sizeof(float));
//hipMemcpyToSymbol(dGrid, &hGrid, sizeof(struct grid));
hipMemcpyToSymbol(dRun, &hRun, sizeof(struct simulation));
hipMemcpyToSymbol(dLoad, &hLoad, 10 * sizeof(struct load));
hipMemcpyToSymbol(dFix, &hFix, 10 * sizeof(struct fix));
hipMemcpyToSymbol(dIn, &hIn, 10 * sizeof(struct inlet));
hipMemcpyToSymbol(dOut, &hOut, 10 * sizeof(struct outlet));
return 0;
}
int copyDeviceToHost(struct model *dm, struct model *hm) {
hm->pn = dm->pn;
hipMemcpy(hm->Material, dm->Material, (MAXP * sizeof(int)), hipMemcpyDeviceToHost);
hipMemcpy(hm->Mass, dm->Mass, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->Smooth, dm->Smooth, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->PosX, dm->PosX, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->PosY, dm->PosY, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->PosZ, dm->PosZ, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->VelX, dm->VelX, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->VelY, dm->VelY, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->VelZ, dm->VelZ, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->Density, dm->Density, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->Energy, dm->Energy, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->Pressure, dm->Pressure, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->Sound, dm->Sound, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->VelDotX, dm->VelDotX, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->VelDotY, dm->VelDotY, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->VelDotZ, dm->VelDotZ, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->DensityDot, dm->DensityDot, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->EnergyDot, dm->EnergyDot, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->PosX0, dm->PosX0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->PosY0, dm->PosY0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->PosZ0, dm->PosZ0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->VelX0, dm->VelX0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->VelY0, dm->VelY0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->VelZ0, dm->VelZ0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->Density0, dm->Density0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->Energy0, dm->Energy0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->List, dm->List, (MAXP * MAXN * sizeof(int)), hipMemcpyDeviceToHost);
hipMemcpy(hm->Hash, dm->Hash, (MAXP * sizeof(int)), hipMemcpyDeviceToHost);
hipMemcpy(hm->Index, dm->Index, (MAXP * sizeof(int)), hipMemcpyDeviceToHost);
hipMemcpy(hm->IntDummy, dm->IntDummy, (MAXP * sizeof(int)), hipMemcpyDeviceToHost);
hipMemcpy(hm->FloatDummy, dm->FloatDummy, (MAXP * sizeof(float)), hipMemcpyDeviceToHost);
hipMemcpy(hm->SetStart, dm->SetStart, (MAXG * sizeof(int)), hipMemcpyDeviceToHost);
hipMemcpy(hm->SetStop, dm->SetStop, (MAXG * sizeof(int)), hipMemcpyDeviceToHost);
hipMemcpy(&hGrid, &dGrid, (sizeof(struct grid)), hipMemcpyDeviceToHost);
return 0;
}
int backupDataHost(struct model *hm) {
memcpy(hm->PosX0, hm->PosX, MAXP * sizeof(float));
memcpy(hm->PosY0, hm->PosY, MAXP * sizeof(float));
memcpy(hm->PosZ0, hm->PosZ, MAXP * sizeof(float));
memcpy(hm->VelX0, hm->VelX, MAXP * sizeof(float));
memcpy(hm->VelY0, hm->VelY, MAXP * sizeof(float));
memcpy(hm->VelZ0, hm->VelZ, MAXP * sizeof(float));
memcpy(hm->Density0, hm->Density, MAXP * sizeof(float));
memcpy(hm->Energy0, hm->Energy, MAXP * sizeof(float));
return 0;
}
int backupDataDevice(struct model *dm) {
hipMemcpy(dm->PosX0, dm->PosX, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice);
hipMemcpy(dm->PosY0, dm->PosY, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice);
hipMemcpy(dm->PosZ0, dm->PosZ, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice);
hipMemcpy(dm->VelX0, dm->VelX, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice);
hipMemcpy(dm->VelY0, dm->VelY, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice);
hipMemcpy(dm->VelZ0, dm->VelZ, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice);
hipMemcpy(dm->Density0, dm->Density, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice);
hipMemcpy(dm->Energy0, dm->Energy, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice);
return 0;
}
/*
int initRun() {
FILE *stream;
char tok[10];
int i, m, p, pn;
int iv;
float fv;
int mpn, mpp[10];
// Open stream file
stream = fopen("armando.run", "r");
while (!feof(stream)) {
sprintf(tok, " ");
fscanf(stream, "%s", tok);
if (strcmp(tok, "MAT") == 0) {
fscanf(stream, "%i", &iv);
if ((iv > 0) && (iv <= 50))
m = iv;
for (p = 0; p < 10; p++)
hMatProp[m][p] = 0.0;
if ((m > 0) && (m <= 10))
pn = 3;
if ((m > 10) && (m <= 20))
pn = 9;
if ((m > 20) && (m <= 30))
pn = 10;
if ((m > 30) && (m <= 40))
pn = 5;
if ((m > 40) && (m <= 50))
pn = 3;
for (p = 0; p < pn; p++) {
fscanf(stream, "%f", &fv);
hMatProp[m][p] = fv;
}
printf("Material %d\n", m);
printf("hMatProp: \n");
for (p = 0; p < pn; p++)
printf(" %f\n", hMatProp[m][p]);
printf("\n");
}
if (strcmp(tok, "TIME") == 0) {
fscanf(stream, "%f", &fv);
if (fv > 0.0)
hRun.dt = fv;
fscanf(stream, "%i", &iv);
if (iv > 0)
hRun.tsn = iv;
fscanf(stream, "%i", &iv);
if (iv > 0)
hRun.ssi = iv;
printf("Time step: %f\n", hRun.dt);
printf("Steps: %i\n", hRun.tsn);
printf("Save step: %i\n", hRun.ssi);
printf("\n");
}
if (strcmp(tok, "LIMITS") == 0) {
fscanf(stream, "%f", &fv);
hRun.minX = fv;
fscanf(stream, "%f", &fv);
hRun.maxX = fv;
fscanf(stream, "%f", &fv);
hRun.minY = fv;
fscanf(stream, "%f", &fv);
hRun.maxY = fv;
printf("Domain limits: \n");
printf("X: %+e - %+e \n", hRun.minX, hRun.maxX);
printf("Y: %+e - %+e \n", hRun.minY, hRun.maxY);
printf("\n");
}
if (strcmp(tok, "MONITORS") == 0) {
fscanf(stream, "%i", &iv);
mpn = iv;
for (i = 0; i < mpn; i++) {
fscanf(stream, "%i", &iv);
mpp[i] = iv;
}
printf("Monitored particles: %i \n", mpn);
if (mpn > 0) {
printf("Index:");
for (i = 0; i < mpn; i++)
printf(" %i", mpp[i]);
printf("\n");
printf("\n");
}
}
}
fclose(stream);
hSound = hSmooth / hRun.dt;
return 0;
}
*/
int scanData(struct model *hm) {
FILE *stream;
int i;
float fv1, fv2, fv3, fv4;
int iv;
// Stream file position
stream = fopen("pos.txt", "r");
for (i = 0; fscanf(stream, "%e %e %e", &fv1, &fv2, &fv3) == 3; i++) {
hm->PosX[i] = fv1;
hm->PosY[i] = fv2;
hm->PosZ[i] = fv3;
}
fclose(stream);
hm->pn = i;
// Stream file velocity
stream = fopen("vel.txt", "r");
for (i = 0; i < hm->pn; i++) {
fscanf(stream, "%e %e %e", &fv1, &fv2, &fv3);
hm->VelX[i] = fv1;
hm->VelY[i] = fv2;
hm->VelZ[i] = fv3;
}
fclose(stream);
// Stream file info
stream = fopen("info.txt", "r");
for (i = 0; i < hm->pn; i++) {
fscanf(stream, "%i %e %e ", &iv, &fv1, &fv2);
hm->Material[i] = iv;
hm->Mass[i] = fv1;
hm->Smooth[i] = fv2;
}
fclose(stream);
// Stream file field
stream = fopen("field.txt", "r");
for (i = 0; i < hm->pn; i++) {
fscanf(stream, "%e %e %e %e ", &fv1, &fv2, &fv3, &fv4);
hm->Density[i] = fv1;
hm->Energy[i] = fv2;
hm->Pressure[i] = fv3;
hm->Sound[i] = fv4;
}
fclose(stream);
return 0;
}
int printData(struct model *hm) {
/**
* \brief Particle data file output
*
* Saves particle data on a disk file
*
* \date Oct 21, 2010
* \author Luca Massidda
*/
FILE *stream;
int i;
// Stream file position
stream = fopen("pos.txt", "w");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->PosX[i], hm->PosY[i], hm->PosZ[i]);
fclose(stream);
// Stream file velocity
stream = fopen("vel.txt", "w");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->VelX[i], hm->VelY[i], hm->VelZ[i]);
fclose(stream);
// Stream file info
stream = fopen("info.txt", "w");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%i %+14.8e %+14.8e \n", hm->Material[i], hm->Mass[i], hm->Smooth[i]);
fclose(stream);
// Stream file field
stream = fopen("field.txt", "w");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+14.8e %+14.8e %+14.8e %+14.8e \n", hm->Density[i], hm->Energy[i], hm->Pressure[i], hm->Sound[i]);
fclose(stream);
/*
// Stream file debug
stream = fopen("debug.txt", "w");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%d %d %d %d %d\n", i, hm->Index[i], hm->Hash[i], hm->SetStart[hm->Hash[i]], hm->SetStop[hm->Hash[i]]);
fclose(stream);
// Stream file list
stream = fopen("list.txt", "w");
for (i = 0; i < hm->pn; i++) {
for (j = 0; j < 10; j++)
fprintf(stream, "%d ", hm->List[i*MAXN +j]);
fprintf(stream, "\n");
}
fclose(stream);
*/
return 0;
}
int outputVTK(struct model *hm, int ss) {
/**
* \brief Output Data file
*
* Saves vtk data file
*
* \date Oct 21, 2010
* \author Luca Massidda
*/
FILE *stream;
char filename[80];
int i;
// Stream position file
sprintf(filename, "out%05d.vtk", ss);
stream = fopen(filename, "w");
fprintf(stream, "# vtk DataFile Version 2.0\n");
fprintf(stream, "Unstructured Grid Example\n");
fprintf(stream, "ASCII\n");
fprintf(stream, "DATASET UNSTRUCTURED_GRID\n");
fprintf(stream, "POINTS %i float\n", hm->pn);
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+e %+e %+e \n", hm->PosX[i], hm->PosY[i], hm->PosZ[i]);
fprintf(stream, "CELLS %i %i \n", hm->pn, 2*hm->pn);
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%i %i \n", 1, i);
fprintf(stream, "CELL_TYPES %i \n", hm->pn);
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%i \n", 1);
fprintf(stream, "POINT_DATA %i \n", hm->pn);
fprintf(stream, "SCALARS material int 1 \n", hm->pn);
fprintf(stream, "LOOKUP_TABLE default\n");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+d \n", hm->Material[i]);
fprintf(stream, "SCALARS density float 1 \n", hm->pn);
fprintf(stream, "LOOKUP_TABLE default\n");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+e \n", hm->Density[i]);
fprintf(stream, "SCALARS pressure float 1 \n", hm->pn);
fprintf(stream, "LOOKUP_TABLE default\n");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+e \n", hm->Pressure[i]);
fprintf(stream, "SCALARS energy float 1 \n", hm->pn);
fprintf(stream, "LOOKUP_TABLE default\n");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+e \n", hm->Energy[i]);
fprintf(stream, "VECTORS velocity float\n");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+e %+e %+e \n", hm->VelX[i], hm->VelY[i], hm->VelZ[i]);
fclose(stream);
/*
for (i = 0; i < hm->pn; i++) printf("%d %d \n", i, hm->Hash[i]);
printf("\n\n\n");
for (i = 0; i < hm->SetStart.size(); i++) printf("%d %d %d \n", i, hm->SetStart[i], hm->SetStop[i]);
for (i = 0; i < hm->pn; i++) {
printf("%d - ", i);
for (j = 0; j < MAXN; j++) printf("%d ", hm->List[i*MAXN +j]);
printf("\n");
}
*/
return 0;
}
void initFree(struct model *hm) {
int i, j, k, m, b, pi;
double rho, c0, pmin;
double dr;
m = 1;
b = 2;
rho = 1000.0f;
c0 = 50.0f;
pmin = -1.e12;
hMatType[m] = 4;
hMatProp[m][0] = rho;
hMatProp[m][1] = c0;
hMatProp[m][2] = pmin;
hMatType[b] = 0;
hMatProp[b][0] = rho;
hMatProp[b][1] = c0;
hMatProp[b][2] = pmin;
dr = 0.1; // x4
pi = 0;
for (k = 0; k < 10; k++) {
for (j = 0; j < 10; j++) {
for (i = 0; i < 10; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = m;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = 0; k < 10; k++) {
for (j = -2; j < -1; j++) {
for (i = 0; i < 10; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
hm->pn = pi;
for (i = 0; i < hm->pn; i++) {
hm->Mass[i] = rho * dr * dr * dr;
hm->Smooth[i] = 1.2f * dr;
hm->Sound[i] = c0;
}
hRun.minX = -2.5f;
hRun.maxX = 2.5f;
hRun.minY = -2.5f;
hRun.maxY = 2.5f;
hRun.minZ = -2.5f;
hRun.maxZ = 2.5f;
hRun.dt = 2.0e-3; //1.0e-3;
hRun.tsn = 600; //1000;
hRun.ssi = 200;
hGrid.oX = hRun.minX;
hGrid.oY = hRun.minY;
hGrid.oZ = hRun.minZ;
hGrid.size = 2.0f * 1.2f * dr;
hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1;
hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1;
hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1;
hLoad[0].minX = hRun.minX;
hLoad[0].maxX = hRun.maxX;
hLoad[0].minY = hRun.minY;
hLoad[0].maxY = hRun.maxY;
hLoad[0].minZ = hRun.minZ;
hLoad[0].maxZ = hRun.maxZ;
hLoad[0].gy = -9.81f;
printf("Freefall\n");
printf("Particles: %i \n", hm->pn);
}
void initBox(struct model *hm) {
int i, j, k, m, b, q, pi;
double rho, c0, pmin;
double dr;
q = 1;
m = 1;
b = 2;
rho = 1000.0f;
c0 = 40.0f;
pmin = -1.e12;
hMatType[m] = 4;
hMatProp[m][0] = rho;
hMatProp[m][1] = c0;
hMatProp[m][2] = pmin;
hMatType[b] = 0;
hMatProp[b][0] = rho;
hMatProp[b][1] = c0;
hMatProp[b][2] = pmin;
dr = 0.1f / q;
pi = 0;
for (k = 0; k < 10 * q; k++) {
for (j = 0; j < 10 * q; j++) {
for (i = 0; i < 15 * q; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = m;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -2; k < 20 * q +2; k++) {
for (j = -2; j < -1; j++) {
for (i = -2; i < 20 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -2; k < -1; k++) {
for (j = -1; j < 15 * q; j++) {
for (i = -2; i < 20 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = 20 * q +1; k < 20 * q +2 ; k++) {
for (j = -1; j < 15 * q; j++) {
for (i = -2; i < 20 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -1; k < 20 * q +1; k++) {
for (j = -1; j < 15 * q; j++) {
for (i = -2; i < -1; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -1; k < 20 * q +1; k++) {
for (j = -1; j < 15 * q; j++) {
for (i = 20 * q +1; i < 20 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
hm->pn = pi;
for (i = 0; i < hm->pn; i++) {
hm->Mass[i] = rho * dr * dr * dr;
hm->Smooth[i] = 1.2f * dr;
hm->Sound[i] = c0;
}
hRun.minX = -2.5f;
hRun.maxX = 2.5f;
hRun.minY = -2.5f;
hRun.maxY = 2.5f;
hRun.minZ = -2.5f;
hRun.maxZ = 2.5f;
hRun.dt = dr / c0;
hRun.tsn = 800 * q;
hRun.ssi = 20 * q;
hGrid.oX = hRun.minX;
hGrid.oY = hRun.minY;
hGrid.oZ = hRun.minZ;
hGrid.size = 2.0f * 1.2f * dr;
hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1;
hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1;
hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1;
hLoad[0].minX = hRun.minX;
hLoad[0].maxX = hRun.maxX;
hLoad[0].minY = hRun.minY;
hLoad[0].maxY = hRun.maxY;
hLoad[0].minZ = hRun.minZ;
hLoad[0].maxZ = hRun.maxZ;
hLoad[0].gy = -9.81f;
printf("Box\n");
printf("Particles: %i \n", hm->pn);
}
void initBath(struct model *hm) {
int i, j, k, m, b, q, pi;
double rho, c0, pmin;
double dr;
q = 15;
m = 1;
b = 2;
rho = 1000.0f;
c0 = 40.0f;
pmin = -1.e12;
hMatType[m] = 4;
hMatProp[m][0] = rho;
hMatProp[m][1] = c0;
hMatProp[m][2] = pmin;
hMatType[b] = 0;
hMatProp[b][0] = rho;
hMatProp[b][1] = c0;
hMatProp[b][2] = pmin;
dr = 0.1f / q;
pi = 0;
for (k = 0; k < 10 * q; k++) {
for (j = 0; j < 10 * q; j++) {
for (i = 0; i < 10 * q; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = m;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -2; k < 10 * q +2; k++) {
for (j = -2; j < -1; j++) {
for (i = -2; i < 10 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -2; k < -1; k++) {
for (j = -1; j < 12 * q; j++) {
for (i = -2; i < 10 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = 10 * q +1; k < 10 * q +2 ; k++) {
for (j = -1; j < 12 * q; j++) {
for (i = -2; i < 10 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -1; k < 10 * q +1; k++) {
for (j = -1; j < 12 * q; j++) {
for (i = -2; i < -1; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -1; k < 10 * q +1; k++) {
for (j = -1; j < 12 * q; j++) {
for (i = 10 * q +1; i < 10 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
hm->pn = pi;
for (i = 0; i < hm->pn; i++) {
hm->Mass[i] = rho * dr * dr * dr;
hm->Smooth[i] = 1.2f * dr;
hm->Sound[i] = c0;
}
hRun.minX = -2.5f;
hRun.maxX = 2.5f;
hRun.minY = -2.5f;
hRun.maxY = 2.5f;
hRun.minZ = -2.5f;
hRun.maxZ = 2.5f;
hRun.dt = dr / c0;
hRun.tsn = 1000 * q;
hRun.ssi = 20 * q;
hGrid.oX = hRun.minX;
hGrid.oY = hRun.minY;
hGrid.oZ = hRun.minZ;
hGrid.size = 2.0f * 1.2f * dr;
hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1;
hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1;
hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1;
hLoad[0].minX = hRun.minX;
hLoad[0].maxX = hRun.maxX;
hLoad[0].minY = hRun.minY;
hLoad[0].maxY = hRun.maxY;
hLoad[0].minZ = hRun.minZ;
hLoad[0].maxZ = hRun.maxZ;
hLoad[0].gy = -9.81f;
hOut[0].oX = 5 * q * dr;
hOut[0].oY = dr;
hOut[0].oZ = 5 * q * dr;
hOut[0].nX = 0.0f;
hOut[0].nY = 1.0f;
hOut[0].nZ = 0.0f;
hOut[0].R = 2.0f*q*dr;
hIn[0].Material = m;
hIn[0].Mass = rho * dr * dr * dr;
hIn[0].Smooth = 1.2f * dr;
hIn[0].Density = 0.0f;
hIn[0].Energy = 0.0f;
hIn[0].oX = 0.f * q * dr;
hIn[0].oY = 15.f * q * dr;
hIn[0].oZ = 5.f * q * dr;
hIn[0].nX = 1.0f;
hIn[0].nY = 0.0f;
hIn[0].nZ = 0.0f;
hIn[0].Velocity = 1.5f;
hIn[0].R = 2.0f *q*dr;
printf("Bath\n");
printf("Particles: %i \n", hm->pn);
printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ);
}
void initChannel(struct model *hm) {
int i, j, k, m, b, q, pi;
double rho, c0, pmin;
double dr;
q = 1;
m = 1;
b = 2;
rho = 1000.0f;
c0 = 20.0f;
pmin = -1.e12;
hMatType[m] = 4;
hMatProp[m][0] = rho;
hMatProp[m][1] = c0;
hMatProp[m][2] = pmin;
hMatType[b] = 0;
hMatProp[b][0] = rho;
hMatProp[b][1] = c0;
hMatProp[b][2] = pmin;
dr = 0.1f / q;
pi = 0;
/*
for (k = 0; k < 10 * q; k++) {
for (j = 0; j < 10 * q; j++) {
for (i = 0; i < 10 * q; i++) {
hm->PosX[pi] = i * dr + 0.0 * dr;
hm->PosY[pi] = j * dr + 0.0 * dr;
hm->PosZ[pi] = k * dr + 0.0 * dr;
hm->VelX[pi] = 0.0;
hm->VelY[pi] = 0.0;
hm->VelZ[pi] = 0.0;
hm->Material[pi] = m;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0;
hm->Pressure[pi] = 1.0;
pi++;
}
}
}
*/
for (k = -10 * q -2; k <= 10 * q +2; k++) {
for (j = -2; j <= -2; j++) {
for (i = 0; i <= 100 * q; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = -10 * q -2; k <= -10 * q -2; k++) {
for (j = -1; j <= 15 * q; j++) {
for (i = 0; i < 100 * q; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = 10 * q +2; k <= 10 * q +2 ; k++) {
for (j = -1; j <= 15 * q; j++) {
for (i = 0; i <= 100 * q; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
hm->pn = pi;
for (i = 0; i < hm->pn; i++) {
hm->Mass[i] = rho * dr * dr * dr;
hm->Smooth[i] = 1.2f * dr;
hm->Sound[i] = c0;
}
hRun.minX = -0.5f;
hRun.maxX = 10.5f;
hRun.minY = -0.5f;
hRun.maxY = 3.0f;
hRun.minZ = -1.5f;
hRun.maxZ = 1.5f;
hRun.dt = dr / c0;
hRun.tsn = 10000 * q;
hRun.ssi = 200 * q;
hGrid.oX = hRun.minX;
hGrid.oY = hRun.minY;
hGrid.oZ = hRun.minZ;
hGrid.size = 2.0f * 1.2f * dr;
hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1;
hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1;
hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1;
hLoad[0].minX = hRun.minX;
hLoad[0].maxX = hRun.maxX;
hLoad[0].minY = hRun.minY;
hLoad[0].maxY = hRun.maxY;
hLoad[0].minZ = hRun.minZ;
hLoad[0].maxZ = hRun.maxZ;
hLoad[0].gy = -9.81f;
hFix[0].minX = hRun.minX;
hFix[0].maxX = 2.0f * q * dr;
hFix[0].minY = hRun.minY;
hFix[0].maxY = hRun.maxY;
hFix[0].minZ = hRun.minZ;
hFix[0].maxZ = hRun.maxZ;
hFix[0].vx = 2.0f;
hFix[0].vy = 0.0f;
hFix[0].vz = 0.0f;
hFix[0].tau = 10.0 * hRun.dt;
hFix[1].minX = 97 * q * dr;
hFix[1].maxX = hRun.maxX;
hFix[1].minY = hRun.minY;
hFix[1].maxY = hRun.maxY;
hFix[1].minZ = hRun.minZ;
hFix[1].maxZ = hRun.maxZ;
hFix[1].vx = 2.0f;
hFix[1].vy = 0.0f;
hFix[1].vz = 0.0f;
hFix[1].tau = 10.0 * hRun.dt;
hOut[0].oX = 100 * q * dr;
hOut[0].oY = 5 * q * dr;
hOut[0].oZ = 0.0f;
hOut[0].nX = -1.0f;
hOut[0].nY = 0.0f;
hOut[0].nZ = 0.0f;
hOut[0].R = 20.0f*q*dr;
hIn[0].Material = m;
hIn[0].Mass = rho * dr * dr * dr;
hIn[0].Smooth = 1.2f * dr;
hIn[0].Density = 0.0f;
hIn[0].Energy = 0.0f;
hIn[0].oX = 10.f * q * dr;
hIn[0].oY = 20.f * q * dr;
hIn[0].oZ = 0.f * q * dr;
hIn[0].nX = 0.5f;
hIn[0].nY = -0.5f;
hIn[0].nZ = 0.0f;
hIn[0].Velocity = 2.0f;
hIn[0].R = 10.0f *q*dr;
printf("Channel\n");
printf("Particles: %i \n", hm->pn);
printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ);
}
void initESS(struct model *hm) {
int i, m, b;
double rho, cm, cb, pmin;
double dr;
FILE *stream;
float fv1, fv2, fv3;
m = 1;
b = 2;
rho = 10388.0f;
cm = 17.4f;
cb = 20.0f;
pmin = -1.0e6;
dr = 4.0e-3;
hMatType[m] = 5;
hMatProp[m][0] = rho;
hMatProp[m][1] = cm;
hMatProp[m][2] = 2.66;
hMatProp[m][3] = pmin;
hMatType[b] = 0;
hMatProp[b][0] = rho;
hMatProp[b][1] = cb;
hMatProp[b][2] = pmin;
hRun.minX = -1.0f;
hRun.maxX = 2.0f;
hRun.minY = -0.2f;
hRun.maxY = 0.6f;
hRun.minZ = -0.2f;
hRun.maxZ = 0.2f;
hRun.dt = dr / cm;
hRun.dt = 0.2e-3;
hRun.tsn = 10000;
hRun.ssi = 200;
hGrid.oX = hRun.minX;
hGrid.oY = hRun.minY;
hGrid.oZ = hRun.minZ;
hGrid.size = 2.0f * 1.2f * dr;
hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1;
hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1;
hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1;
hLoad[0].minX = hRun.minX;
hLoad[0].maxX = hRun.maxX;
hLoad[0].minY = hRun.minY;
hLoad[0].maxY = hRun.maxY;
hLoad[0].minZ = hRun.minZ;
hLoad[0].maxZ = hRun.maxZ;
hLoad[0].gy = -9.81f;
hFix[0].minX = 0.90f;
hFix[0].maxX = 1.00f;
hFix[0].minY = hRun.minY;
hFix[0].maxY = hRun.maxY;
hFix[0].minZ = hRun.minZ;
hFix[0].maxZ = hRun.maxZ;
hFix[0].vx = -1.5f;
hFix[0].vy = 0.0f;
hFix[0].vz = 0.0f;
hFix[0].tau = 8.0 * hRun.dt;
hIn[0].Material = m;
hIn[0].Mass = rho * dr * dr * dr;
hIn[0].Smooth = 1.2f * dr;
hIn[0].Density = 0.0f;
hIn[0].Energy = 0.0f;
hIn[0].oX = 1.40f;
hIn[0].oY = 0.5f;
hIn[0].oZ = 0.05f;
hIn[0].nX = 0.0f;
hIn[0].nY = -1.0f;
hIn[0].nZ = 0.0f;
hIn[0].Velocity = 3.0f;
hIn[0].R = 0.04f;
hIn[1].Material = m;
hIn[1].Mass = rho * dr * dr * dr;
hIn[1].Smooth = 1.2f * dr;
hIn[1].Density = 0.0f;
hIn[1].Energy = 0.0f;
hIn[1].oX = 1.40f;
hIn[1].oY = 0.5f;
hIn[1].oZ = -0.05f;
hIn[1].nX = 0.0f;
hIn[1].nY = -1.0f;
hIn[1].nZ = 0.0f;
hIn[1].Velocity = 3.0f;
hIn[1].R = 0.04f;
// Stream file position
stream = fopen("surface.txt", "r");
for (i = 0; fscanf(stream, "%e %e %e", &fv1, &fv2, &fv3) == 3; i++) {
hm->PosX[i] = fv1;
hm->PosY[i] = fv2;
hm->PosZ[i] = fv3;
}
fclose(stream);
hm->pn = i;
for (i = 0; i < hm->pn; i++) {
hm->VelX[i] = 0.0f;
hm->VelY[i] = 0.0f;
hm->VelZ[i] = 0.0f;
hm->Material[i] = 2;
hm->Mass[i] = rho * powf(dr, 3);
hm->Smooth[i] = 1.2f*dr;
hm->Density[i] = 0.0f;
hm->Energy[i] = 0.0f;
hm->Pressure[i] = 0.0f;
hm->Sound[i] = cb;
}
printf("ESS\n");
printf("Particles: %i \n", hm->pn);
printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ);
}
void steadyESS(struct model *hm) {
int m, b, i;
double rho, cm, cb, pmin;
double dr;
m = 1;
b = 2;
rho = 10388.0f;
cm = 17.400f;
cb = 20.0f;
pmin = -150.0e3;
dr = 4.0e-3;
hMatType[m] = 5;
hMatProp[m][0] = rho;
hMatProp[m][1] = cm;
hMatProp[m][2] = 0.0f*2.66f;
hMatProp[m][3] = pmin;
hMatType[b] = 0;
hMatProp[b][0] = rho;
hMatProp[b][1] = cb;
hMatProp[b][2] = pmin;
hRun.minX = -1.0f;
hRun.maxX = 2.0f;
hRun.minY = -0.2f;
hRun.maxY = 0.6f;
hRun.minZ = -0.2f;
hRun.maxZ = 0.2f;
hRun.dt = dr / cm;
hRun.dt = 0.2e-3;
hRun.tsn = 10000;
hRun.ssi = 1000;
hGrid.oX = hRun.minX;
hGrid.oY = hRun.minY;
hGrid.oZ = hRun.minZ;
hGrid.size = 2.0f * 1.2f * dr;
hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1;
hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1;
hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1;
hLoad[0].minX = hRun.minX;
hLoad[0].maxX = hRun.maxX;
hLoad[0].minY = hRun.minY;
hLoad[0].maxY = hRun.maxY;
hLoad[0].minZ = hRun.minZ;
hLoad[0].maxZ = hRun.maxZ;
hLoad[0].gy = -9.81f;
hFix[0].minX = 0.90f;
hFix[0].maxX = 1.00f;
hFix[0].minY = hRun.minY;
hFix[0].maxY = hRun.maxY;
hFix[0].minZ = hRun.minZ;
hFix[0].maxZ = hRun.maxZ;
hFix[0].vx = -1.5f * 1.50f;
hFix[0].vy = 0.0f;
hFix[0].vz = 0.0f;
hFix[0].tau = 8.0 * hRun.dt;
hIn[0].Material = m;
hIn[0].Mass = rho * dr * dr * dr;
hIn[0].Smooth = 1.2f * dr;
hIn[0].Density = 0.0f;
hIn[0].Energy = 0.0f;
hIn[0].oX = 1.40f;
hIn[0].oY = 0.5f;
hIn[0].oZ = 0.05f;
hIn[0].nX = 0.0f;
hIn[0].nY = -1.0f;
hIn[0].nZ = 0.0f;
hIn[0].Velocity = 3.0f * 1.50f;
hIn[0].R = 0.04f;
hIn[1].Material = m;
hIn[1].Mass = rho * dr * dr * dr;
hIn[1].Smooth = 1.2f * dr;
hIn[1].Density = 0.0f;
hIn[1].Energy = 0.0f;
hIn[1].oX = 1.40f;
hIn[1].oY = 0.5f;
hIn[1].oZ = -0.05f;
hIn[1].nX = 0.0f;
hIn[1].nY = -1.0f;
hIn[1].nZ = 0.0f;
hIn[1].Velocity = 3.0f * 1.50f;
hIn[1].R = 0.04f;
scanData(hm);
for (i = 0; i < hm->pn; i++) {
/*
hm->Density[i] = 0.0f;
hm->Energy[i] = 0.0f;
hm->Pressure[i] = 0.0f;
hm->Sound[i] = cb;
*/
}
printf("ESS\n");
printf("Particles: %i \n", hm->pn);
printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ);
}
void initDamBreak(struct model *hm) {
int i, j, k, m, b, q, pi;
double rho, c0, pmin;
double dr;
q = 4;
m = 1;
b = 2;
rho = 1000.0f;
c0 = 20.0f;
pmin = -1.e12;
hMatType[m] = 4;
hMatProp[m][0] = rho;
hMatProp[m][1] = c0;
hMatProp[m][2] = pmin;
hMatType[b] = 0;
hMatProp[b][0] = rho;
hMatProp[b][1] = c0;
hMatProp[b][2] = pmin;
dr = 0.025f / q;
pi = 0;
for (k = -20 * q; k <= 20 * q; k++) {
for (j = 0; j <= 22 * q; j++) {
for (i = 0; i <= 49 * q; i++) {
hm->PosX[pi] = i * dr;
hm->PosY[pi] = j * dr;
hm->PosZ[pi] = k * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = m;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = -20 * q -2; k <= 20 * q +2; k++) {
for (j = -2; j <= -2; j++) {
for (i = -80 * q -2; i <= 49 * q +2; i++) {
hm->PosX[pi] = i * dr;
hm->PosY[pi] = j * dr;
hm->PosZ[pi] = k * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = -20 * q -2; k <= -20 * q -2; k++) {
for (j = -1; j <= 40 * q; j++) {
for (i = -80 * q -2; i <= 49 * q +2; i++) {
hm->PosX[pi] = i * dr;
hm->PosY[pi] = j * dr;
hm->PosZ[pi] = k * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = 20 * q +2; k <= 20 * q +2; k++) {
for (j = -1; j <= 40 * q; j++) {
for (i = -80 * q -2; i <= 49 * q +2; i++) {
hm->PosX[pi] = i * dr;
hm->PosY[pi] = j * dr;
hm->PosZ[pi] = k * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = -20 * q -1; k <= 20 * q +1; k++) {
for (j = -1; j <= 40 * q; j++) {
for (i = -80 * q -2; i <= -80 * q -2; i++) {
hm->PosX[pi] = i * dr;
hm->PosY[pi] = j * dr;
hm->PosZ[pi] = k * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = -20 * q -1; k <= 20 * q +1; k++) {
for (j = -1; j <= 40 * q; j++) {
for (i = 49 * q +2; i <= 49 * q +2; i++) {
hm->PosX[pi] = i * dr;
hm->PosY[pi] = j * dr;
hm->PosZ[pi] = k * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = -8 * q -1; k <= 8 * q +1; k++) {
for (j = -0; j <= 6 * q -1; j++) {
for (i = -53 * q +1; i <= -47 * q -1; i++) {
hm->PosX[pi] = i * dr;
hm->PosY[pi] = j * dr;
hm->PosZ[pi] = k * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
hm->pn = pi;
for (i = 0; i < hm->pn; i++) {
hm->Mass[i] = rho * dr * dr * dr;
hm->Smooth[i] = 1.2f * dr;
hm->Sound[i] = c0;
}
hRun.minX = -2.5f;
hRun.maxX = 2.5f;
hRun.minY = -2.5f;
hRun.maxY = 2.5f;
hRun.minZ = -2.5f;
hRun.maxZ = 2.5f;
hRun.dt = dr / c0;
hRun.tsn = 2000 * q;
hRun.ssi = 40 * q;
hGrid.oX = hRun.minX;
hGrid.oY = hRun.minY;
hGrid.oZ = hRun.minZ;
hGrid.size = 2.0f * 1.2f * dr;
hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1;
hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1;
hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1;
hLoad[0].minX = hRun.minX;
hLoad[0].maxX = hRun.maxX;
hLoad[0].minY = hRun.minY;
hLoad[0].maxY = hRun.maxY;
hLoad[0].minZ = hRun.minZ;
hLoad[0].maxZ = hRun.maxZ;
hLoad[0].gy = -9.81f;
printf("Dam break\n");
printf("Particles: %i \n", hm->pn);
printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ);
}
int iSort(int *array, int *perm, int n) {
int i;
static int* dummy = NULL;
if (!dummy) dummy = (int *) malloc(MAXP * sizeof(int));
for (i = 0; i < n; i++) dummy[i] = array[i];
for (i = 0; i < n; i++) array[i] = dummy[perm[i]];
return 0;
}
int fSort(float *array, int *perm, int n) {
int i;
static float* dummy = NULL;
if (!dummy) dummy = (float *) malloc(MAXP * sizeof(float));
for (i = 0; i < n; i++) dummy[i] = array[i];
for (i = 0; i < n; i++) array[i] = dummy[perm[i]];
return 0;
}
int mapCompare(const void *a, const void *b)
{
int c;
struct pair m1, m2;
c = 0;
m1 = *(struct pair*)a;
m2 = *(struct pair*)b;
if (m1.key < m2.key) c = -1;
if (m1.key > m2.key) c = 1;
return c;
}
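/* Usage sketch (hedged): mapCompare is intended for the standard qsort, sorting an
 * array of struct pair by key, e.g.
 *
 * qsort(map, hm->pn, sizeof(struct pair), mapCompare);
 *
 * where map is assumed to hold one pair per particle with key set to the cell hash. */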
void updateHashHost(const int pn,
const float* PosX, const float* PosY, const float* PosZ,
int* Hash, const struct grid Grid) {
/**
* \brief Compute the grid cell hash of each particle
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
int ip, ix, iy, iz, ic;
for (ip = 0; ip < pn; ip++) {
ix = (int) truncf((PosX[ip] - Grid.oX) / Grid.size);
iy = (int) truncf((PosY[ip] - Grid.oY) / Grid.size);
iz = (int) truncf((PosZ[ip] - Grid.oZ) / Grid.size);
ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY;
if (ic < 0) ic = 0;
if (ic >= Grid.nX * Grid.nY * Grid.nZ) ic = Grid.nX * Grid.nY * Grid.nZ -1;
Hash[ip] = ic;
}
}
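/*
 * Cell indexing sketch (hypothetical numbers): with Grid.oX = Grid.oY = Grid.oZ = -2.5,
 * Grid.size = 0.1 and Grid.nX = Grid.nY = Grid.nZ = 51, a particle at (0, 0, 0) falls in
 * ix = iy = iz = 25 and gets the linear hash ic = 25 + 25*51 + 25*51*51 = 66325;
 * out-of-range cells are clamped to the first or last cell of the grid.
 */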
__global__ void updateHashDevice(const int pn,
const float* PosX, const float* PosY, const float* PosZ,
int* Hash, const struct grid Grid) {
/**
* \brief Computes the grid cell hash of each particle
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
int ip, ix, iy, iz, ic;
ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip < pn) {
ix = (int) truncf((PosX[ip] - Grid.oX) / Grid.size);
iy = (int) truncf((PosY[ip] - Grid.oY) / Grid.size);
iz = (int) truncf((PosZ[ip] - Grid.oZ) / Grid.size);
ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY;
if (ic < 0) ic = 0;
if (ic >= Grid.nX * Grid.nY * Grid.nZ) ic = Grid.nX * Grid.nY * Grid.nZ -1;
Hash[ip] = ic;
}
}
void checkOutHost(const int pn,
const float* PosX, const float* PosY, const float* PosZ,
int* Hash, const struct grid Grid) {
/**
* \brief Flags particles that crossed an outlet surface or left the domain
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
float dn, dr;
int ip, i;
for (ip = 0; ip < pn; ip++) {
for (i = 0; i < 10; i++) {
dn = 0.0f;
dn += (PosX[ip] - hOut[i].oX) * hOut[i].nX;
dn += (PosY[ip] - hOut[i].oY) * hOut[i].nY;
dn += (PosZ[ip] - hOut[i].oZ) * hOut[i].nZ;
dr = 0.0f;
dr += powf((PosX[ip] - hOut[i].oX) - dn * hOut[i].nX, 2);
dr += powf((PosY[ip] - hOut[i].oY) - dn * hOut[i].nY, 2);
dr += powf((PosZ[ip] - hOut[i].oZ) - dn * hOut[i].nZ, 2);
dr = sqrtf(dr);
if ((dn < 0.0f) && (dr < hOut[i].R)) {
Hash[ip] = Grid.nX * Grid.nY * Grid.nZ;
}
}
if ((PosX[ip] > hRun.maxX) ||
(PosX[ip] < hRun.minX) ||
(PosY[ip] > hRun.maxY) ||
(PosY[ip] < hRun.minY) ||
(PosZ[ip] > hRun.maxZ) ||
(PosZ[ip] < hRun.minZ)) Hash[ip] = Grid.nX * Grid.nY * Grid.nZ;
}
}
__global__ void checkOutDevice(const int pn,
const float* PosX, const float* PosY, const float* PosZ,
int* Hash, const struct grid Grid) {
/**
* \brief Flags particles that crossed an outlet surface or left the domain
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
float dn, dr;
int ip, i;
ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip < pn) {
for (i = 0; i < 10; i++) {
dn = 0.0f;
dn += (PosX[ip] - dOut[i].oX) * dOut[i].nX;
dn += (PosY[ip] - dOut[i].oY) * dOut[i].nY;
dn += (PosZ[ip] - dOut[i].oZ) * dOut[i].nZ;
dr = 0.0f;
dr += powf((PosX[ip] - dOut[i].oX) - dn * dOut[i].nX, 2);
dr += powf((PosY[ip] - dOut[i].oY) - dn * dOut[i].nY, 2);
dr += powf((PosZ[ip] - dOut[i].oZ) - dn * dOut[i].nZ, 2);
dr = sqrtf(dr);
if ((dn < 0.0f) && (dr < dOut[i].R)) {
Hash[ip] = Grid.nX * Grid.nY * Grid.nZ;
}
}
if ((PosX[ip] > dRun.maxX) ||
(PosX[ip] < dRun.minX) ||
(PosY[ip] > dRun.maxY) ||
(PosY[ip] < dRun.minY) ||
(PosZ[ip] > dRun.maxZ) ||
(PosZ[ip] < dRun.minZ)) Hash[ip] = Grid.nX * Grid.nY * Grid.nZ;
}
}
void inletHost(struct model *hm) {
int i, iu, iv, n, p;
float3 u, v, w, r;
int material[MAXPI];
float mass[MAXPI], smooth[MAXPI];
float posX[MAXPI], posY[MAXPI], posZ[MAXPI];
float velX[MAXPI], velY[MAXPI], velZ[MAXPI];
float density[MAXPI], energy[MAXPI];
p = 0;
for (i = 0; i < 10; i++) if (hIn[i].Material != 0) {
hIn[i].Distance += hIn[i].Velocity * hRun.dt;
if (hIn[i].Distance > hIn[i].Smooth / 1.2f) {
hIn[i].Distance = hIn[i].Distance - hIn[i].Smooth / 1.2f;
w = make_float3(hIn[i].nX, hIn[i].nY, hIn[i].nZ);
w = normalize(w);
if ((fabsf(w.x) <= fabsf(w.y)) && (fabsf(w.x) <= fabsf(w.z))) u = make_float3(1.f, 0.f, 0.f);
if ((fabsf(w.y) <= fabsf(w.x)) && (fabsf(w.y) <= fabsf(w.z))) u = make_float3(0.f, 1.f, 0.f);
if ((fabsf(w.z) <= fabsf(w.x)) && (fabsf(w.z) <= fabsf(w.y))) u = make_float3(0.f, 0.f, 1.f);
v = cross(w, u);
n = (int) roundf(1.2f * hIn[i].R / hIn[i].Smooth);
for (iv = -n; iv <= n; iv++) {
for (iu = -n; iu <= n; iu++) {
r = iu * u + iv * v;
r *= hIn[i].Smooth / 1.2f;
if (length(r) < hIn[i].R) {
material[p] = hIn[i].Material;
mass[p] = hIn[i].Mass;
smooth[p] = hIn[i].Smooth;
posX[p] = hIn[i].oX + r.x;
posY[p] = hIn[i].oY + r.y;
posZ[p] = hIn[i].oZ + r.z;
velX[p] = hIn[i].Velocity * w.x;
velY[p] = hIn[i].Velocity * w.y;
velZ[p] = hIn[i].Velocity * w.z;
density[p] = hIn[i].Density;
energy[p] = hIn[i].Energy;
p++;
}
}
}
}
}
hipMemcpy(hm->Material + hm->pn, material, (p * sizeof(int)), hipMemcpyHostToHost);
hipMemcpy(hm->Mass + hm->pn, mass, (p * sizeof(float)), hipMemcpyHostToHost);
hipMemcpy(hm->Smooth + hm->pn, smooth, (p * sizeof(float)), hipMemcpyHostToHost);
hipMemcpy(hm->PosX + hm->pn, posX, (p * sizeof(float)), hipMemcpyHostToHost);
hipMemcpy(hm->PosY + hm->pn, posY, (p * sizeof(float)), hipMemcpyHostToHost);
hipMemcpy(hm->PosZ + hm->pn, posZ, (p * sizeof(float)), hipMemcpyHostToHost);
hipMemcpy(hm->VelX + hm->pn, velX, (p * sizeof(float)), hipMemcpyHostToHost);
hipMemcpy(hm->VelY + hm->pn, velY, (p * sizeof(float)), hipMemcpyHostToHost);
hipMemcpy(hm->VelZ + hm->pn, velZ, (p * sizeof(float)), hipMemcpyHostToHost);
hipMemcpy(hm->Density + hm->pn, density, (p * sizeof(float)), hipMemcpyHostToHost);
hipMemcpy(hm->Energy + hm->pn, energy, (p * sizeof(float)), hipMemcpyHostToHost);
hm->pn += p;
}
void inletDevice(struct model *dm) {
int i, iu, iv, n, p;
float3 u, v, w, r;
int material[MAXPI];
float mass[MAXPI], smooth[MAXPI];
float posX[MAXPI], posY[MAXPI], posZ[MAXPI];
float velX[MAXPI], velY[MAXPI], velZ[MAXPI];
float density[MAXPI], energy[MAXPI];
p = 0;
for (i = 0; i < 10; i++) if (hIn[i].Material != 0) {
hIn[i].Distance += hIn[i].Velocity * hRun.dt;
if (hIn[i].Distance > hIn[i].Smooth / 1.2f) {
hIn[i].Distance = hIn[i].Distance - hIn[i].Smooth / 1.2f;
w = make_float3(hIn[i].nX, hIn[i].nY, hIn[i].nZ);
w = normalize(w);
if ((fabsf(w.x) <= fabsf(w.y)) && (fabsf(w.x) <= fabsf(w.z))) u = make_float3(1.f, 0.f, 0.f);
if ((fabsf(w.y) <= fabsf(w.x)) && (fabsf(w.y) <= fabsf(w.z))) u = make_float3(0.f, 1.f, 0.f);
if ((fabsf(w.z) <= fabsf(w.x)) && (fabsf(w.z) <= fabsf(w.y))) u = make_float3(0.f, 0.f, 1.f);
v = cross(w, u);
n = (int) roundf(1.2f * hIn[i].R / hIn[i].Smooth);
for (iv = -n; iv <= n; iv++) {
for (iu = -n; iu <= n; iu++) {
r = iu * u + iv * v;
r *= hIn[i].Smooth / 1.2f;
if (length(r) < hIn[i].R) {
material[p] = hIn[i].Material;
mass[p] = hIn[i].Mass;
smooth[p] = hIn[i].Smooth;
posX[p] = hIn[i].oX + r.x;
posY[p] = hIn[i].oY + r.y;
posZ[p] = hIn[i].oZ + r.z;
velX[p] = hIn[i].Velocity * w.x;
velY[p] = hIn[i].Velocity * w.y;
velZ[p] = hIn[i].Velocity * w.z;
density[p] = hIn[i].Density;
energy[p] = hIn[i].Energy;
p++;
}
}
}
}
}
hipMemcpy(dm->Material + dm->pn, material, (p * sizeof(int)), hipMemcpyHostToDevice);
hipMemcpy(dm->Mass + dm->pn, mass, (p * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->Smooth + dm->pn, smooth, (p * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->PosX + dm->pn, posX, (p * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->PosY + dm->pn, posY, (p * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->PosZ + dm->pn, posZ, (p * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->VelX + dm->pn, velX, (p * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->VelY + dm->pn, velY, (p * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->VelZ + dm->pn, velZ, (p * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->Density + dm->pn, density, (p * sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(dm->Energy + dm->pn, energy, (p * sizeof(float)), hipMemcpyHostToDevice);
dm->pn += p;
}
void updateSetsHost(const int pn, int *SetStart, int *SetStop,
const int* Hash) {
/**
* \brief Computes the start and stop indices of each grid cell in the hash-sorted particle arrays
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
int ip;
int hash, nextHash, prevHash;
for (ip = 0; ip < pn; ip++) {
hash = Hash[ip];
if (ip == 0) prevHash = -1;
else prevHash = Hash[ip -1];
if (ip == pn -1) nextHash = -1;
else nextHash = Hash[ip +1];
if (hash != prevHash) SetStart[hash] = ip;
if (hash != nextHash) SetStop[hash] = ip +1;
}
}
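/*
 * Example (hypothetical values): for the sorted hashes {0, 0, 1, 1, 1, 4} this yields
 * SetStart[0] = 0, SetStop[0] = 2, SetStart[1] = 2, SetStop[1] = 5, SetStart[4] = 5,
 * SetStop[4] = 6; cells that contain no particles keep SetStart = SetStop = 0.
 */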
__global__ void updateSetsDevice(const int pn, int *SetStart, int *SetStop,
const int* Hash) {
/**
* \brief Computes the start and stop indices of each grid cell in the hash-sorted particle arrays
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
__shared__ int prevHash[THREADS];
__shared__ int nextHash[THREADS];
int ip;
int hash;
ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip >= pn) return;
hash = Hash[ip];
if (threadIdx.x < THREADS -1) prevHash[threadIdx.x +1] = hash;
if (threadIdx.x > 0) nextHash[threadIdx.x -1] = hash;
if (threadIdx.x == 0) {
if (ip == 0) prevHash[threadIdx.x] = -1;
else prevHash[threadIdx.x] = Hash[ip -1];
}
if (threadIdx.x == THREADS -1) {
if (ip == pn -1) nextHash[threadIdx.x] = -1;
else nextHash[threadIdx.x] = Hash[ip +1];
}
__syncthreads();
if (hash != prevHash[threadIdx.x]) SetStart[hash] = ip;
if (hash != nextHash[threadIdx.x]) SetStop[hash] = ip +1;
}
void updateListHost(const int pn, int *List,
const int* SetStart, const int* SetStop,
const float* Smooth,
const float* PosX, const float* PosY, const float* PosZ,
const struct grid Grid) {
int ip, ic, ix, iy, iz, i, j, k, jp, jc, np;
float dx, dy, dz, dr;
// Particles list is filled
for (ip = 0; ip < pn; ip++) {
ix = (int) ((PosX[ip] - Grid.oX) / Grid.size);
iy = (int) ((PosY[ip] - Grid.oY) / Grid.size);
iz = (int) ((PosZ[ip] - Grid.oZ) / Grid.size);
ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY;
np = 0;
for (k = -1; k <= 1; k++) {
for (j = -1; j <= 1; j++) {
for (i = -1; i <= 1; i++) {
jc = ic + i + j * Grid.nX + k * Grid.nX * Grid.nY;
if (jc >= 0 && jc < Grid.nX * Grid.nY * Grid.nZ) {
for (jp = SetStart[jc]; jp < SetStop[jc]; jp++) {
dx = PosX[ip] - PosX[jp];
dy = PosY[ip] - PosY[jp];
dz = PosZ[ip] - PosZ[jp];
dr = sqrtf(dx * dx + dy * dy + dz * dz);
if ((dr < 2.0f * Smooth[ip]) && (np < MAXN)) {
List[ip * MAXN + np] = jp;
np++;
}
}
}
}
}
}
while (np < MAXN) {
List[ip * MAXN + np] = ip;
np++;
}
}
}
__global__ void updateListDevice(const int pn, int *List,
const int* SetStart, const int* SetStop,
const float* Smooth,
const float* PosX, const float* PosY, const float* PosZ,
const struct grid Grid) {
int ip, ic, ix, iy, iz, i, j, k, jp, jc, np;
float dx, dy, dz, dr;
// Particles list is filled
ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip >= pn) return;
ix = (int) ((PosX[ip] - Grid.oX) / Grid.size);
iy = (int) ((PosY[ip] - Grid.oY) / Grid.size);
iz = (int) ((PosZ[ip] - Grid.oZ) / Grid.size);
ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY;
np = 0;
for (k = -1; k <= 1; k++) {
for (j = -1; j <= 1; j++) {
for (i = -1; i <= 1; i++) {
jc = ic + i + j * Grid.nX + k * Grid.nX * Grid.nY;
if (jc >= 0 && jc < Grid.nX * Grid.nY * Grid.nZ) {
for (jp = SetStart[jc]; jp < SetStop[jc]; jp++) {
dx = PosX[ip] - PosX[jp];
dy = PosY[ip] - PosY[jp];
dz = PosZ[ip] - PosZ[jp];
dr = sqrtf(dx * dx + dy * dy + dz * dz);
if ((dr < 2.0f * Smooth[ip]) && (np < MAXN)) {
List[ip * MAXN + np] = jp;
np++;
}
}
}
}
}
}
while (np < MAXN) {
List[ip * MAXN + np] = ip;
np++;
}
}
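/*
 * Note on the neighbour list layout: List is a row-major pn x MAXN table. Rows are padded
 * with the particle's own index, so the interaction kernels can always loop over exactly
 * MAXN entries; the padding entries contribute nothing because dr is then clamped to
 * 100 * Smooth, where both the kernel and its derivative vanish.
 */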
int neighbourListHost(struct model *hm) {
static struct pair map[MAXP];
int i, ip, pout;
updateHashHost(hm->pn, hm->PosX, hm->PosY, hm->PosZ, hm->Hash, hGrid);
checkOutHost(hm->pn, hm->PosX, hm->PosY, hm->PosZ, hm->Hash, hGrid);
for (ip = 0; ip < hm->pn; ip++) hm->Index[ip] = ip;
for (ip = 0; ip < hm->pn; ip++) {
map[ip].key = hm->Hash[ip];
map[ip].value = hm->Index[ip];
}
qsort(map, hm->pn, sizeof(struct pair), mapCompare);
for (ip = 0; ip < hm->pn; ip++) {
hm->Hash[ip] = map[ip].key;
hm->Index[ip] = map[ip].value;
}
iSort(hm->Material, hm->Index, hm->pn);
fSort(hm->Mass, hm->Index, hm->pn);
fSort(hm->Smooth, hm->Index, hm->pn);
fSort(hm->PosX, hm->Index, hm->pn);
fSort(hm->PosY, hm->Index, hm->pn);
fSort(hm->PosZ, hm->Index, hm->pn);
fSort(hm->VelX, hm->Index, hm->pn);
fSort(hm->VelY, hm->Index, hm->pn);
fSort(hm->VelZ, hm->Index, hm->pn);
fSort(hm->Density, hm->Index, hm->pn);
fSort(hm->Energy, hm->Index, hm->pn);
fSort(hm->Pressure, hm->Index, hm->pn);
fSort(hm->Sound, hm->Index, hm->pn);
pout = 0;
for (ip = 0; ip < hm->pn; ip++) if (hm->Hash[ip] == hGrid.nX * hGrid.nY * hGrid.nZ) pout++;
hm->pn -= pout;
for (i = 0; i < hGrid.nX * hGrid.nY * hGrid.nZ; i++) hm->SetStart[i] = 0;
for (i = 0; i < hGrid.nX * hGrid.nY * hGrid.nZ; i++) hm->SetStop[i] = 0;
updateSetsHost(hm->pn, hm->SetStart, hm->SetStop, hm->Hash);
updateListHost(hm->pn, hm->List, hm->SetStart, hm->SetStop, hm->Smooth,
hm->PosX, hm->PosY, hm->PosZ, hGrid);
return 0;
}
int neighbourListDevice(struct model *dm) {
int pout;
int blocks, threads;
blocks = (dm->pn + THREADS - 1) / THREADS;
threads = THREADS;
thrust::device_ptr<int> tIndex(dm->Index);
thrust::device_ptr<int> tHash(dm->Hash);
thrust::device_ptr<int> tMaterial(dm->Material);
thrust::device_ptr<float> tMass(dm->Mass);
thrust::device_ptr<float> tSmooth(dm->Smooth);
thrust::device_ptr<float> tPosX(dm->PosX);
thrust::device_ptr<float> tPosY(dm->PosY);
thrust::device_ptr<float> tPosZ(dm->PosZ);
thrust::device_ptr<float> tVelX(dm->VelX);
thrust::device_ptr<float> tVelY(dm->VelY);
thrust::device_ptr<float> tVelZ(dm->VelZ);
thrust::device_ptr<float> tDensity(dm->Density);
thrust::device_ptr<float> tEnergy(dm->Energy);
thrust::device_ptr<float> tPressure(dm->Pressure);
thrust::device_ptr<float> tSound(dm->Sound);
thrust::device_ptr<int> tIntDummy(dm->IntDummy);
thrust::device_ptr<float> tFloatDummy(dm->FloatDummy);
thrust::device_ptr<int> tSetStart(dm->SetStart);
thrust::device_ptr<int> tSetStop(dm->SetStop);
hipLaunchKernelGGL(( updateHashDevice) , dim3(blocks), dim3(threads) , 0, 0,
dm->pn, dm->PosX, dm->PosY, dm->PosZ, dm->Hash, hGrid);
hipLaunchKernelGGL(( checkOutDevice) , dim3(blocks), dim3(threads) , 0, 0,
dm->pn, dm->PosX, dm->PosY, dm->PosZ, dm->Hash, hGrid);
thrust::sequence(tIndex, tIndex + dm->pn, 0);
thrust::sort_by_key(tHash, tHash + dm->pn, tIndex);
thrust::copy(tMaterial, tMaterial + dm->pn, tIntDummy);
thrust::gather(tIndex, tIndex + dm->pn, tIntDummy, tMaterial);
thrust::copy(tMass, tMass + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tMass);
thrust::copy(tSmooth, tSmooth + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tSmooth);
thrust::copy(tPosX, tPosX + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosX);
thrust::copy(tPosY, tPosY + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosY);
thrust::copy(tPosZ, tPosZ + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosZ);
thrust::copy(tVelX, tVelX + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelX);
thrust::copy(tVelY, tVelY + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelY);
thrust::copy(tVelZ, tVelZ + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelZ);
thrust::copy(tDensity, tDensity + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tDensity);
thrust::copy(tEnergy, tEnergy + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tEnergy);
thrust::copy(tPressure, tPressure + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPressure);
thrust::copy(tSound, tSound + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tSound);
pout = thrust::count(tHash, tHash + dm->pn, hGrid.nX * hGrid.nY * hGrid.nZ);
dm->pn -= pout;
thrust::fill(tSetStart, tSetStart + hGrid.nX * hGrid.nY * hGrid.nZ, 0);
thrust::fill(tSetStop, tSetStop + hGrid.nX * hGrid.nY * hGrid.nZ, 0);
hipLaunchKernelGGL(( updateSetsDevice) , dim3(blocks), dim3(threads) , 0, 0,
dm->pn, dm->SetStart, dm->SetStop, dm->Hash);
hipLaunchKernelGGL(( updateListDevice) , dim3(blocks), dim3(threads) , 0, 0,
dm->pn, dm->List, dm->SetStart, dm->SetStop, dm->Smooth,
dm->PosX, dm->PosY, dm->PosZ, hGrid);
return 0;
}
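/*
 * Summary of the neighbour search on the device: hash particles to grid cells, sort them
 * by hash with thrust::sort_by_key, reorder every particle field through the Index
 * permutation (via the IntDummy/FloatDummy scratch arrays), drop the particles flagged by
 * checkOutDevice (they carry the largest hash, so after sorting they sit at the end and
 * decreasing pn discards them), rebuild the per-cell start/stop ranges and finally fill
 * the fixed-size neighbour lists.
 */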
int RKstepHost(struct model *hm, float alpha) {
int ip;
for (ip = 0; ip < hm->pn; ip++) {
hm->VelDotX[ip] = 0.0f;
hm->VelDotY[ip] = 0.0f;
hm->VelDotZ[ip] = 0.0f;
hm->DensityDot[ip] = 0.0f;
hm->EnergyDot[ip] = 0.0f;
}
// External loads
updateLoadsHost(hm->pn, hm->Material,
hm->PosX, hm->PosY, hm->PosZ,
hm->VelX, hm->VelY, hm->VelZ,
hm->VelDotX, hm->VelDotY, hm->VelDotZ, hm->EnergyDot);
// Calculate particle interactions
balanceMassMomentumHost(hm->pn, hm->List, hm->Material, hm->Mass, hm->Smooth,
hm->PosX, hm->PosY, hm->PosZ,
hm->VelX, hm->VelY, hm->VelZ,
hm->Density, hm->Pressure, hm->Sound,
hm->DensityDot, hm->VelDotX, hm->VelDotY, hm->VelDotZ);
balanceEnergyHost(hm->pn, hm->Material, hm->Pressure, hm->Density,
hm->DensityDot, hm->EnergyDot);
// Update particles
updateParticlesHost(hm->pn, alpha, hm->Material,
hm->VelDotX, hm->VelDotY, hm->VelDotZ, hm->DensityDot, hm->EnergyDot,
hm->PosX0, hm->PosY0, hm->PosZ0,
hm->VelX0, hm->VelY0, hm->VelZ0, hm->Density0, hm->Energy0,
hm->PosX, hm->PosY, hm->PosZ, hm->VelX, hm->VelY, hm->VelZ,
hm->Density, hm->Energy, hm->Pressure, hm->Sound);
return 0;
}
int RKstepDevice(struct model *dm, float alpha, float time) {
int blocks, threads;
blocks = (dm->pn + THREADS - 1) / THREADS;
threads = THREADS;
thrust::device_ptr<float> tVelDotX(dm->VelDotX);
thrust::device_ptr<float> tVelDotY(dm->VelDotY);
thrust::device_ptr<float> tVelDotZ(dm->VelDotZ);
thrust::device_ptr<float> tDensityDot(dm->DensityDot);
thrust::device_ptr<float> tEnergyDot(dm->EnergyDot);
thrust::fill(tVelDotX, tVelDotX + dm->pn, 0.0f);
thrust::fill(tVelDotY, tVelDotY + dm->pn, 0.0f);
thrust::fill(tVelDotZ, tVelDotZ + dm->pn, 0.0f);
thrust::fill(tDensityDot, tDensityDot + dm->pn, 0.0f);
thrust::fill(tEnergyDot, tEnergyDot + dm->pn, 0.0f);
// External loads
hipLaunchKernelGGL(( updateLoadsDevice) , dim3(blocks), dim3(threads) , 0, 0,
dm->pn, dm->Material,
dm->PosX, dm->PosY, dm->PosZ,
dm->VelX, dm->VelY, dm->VelZ,
dm->VelDotX, dm->VelDotY, dm->VelDotZ, dm->EnergyDot, time);
// Calculate particle interactions
hipLaunchKernelGGL(( balanceMassMomentumDevice) , dim3(blocks), dim3(threads) , 0, 0,
dm->pn, dm->List, dm->Material, dm->Mass, dm->Smooth, dm->PosX, dm->PosY, dm->PosZ,
dm->VelX, dm->VelY, dm->VelZ, dm->Density, dm->Pressure, dm->Sound,
dm->DensityDot, dm->VelDotX, dm->VelDotY, dm->VelDotZ);
hipLaunchKernelGGL(( balanceEnergyDevice) , dim3(blocks), dim3(threads) , 0, 0,
dm->pn, dm->Material, dm->Pressure, dm->Density, dm->DensityDot, dm->EnergyDot);
// Update particles
hipLaunchKernelGGL(( updateParticlesDevice) , dim3(blocks), dim3(threads) , 0, 0,
dm->pn, alpha, dm->Material, dm->VelDotX, dm->VelDotY, dm->VelDotZ, dm->DensityDot, dm->EnergyDot,
dm->PosX0, dm->PosY0, dm->PosZ0, dm->VelX0, dm->VelY0, dm->VelZ0, dm->Density0, dm->Energy0,
dm->PosX, dm->PosY, dm->PosZ, dm->VelX, dm->VelY, dm->VelZ, dm->Density, dm->Energy, dm->Pressure, dm->Sound);
return 0;
}
int RKintegrateHost(struct model *hm) {
/**
* \brief Runge-Kutta 3rd order time integration
*
* Integrate the Navier-Stokes equations in time with the
* Total Variation Diminishing Runge-Kutta algorithm of the 3rd order
*
* \date Dec 20, 2010
* \author Luca Massidda
*/
int ts;
// TIME CYCLE
for (ts = 0; ts <= hRun.tsn; ts++) {
// Output data
if ((ts % hRun.ssi) == 0) {
printf("Saving time: %g \n", ts * hRun.dt);
printf("Particles: %i \n", hm->pn);
printData(hm);
outputVTK(hm, ts / hRun.ssi);
}
// Inlet conditions
inletHost(hm);
// Calculate neighbour list
neighbourListHost(hm);
// Save initial condition
backupDataHost(hm);
// Step 1
RKstepHost(hm, 1.0);
// Step 2
RKstepHost(hm, 1.0 / 4.0);
// Step 3
RKstepHost(hm, 2.0 / 3.0);
}
return 0;
}
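/*
 * The three alpha values implement the TVD Runge-Kutta scheme in incremental form,
 * u_new = u0 + alpha * (u + dt * L(u) - u0), which expands to the usual stages
 *   u1      = u0 + dt L(u0)
 *   u2      = 3/4 u0 + 1/4 (u1 + dt L(u1))
 *   u^{n+1} = 1/3 u0 + 2/3 (u2 + dt L(u2))
 * for alpha = 1, 1/4 and 2/3 respectively, with L(u) the right-hand side evaluated by RKstep.
 */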
int RKintegrateDevice(struct model *hm, struct model *dm) {
/**
* \brief Runge-Kutta 3rd order time integration
*
* Integrate the Navier-Stokes equations in time with the
* Total Variation Diminishing Runge-Kutta algorithm of the 3rd order
*
* \date Dec 20, 2010
* \author Luca Massidda
*/
int ts;
// TIME CYCLE
for (ts = 0; ts <= hRun.tsn; ts++) {
// Output data
if ((ts % hRun.ssi) == 0) {
copyDeviceToHost(dm, hm);
printf("Saving time: %g \n", ts * hRun.dt);
printf("Particles: %i \n", hm->pn);
printData(hm);
outputVTK(hm, ts / hRun.ssi);
}
// Inlet conditions
inletDevice(dm);
// Calculate neighbour list
neighbourListDevice(dm);
// Save initial condition
backupDataDevice(dm);
// Step 1
RKstepDevice(dm, 1.0, ts * hRun.dt);
// Step 2
RKstepDevice(dm, 1.0 / 4.0, ts * hRun.dt);
// Step 3
RKstepDevice(dm, 2.0 / 3.0, ts * hRun.dt);
}
return 0;
}
int main() {
/**
* \brief armando2D v2.0
*
* An SPH code for non-stationary fluid dynamics.
* This is the reviewed and improved C version of Armando v1.0
* developed at CERN in 2008
*
* \date Oct 20, 2010
* \author Luca Massidda
*/
struct model hModel, dModel;
int i;
initHost(&hModel);
for (i = 0; i < 10; i++) {
hLoad[i].gx = 0.0f;
hLoad[i].gy = 0.0f;
hLoad[i].gz = 0.0f;
hLoad[i].w = 0.0f;
hOut[i].nX = 0.0f;
hOut[i].nY = 0.0f;
hOut[i].nZ = 0.0f;
}
//initBox(&hModel);
//initBath(&hModel);
//initDamBreak(&hModel);
//initChannel(&hModel);
//initESS(&hModel);
steadyESS(&hModel);
initDevice(&dModel);
copyHostToDevice(&hModel, &dModel);
RKintegrateDevice(&hModel, &dModel);
//RKintegrateHost(&hModel);
return 0;
}
| 095a5ca65696df501cf37a871870906b01cf67fb.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cutil_math.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/sequence.h>
#include <thrust/count.h>
#define PI 3.14159f
#define MAXP 2096552
#define MAXPI 8192
#define MAXN 96
#define MAXG 4193104
#define THREADS 256
struct pair {
int key;
int value;
};
struct grid {
float oX, oY, oZ;
float size;
int nX, nY, nZ;
};
struct simulation {
float minX, maxX;
float minY, maxY;
float minZ, maxZ;
float dt;
int tsn;
int ssi;
int nsi;
};
struct load {
float minX, maxX;
float minY, maxY;
float minZ, maxZ;
float gx, gy, gz;
float w;
};
struct fix {
float minX, maxX;
float minY, maxY;
float minZ, maxZ;
float vx, vy, vz;
float tau;
};
struct outlet {
float oX, oY, oZ;
float nX, nY, nZ;
float R;
};
struct inlet {
float oX, oY, oZ;
float nX, nY, nZ;
float R;
int Material;
float Mass, Smooth;
float Velocity;
float Density, Energy;
float Distance;
};
struct model {
int pn;
int* Material;
float* Mass;
float* Smooth;
float* PosX;
float* PosY;
float* PosZ;
float* VelX;
float* VelY;
float* VelZ;
float* Density;
float* Energy;
float* Pressure;
float* Sound;
float* VelDotX;
float* VelDotY;
float* VelDotZ;
float* DensityDot;
float* EnergyDot;
float* PosX0;
float* PosY0;
float* PosZ0;
float* VelX0;
float* VelY0;
float* VelZ0;
float* Density0;
float* Energy0;
int* List;
int* Hash;
int* Index;
int* SetStart;
int* SetStop;
int* IntDummy;
float* FloatDummy;
};
// Host Variables
int hMatType[10];
float hMatProp[10][10];
struct simulation hRun;
struct grid hGrid;
struct load hLoad[10];
struct fix hFix[10];
struct outlet hOut[10];
struct inlet hIn[10];
// Device Variables
__device__ int dMatType[10];
__device__ float dMatProp[10][10];
__device__ struct simulation dRun;
__device__ struct grid dGrid;
__device__ struct load dLoad[10];
__device__ struct fix dFix[10];
__device__ struct outlet dOut[10];
__device__ struct inlet dIn[10];
__host__ __device__ float kernelWendland(float r, float h) {
float q, alpha, w;
/**
* \brief Wendland kernel
*
* \date Feb 8, 2011
* \author Luca Massidda
*/
q = r / h;
// for 3D
alpha = 15.0f / (16.0f * PI * h * h * h);
// for 2D
//alpha = 7.0f / (4.0f * PI * h * h);
w = 0.0f;
if (q < 2) {
w = powf((1.0f - 0.5f*q),4);
w *= 1.0f + 2.0f*q;
w *= alpha;
}
return w;
}
__host__ __device__ float kernelDerivWendland(float r, float h) {
float q, alpha, dwdr;
/**
* \brief Wendland kernel derivative
*
* \date Feb 8, 2011
* \author Luca Massidda
*/
q = r / h;
// for 3D
alpha = 15.0f / (16.0f * PI * h * h * h);
// for 2D
//alpha = 7.0f / (4.0f * PI * h * h);
dwdr = 0.0f;
if (q < 2) {
dwdr = 5.0f / 8.0f * q * powf((q - 2.0f), 3) ;
dwdr *= alpha / h;
}
return dwdr;
}
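/*
 * Both routines evaluate the Wendland C2 kernel in the form used throughout this file,
 *   W(r, h) = alpha * (1 - q/2)^4 * (1 + 2 q),  q = r / h < 2,  W = 0 otherwise,
 * with alpha the dimensional prefactor defined above; its derivative then reduces to
 *   dW/dr = (alpha / h) * (5/8) * q * (q - 2)^3.
 */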
// Global code
void balanceMassMomentumHost(const int pn, const int* List,
const int* Material, const float* Mass, const float* Smooth,
const float* PosX, const float* PosY, const float* PosZ,
const float* VelX, const float* VelY, const float* VelZ,
const float* Density, const float* Pressure, const float* Sound,
float* DensityDot, float* VelDotX, float* VelDotY, float* VelDotZ) {
/**
* \brief Accumulates the continuity and momentum equation terms from particle interactions
*
* \date Jan 6, 2011
* \author Luca Massidda
*/
int ip, il, jp;
float iDensityDot;
float iVelDotX, iVelDotY, iVelDotZ;
float iSmooth, jMass;
float dx, dy, dz, dr, dvr, dwdr, f, w;
for (ip = 0; ip < pn; ip++) {
iDensityDot = 0.0f;
iVelDotX = 0.0f;
iVelDotY = 0.0f;
iVelDotZ = 0.0f;
iSmooth = Smooth[ip];
for (il = 0; il < MAXN; il++) {
jp = List[ip * MAXN + il];
jMass = Mass[jp];
dx = PosX[ip] - PosX[jp];
dy = PosY[ip] - PosY[jp];
dz = PosZ[ip] - PosZ[jp];
dr = sqrtf(dx * dx + dy * dy + dz * dz);
if (dr < (0.01f * iSmooth)) dr = 100.0f * iSmooth;
w = kernelWendland(dr, iSmooth);
dwdr = kernelDerivWendland(dr, iSmooth);
dvr = 0.0f;
dvr += (PosX[ip] - PosX[jp]) * (VelX[ip] - VelX[jp]);
dvr += (PosY[ip] - PosY[jp]) * (VelY[ip] - VelY[jp]);
dvr += (PosZ[ip] - PosZ[jp]) * (VelZ[ip] - VelZ[jp]);
iDensityDot += jMass * dvr * dwdr / dr;
// Calculate interparticle pressure action
f = -(Pressure[ip] / powf((Density[ip] + hMatProp[Material[ip]][0]), 2)
+ Pressure[jp] / powf((Density[jp] + hMatProp[Material[jp]][0]), 2));
iVelDotX += jMass * f * dwdr * (PosX[ip] - PosX[jp]) / dr;
iVelDotY += jMass * f * dwdr * (PosY[ip] - PosY[jp]) / dr;
iVelDotZ += jMass * f * dwdr * (PosZ[ip] - PosZ[jp]) / dr;
// Calculate shock correction for mass
f = Density[ip] - Density[jp];
f *= 2.0f * Sound[ip];
f /= Density[ip] + Density[jp] + hMatProp[Material[ip]][0] + hMatProp[Material[jp]][0];
iDensityDot += jMass * f * dwdr;
// Calculate shock correction for momentum
if (dvr < 0.0f) f = dvr;
else f = 0.0f;
f *= iSmooth / (dr * dr + 0.01f * iSmooth * iSmooth);
f *= 2.0f * Sound[ip];
f /= Density[ip] + Density[jp] + hMatProp[Material[ip]][0] + hMatProp[Material[jp]][0];
f *= 0.03f;
iVelDotX += jMass * f * dwdr * (PosX[ip] - PosX[jp]) / dr;
iVelDotY += jMass * f * dwdr * (PosY[ip] - PosY[jp]) / dr;
iVelDotZ += jMass * f * dwdr * (PosZ[ip] - PosZ[jp]) / dr;
// Calculate boundary repulsion
if (Material[ip] != Material[jp]) {
f = w * powf(Smooth[jp] * Sound[jp], 2);
iVelDotX += jMass / (Mass[ip] + jMass) * f * (PosX[ip] - PosX[jp]) / dr;
iVelDotY += jMass / (Mass[ip] + jMass) * f * (PosY[ip] - PosY[jp]) / dr;
iVelDotZ += jMass / (Mass[ip] + jMass) * f * (PosZ[ip] - PosZ[jp]) / dr;
}
}
DensityDot[ip] += iDensityDot;
VelDotX[ip] += iVelDotX;
VelDotY[ip] += iVelDotY;
VelDotZ[ip] += iVelDotZ;
}
}
__global__ void balanceMassMomentumDevice(const int pn, const int* List,
const int* Material, const float* Mass, const float* Smooth,
const float* PosX, const float* PosY, const float* PosZ,
const float* VelX, const float* VelY, const float* VelZ,
const float* Density, const float* Pressure, const float* Sound,
float* DensityDot, float* VelDotX, float* VelDotY, float* VelDotZ) {
/**
* \brief Accumulates the continuity and momentum equation terms from particle interactions
*
* \date Jan 6, 2011
* \author Luca Massidda
*/
int ip, il, jp;
float iDensityDot;
float iVelDotX, iVelDotY, iVelDotZ;
float iSmooth, jMass;
volatile float dx, dy, dz, dr, dvr, dwdr, f, phi, w;
ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip < pn) {
iDensityDot = 0.0f;
iVelDotX = 0.0f;
iVelDotY = 0.0f;
iVelDotZ = 0.0f;
iSmooth = Smooth[ip];
for (il = 0; il < MAXN; il++) {
jp = List[ip * MAXN + il];
jMass = Mass[jp];
dx = PosX[ip] - PosX[jp];
dy = PosY[ip] - PosY[jp];
dz = PosZ[ip] - PosZ[jp];
dr = sqrtf(dx * dx + dy * dy + dz * dz);
if (dr < (0.01f * iSmooth)) dr = 100.0f * iSmooth;
w = kernelWendland(dr, iSmooth);
dwdr = kernelDerivWendland(dr, iSmooth);
if (Material[ip] == Material[jp]) {
dvr = 0.0f;
dvr += (PosX[ip] - PosX[jp]) * (VelX[ip] - VelX[jp]);
dvr += (PosY[ip] - PosY[jp]) * (VelY[ip] - VelY[jp]);
dvr += (PosZ[ip] - PosZ[jp]) * (VelZ[ip] - VelZ[jp]);
iDensityDot += jMass * dvr * dwdr / dr;
// Calculate interparticle pressure action
f = -(Pressure[ip] / powf((Density[ip] + dMatProp[Material[ip]][0]), 2)
+ Pressure[jp] / powf((Density[jp] + dMatProp[Material[jp]][0]), 2));
f *= jMass * dwdr;
iVelDotX += f * (PosX[ip] - PosX[jp]) / dr;
iVelDotY += f * (PosY[ip] - PosY[jp]) / dr;
iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr;
// Calculate shock correction for mass
f = Density[ip] - Density[jp];
f *= 2.0f * Sound[ip];
f /= Density[ip] + Density[jp] + dMatProp[Material[ip]][0] + dMatProp[Material[jp]][0];
iDensityDot += jMass * f * dwdr;
// Calculate shock correction for momentum
phi = fabs(dvr * iSmooth / (dr * dr + 0.01f * iSmooth * iSmooth));
f = -1.0f*(Sound[ip] * phi + 0.75f * phi * phi) / (Density[ip] + dMatProp[Material[ip]][0]);
f *= jMass * dwdr;
if (dvr >= 0.0f) f = 0.0f;
iVelDotX += f * (PosX[ip] - PosX[jp]) / dr;
iVelDotY += f * (PosY[ip] - PosY[jp]) / dr;
iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr;
}
// Calculate boundary repulsion
if (Material[ip] != Material[jp]) {
f = w * powf(Smooth[jp] * Sound[jp], 2);
iVelDotX += f * (PosX[ip] - PosX[jp]) / dr;
iVelDotY += f * (PosY[ip] - PosY[jp]) / dr;
iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr;
}
}
DensityDot[ip] += iDensityDot;
VelDotX[ip] += iVelDotX;
VelDotY[ip] += iVelDotY;
VelDotZ[ip] += iVelDotZ;
}
}
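/*
 * In SPH form the routines above accumulate, for every particle i,
 *   drho_i/dt += sum_j m_j (v_i - v_j) . (r_ij / |r_ij|) dW/dr
 *   dv_i/dt   += -sum_j m_j (p_i / rho_i^2 + p_j / rho_j^2) (r_ij / |r_ij|) dW/dr
 * plus a density diffusion term, an artificial viscosity active only for approaching
 * particles and a kernel-shaped repulsion between particles of different materials that
 * acts as a boundary force; the device version restricts the physical terms to pairs of
 * the same material.
 */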
void balanceEnergyHost(const int pn, const int* Material,
const float* Pressure, const float* Density,
const float* DensityDot, float* EnergyDot) {
/**
* \brief Accumulates the energy equation term from the density rate
*
* \date Jan 9, 2011
* \author Luca Massidda
*/
int ip;
float iPressure, iDensity, iDensityDot;
float iEnergyDot;
for (ip = 0; ip < pn; ip++) {
iPressure = Pressure[ip];
iDensity = Density[ip];
iDensityDot = DensityDot[ip];
iEnergyDot = iPressure * iDensityDot;
iEnergyDot *= powf(iDensity + hMatProp[Material[ip]][0], -2.0f);
EnergyDot[ip] += iEnergyDot;
}
}
__global__ void balanceEnergyDevice(const int pn, const int* Material,
const float* Pressure, const float* Density,
const float* DensityDot, float* EnergyDot) {
/**
* \brief Accumulates the energy equation term from the density rate
*
* \date Jan 9, 2011
* \author Luca Massidda
*/
volatile int ip;
float iPressure, iDensity, iDensityDot;
float iEnergyDot;
ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip < pn) {
iPressure = Pressure[ip];
iDensity = Density[ip];
iDensityDot = DensityDot[ip];
//iEnergyDot = (iPressure * iDensityDot) / (iDensity * iDensity);
iEnergyDot = iPressure * iDensityDot;
iEnergyDot *= powf(iDensity + dMatProp[Material[ip]][0], -2.0f);
EnergyDot[ip] += iEnergyDot;
}
}
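/* The corresponding energy equation is de_i/dt += p_i / rho_i^2 * drho_i/dt, evaluated
 * with the total density rho_i = Density[i] + rho0 of the particle material. */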
__host__ __device__ float pressureGas(float* properties, float rho, float u) {
/**
* \brief Ideal gas Equation Of State
*
* p = (k -1) rho u
* c = (k(k -1) u)^0.5
*
* k = properties[1]
* pshift = properties[2]
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float p;
p = (properties[1] - 1.0f) * (rho + properties[0]) * u;
p += properties[2];
return p;
}
__host__ __device__ float pressurePoly(float* properties, float rho, float u) {
/**
* \brief Mie-Gruneisen polynomial Equation Of State
*
* p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression
* p = t1 mu + t2 mu^2 + b0 rho0 u in tension
*
* rho0 = properties[0];
* a1 = properties[1];
* a2 = properties[2];
* a3 = properties[3];
* b0 = properties[4];
* b1 = properties[5];
* t1 = properties[6];
* t2 = properties[7];
* pmin = properties[8];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float mu;
float p;
mu = rho / properties[0];
if (mu < 0)
p = (properties[6] * mu + properties[7] * mu*mu)
+ (properties[4] * properties[0] * u);
else
p = (properties[1] * mu + properties[2] * mu*mu
+ properties[3] * mu*mu*mu)
+ ((properties[4] + properties[5] * mu)
* properties[0] * u);
//if (p < properties[8]) p = properties[8];
return p;
}
__host__ __device__ float pressureShock(float* properties, float rho, float u) {
/**
* \brief Mie-Gruneisen Shock Hugoniot Equation Of State
*
* mu = rho / rho0 -1
* g = g * rho0 / rho
* ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2
* uh = 1/2 ph/rho0 * (mu / (1 + mu))
* p = ph + g * rho * (u - uh)
*
* rho0 = properties[0];
* c0 = properties[1];
* g0 = properties[2];
* s0 = properties[3];
* pmin = properties[4];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float mu;
float p, ph;
mu = rho / properties[0];
ph = (properties[0] * powf(properties[1], 2) * mu*(1.0f +mu))
/ powf((1.0f - (properties[3] -1.0f) * mu), 2);
p = ph + properties[2] * properties[0]
* (u - (0.5f * ph / properties[0] * (mu / (1.0f + mu))));
if (p < properties[4]) p = properties[4];
return p;
}
__host__ __device__ float pressureTait(float* properties, float rho, float u) {
/**
* \brief Tait Equation Of State
*
* p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0);
* c = c0;
*
* rho0 = properties[0];
* c0 = properties[1];
* pmin = properties[2];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float p;
//p = properties[0] * powf(properties[1], 2) / 7.0f
// * (powf((rho / properties[0]), 7) - 1.0f);
p = properties[0] * powf(properties[1], 2) / 7.0f
* (powf(rho / properties[0] +1.0f, 7) - 1.0f);
//if (p < properties[2]) p = properties[2];
return p;
}
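/*
 * Worked example (hypothetical values): for a water-like material with rho0 = 1000 and a
 * numerical sound speed c0 = 20, a density excess of 1% (rho = 10 above the reference)
 * gives p = 1000 * 20^2 / 7 * (1.01^7 - 1) ~= 4.1e3, which illustrates how stiffly the
 * weakly-compressible Tait law responds to small density deviations.
 */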
__host__ __device__ float pressureESS(float* properties, float rho, float u) {
/**
* \brief Simple Equation Of State
*
* mu = rho / rho0 -1
* p = rho0 c0^2 mu + g0 * rho0 * u
*
* rho0 = properties[0];
* c0 = properties[1];
* g0 = properties[2];
* pmin = properties[3];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float mu;
float p;
mu = rho / properties[0];
p = mu * properties[0] * powf(properties[1], 2) + u * properties[0] * properties[2];
if (p < properties[3]) p = properties[3];
return p;
}
__host__ __device__ float soundGas(float* properties ,float rho, float u) {
/**
* \brief Ideal gas Equation Of State
*
* p = (k -1) rho u
* c = (k(k -1) u)^0.5
*
* k = properties[1]
* pshift = properties[2]
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float c;
c = sqrtf(properties[1] * (properties[1] - 1.0f) * u);
return c;
}
__host__ __device__ float soundPoly(float* properties , float rho, float u) {
/**
* \brief Mie-Gruneisen polynomial Equation Of State
*
* p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression
* p = t1 mu + t2 mu^2 + b0 rho0 u in tension
*
* rho0 = properties[0];
* a1 = properties[1];
* a2 = properties[2];
* a3 = properties[3];
* b0 = properties[4];
* b1 = properties[5];
* t1 = properties[6];
* t2 = properties[7];
* pmin = properties[8];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float c;
c = sqrtf(properties[1] / (rho + properties[0]));
return c;
}
__host__ __device__ float soundShock(float* properties, float rho, float u) {
/**
* \brief Mie-Gruneisen Shock Hugoniot Equation Of State
*
* mu = rho / rho0 -1
* g = g * rho0 / rho
* ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2
* uh = 1/2 ph/rho0 * (mu / (1 + mu))
* p = ph + g * rho * (u - uh)
*
* rho0 = properties[0];
* c0 = properties[1];
* g0 = properties[2];
* s0 = properties[3];
* pmin = properties[4];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float c;
c = properties[1];
return c;
}
__host__ __device__ float soundTait(float* properties, float rho, float u) {
/**
* \brief Tait Equation Of State
*
* p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0);
* c = c0;
*
* rho0 = properties[0];
* c0 = properties[1];
* pmin = properties[2];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float c;
c = properties[1];
return c;
}
__host__ __device__ float soundESS(float* properties, float rho, float u) {
/**
* \brief Simple Equation Of State
*
* mu = rho / rho0 -1
* p = rho0 c0^2 mu + g0 * rho0 * u
*
* rho0 = properties[0];
* c0 = properties[1];
* g0 = properties[2];
* pmin = properties[3];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float c;
c = properties[1];
return c;
}
__host__ __device__ float densityPoly(float* properties , float rho) {
/**
* \brief Mie-Gruneisen polynomial Equation Of State
*
* p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression
* p = t1 mu + t2 mu^2 + b0 rho0 u in tension
*
* rho0 = properties[0];
* a1 = properties[1];
* a2 = properties[2];
* a3 = properties[3];
* b0 = properties[4];
* b1 = properties[5];
* t1 = properties[6];
* t2 = properties[7];
* pmin = properties[8];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float rho0;
rho0 = properties[0];
if (rho < -0.1f * rho0) rho = -0.1f*rho0;
return rho;
}
__host__ __device__ float densityShock(float* properties, float rho) {
/**
* \brief Mie-Gruneisen Shock Hugoniot Equation Of State
*
* mu = rho / rho0 -1
* g = g * rho0 / rho
* ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2
* uh = 1/2 ph/rho0 * (mu / (1 + mu))
* p = ph + g * rho * (u - uh)
*
* rho0 = properties[0];
* c0 = properties[1];
* g0 = properties[2];
* s0 = properties[3];
* pmin = properties[4];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float c0, pmin, rhomin;
c0 = properties[1];
pmin = properties[4];
rhomin = pmin / (c0 * c0);
//if (p < properties[4]) p = properties[4];
//if (rho < -0.1f * rho0) rho = -0.1f*rho0;
if (rho < rhomin) rho = rhomin;
return rho;
}
__host__ __device__ float densityTait(float* properties, float rho) {
/**
* \brief Tait Equation Of State
*
* p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0);
* c = c0;
*
* rho0 = properties[0];
* c0 = properties[1];
* pmin = properties[2];
*
* \date Jun 10, 2010
* \author Luca Massidda
*/
float rho0;
rho0 = properties[0];
if (rho < -0.1f * rho0) rho = -0.1f*rho0;
return rho;
}
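/*
 * The material type selects which of the routines above is used in updateParticles:
 * 0 boundary (Tait sound speed, pressure forced to zero), 1 ideal gas, 2 Mie-Gruneisen
 * polynomial, 3 Mie-Gruneisen shock Hugoniot, 4 Tait and 5 the simple ESS equation of state.
 */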
void updateParticlesHost(const int pn, const float alpha,
const int* Material,
const float* VelDotX, const float* VelDotY, const float* VelDotZ,
const float* DensityDot, const float* EnergyDot,
const float* PosX0, const float* PosY0, const float* PosZ0,
const float* VelX0, const float* VelY0, const float* VelZ0,
const float* Density0, const float* Energy0,
float* PosX, float* PosY, float* PosZ,
float* VelX, float* VelY, float* VelZ,
float* Density, float* Energy, float* Pressure, float* Sound) {
/**
* \brief Update particles
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
int ip;
int iMaterial;
for (ip = 0; ip < pn; ip++) if (Material[ip] != 0) {
PosX[ip] = PosX0[ip] + alpha * (PosX[ip] + hRun.dt * VelX[ip] - PosX0[ip]);
PosY[ip] = PosY0[ip] + alpha * (PosY[ip] + hRun.dt * VelY[ip] - PosY0[ip]);
PosZ[ip] = PosZ0[ip] + alpha * (PosZ[ip] + hRun.dt * VelZ[ip] - PosZ0[ip]);
VelX[ip] = VelX0[ip] + alpha * (VelX[ip] + hRun.dt * VelDotX[ip] - VelX0[ip]);
VelY[ip] = VelY0[ip] + alpha * (VelY[ip] + hRun.dt * VelDotY[ip] - VelY0[ip]);
VelZ[ip] = VelZ0[ip] + alpha * (VelZ[ip] + hRun.dt * VelDotZ[ip] - VelZ0[ip]);
Density[ip] = Density0[ip] + alpha * (Density[ip] + hRun.dt * DensityDot[ip] - Density0[ip]);
Energy[ip] = Energy0[ip] + alpha * (Energy[ip] + hRun.dt * EnergyDot[ip] - Energy0[ip]);
iMaterial = Material[ip];
if (hMatType[iMaterial] == 0) {
VelX[ip] = VelX0[ip];
VelY[ip] = VelY0[ip];
VelZ[ip] = VelZ0[ip];
Density[ip] = Density0[ip];
Energy[ip] = Energy0[ip];
}
switch (hMatType[iMaterial]) {
case (0) : // BOUNDARY
Density[ip] = densityTait(hMatProp[iMaterial], Density[ip]);
Pressure[ip] = 0.0f*pressureTait(hMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundTait(hMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (1) : // IDEAL GAS EOS
Pressure[ip] = pressureGas(hMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundGas(hMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (2) : // MIE-GRUNEISEN POLYNOMIAL EOS
Density[ip] = densityPoly(hMatProp[iMaterial], Density[ip]);
Pressure[ip] = pressurePoly(hMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundPoly(hMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (3) : // MIE-GRUNEISEN SHOCK EOS
Density[ip] = densityShock(hMatProp[iMaterial], Density[ip]);
Pressure[ip] = pressureShock(hMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundShock(hMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (4) : // TAIT EOS
Density[ip] = densityTait(hMatProp[iMaterial], Density[ip]);
Pressure[ip] = pressureTait(hMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundTait(hMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (5) : // ESS EOS
Pressure[ip] = pressureESS(hMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundESS(hMatProp[iMaterial], Density[ip], Energy[ip]);
break;
default :
Pressure[ip] = 0.0f;
}
}
}
__global__ void updateParticlesDevice(const int pn, const float alpha,
const int* Material,
const float* VelDotX, const float* VelDotY, const float* VelDotZ,
const float* DensityDot, const float* EnergyDot,
const float* PosX0, const float* PosY0, const float* PosZ0,
const float* VelX0, const float* VelY0, const float* VelZ0,
const float* Density0, const float* Energy0,
float* PosX, float* PosY, float* PosZ,
float* VelX, float* VelY, float* VelZ,
float* Density, float* Energy, float* Pressure, float* Sound) {
/**
* \brief Update particles
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
int ip;
int iMaterial;
ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip < pn) {
PosX[ip] = PosX0[ip] + alpha * (PosX[ip] + dRun.dt * VelX[ip] - PosX0[ip]);
PosY[ip] = PosY0[ip] + alpha * (PosY[ip] + dRun.dt * VelY[ip] - PosY0[ip]);
PosZ[ip] = PosZ0[ip] + alpha * (PosZ[ip] + dRun.dt * VelZ[ip] - PosZ0[ip]);
VelX[ip] = VelX0[ip] + alpha * (VelX[ip] + dRun.dt * VelDotX[ip] - VelX0[ip]);
VelY[ip] = VelY0[ip] + alpha * (VelY[ip] + dRun.dt * VelDotY[ip] - VelY0[ip]);
VelZ[ip] = VelZ0[ip] + alpha * (VelZ[ip] + dRun.dt * VelDotZ[ip] - VelZ0[ip]);
Density[ip] = Density0[ip] + alpha * (Density[ip] + dRun.dt * DensityDot[ip] - Density0[ip]);
Energy[ip] = Energy0[ip] + alpha * (Energy[ip] + dRun.dt * EnergyDot[ip] - Energy0[ip]);
iMaterial = Material[ip];
if (dMatType[iMaterial] == 0) {
VelX[ip] = VelX0[ip];
VelY[ip] = VelY0[ip];
VelZ[ip] = VelZ0[ip];
Density[ip] = Density0[ip];
Energy[ip] = Energy0[ip];
}
switch (dMatType[iMaterial]) {
case (0) : // BOUNDARY
Density[ip] = densityTait(dMatProp[iMaterial], Density[ip]);
Pressure[ip] = 0.0f*pressureTait(dMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundTait(dMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (1) : // IDEAL GAS EOS
Pressure[ip] = pressureGas(dMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundGas(dMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (2) : // MIE-GRUNEISEN POLYNOMIAL EOS
Density[ip] = densityPoly(dMatProp[iMaterial], Density[ip]);
Pressure[ip] = pressurePoly(dMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundPoly(dMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (3) : // MIE-GRUNEISEN SHOCK EOS
//Density[ip] = densityShock(dMatProp[iMaterial], Density[ip]);
Pressure[ip] = pressureShock(dMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundShock(dMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (4) : // TAIT EOS
Density[ip] = densityTait(dMatProp[iMaterial], Density[ip]);
Pressure[ip] = pressureTait(dMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundTait(dMatProp[iMaterial], Density[ip], Energy[ip]);
break;
case (5) : // ESS EOS
Pressure[ip] = pressureESS(dMatProp[iMaterial], Density[ip], Energy[ip]);
Sound[ip] = soundESS(dMatProp[iMaterial], Density[ip], Energy[ip]);
break;
default :
Pressure[ip] = 0.0f;
}
}
}
void updateLoadsHost(const int pn, const int* Material,
const float* PosX, const float* PosY, const float* PosZ,
const float* VelX, const float* VelY, const float* VelZ,
float* VelDotX, float* VelDotY, float* VelDotZ,
float* EnergyDot) {
int ip, i;
for (ip = 0; ip < pn; ip++) {
if (Material[ip] > 0) {
for (i = 0; i < 10; i++) {
if ((PosX[ip] > hLoad[i].minX) &&
(PosX[ip] < hLoad[i].maxX) &&
(PosY[ip] > hLoad[i].minY) &&
(PosY[ip] < hLoad[i].maxY) &&
(PosZ[ip] > hLoad[i].minZ) &&
(PosZ[ip] < hLoad[i].maxZ)) {
VelDotX[ip] += hLoad[i].gx;
VelDotY[ip] += hLoad[i].gy;
VelDotZ[ip] += hLoad[i].gz;
EnergyDot[ip] += hLoad[i].w;
}
if ((PosX[ip] > hFix[i].minX) &&
(PosX[ip] < hFix[i].maxX) &&
(PosY[ip] > hFix[i].minY) &&
(PosY[ip] < hFix[i].maxY) &&
(PosZ[ip] > hFix[i].minZ) &&
(PosZ[ip] < hFix[i].maxZ)) {
VelDotX[ip] += (hFix[i].vx - VelX[ip]) / hFix[i].tau;
VelDotY[ip] += (hFix[i].vy - VelY[ip]) / hFix[i].tau;
VelDotZ[ip] += (hFix[i].vz - VelZ[ip]) / hFix[i].tau;
}
}
}
}
}
__global__ void updateLoadsDevice(const int pn, const int* Material,
const float* PosX, const float* PosY, const float* PosZ,
const float* VelX, const float* VelY, const float* VelZ,
float* VelDotX, float* VelDotY, float* VelDotZ,
float* EnergyDot, const float time) {
int ip, i;
float x1, x2, x3, f;
ip = threadIdx.x + blockDim.x * blockIdx.x;
if ((ip < pn) && (Material[ip] > 0)) {
for (i = 0; i < 10; i++) {
if ((PosX[ip] > dLoad[i].minX) &&
(PosX[ip] < dLoad[i].maxX) &&
(PosY[ip] > dLoad[i].minY) &&
(PosY[ip] < dLoad[i].maxY) &&
(PosZ[ip] > dLoad[i].minZ) &&
(PosZ[ip] < dLoad[i].maxZ)) {
VelDotX[ip] += dLoad[i].gx;
VelDotY[ip] += dLoad[i].gy;
VelDotZ[ip] += dLoad[i].gz;
EnergyDot[ip] += dLoad[i].w;
}
if ((PosX[ip] > dFix[i].minX) &&
(PosX[ip] < dFix[i].maxX) &&
(PosY[ip] > dFix[i].minY) &&
(PosY[ip] < dFix[i].maxY) &&
(PosZ[ip] > dFix[i].minZ) &&
(PosZ[ip] < dFix[i].maxZ)) {
VelDotX[ip] += (dFix[i].vx - VelX[ip]) / dFix[i].tau;
VelDotY[ip] += (dFix[i].vy - VelY[ip]) / dFix[i].tau;
VelDotZ[ip] += (dFix[i].vz - VelZ[ip]) / dFix[i].tau;
}
}
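/* Analytic energy deposition term: a Gaussian profile in x1 and x2 with an exponential
   decay along x3, pulsed in time via fmodf; its application to EnergyDot is currently
   commented out below. */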
x1 = 100.0f * PosZ[ip];
x2 = 100.0f * (PosY[ip] -0.04f);
x3 = 100.0f * (PosX[ip] +0.10f);
x3 -= x2 * 3.732f;
if (x3 < 0.0f) x3 = 0.0f;
f = expf(-0.5f * (powf(x1 / 5.0f, 2) + powf(x2 / 1.5f, 2)));
f *= 0.00130948f;
f *= expf(-x3 / 15.5814f);
f *= 1.0f - expf(-0.654066f - x3 / 6.72606f);
f *= 2.0e12 / 10388.0f;
f *= 50.0f;
if (fmodf(time, 1.0f/20.0f) > 0.001f) f = 0.0f;
//EnergyDot[ip] += f;
}
}
// Host code
int initHost(struct model *hm) {
hm->Material = (int *) malloc(MAXP * sizeof(int));
hm->Mass = (float *) malloc(MAXP * sizeof(float));
hm->Smooth = (float *) malloc(MAXP * sizeof(float));
hm->PosX = (float *) malloc(MAXP * sizeof(float));
hm->PosY = (float *) malloc(MAXP * sizeof(float));
hm->PosZ = (float *) malloc(MAXP * sizeof(float));
hm->VelX = (float *) malloc(MAXP * sizeof(float));
hm->VelY = (float *) malloc(MAXP * sizeof(float));
hm->VelZ = (float *) malloc(MAXP * sizeof(float));
hm->Density = (float *) malloc(MAXP * sizeof(float));
hm->Energy = (float *) malloc(MAXP * sizeof(float));
hm->Pressure = (float *) malloc(MAXP * sizeof(float));
hm->Sound = (float *) malloc(MAXP * sizeof(float));
hm->VelDotX = (float *) malloc(MAXP * sizeof(float));
hm->VelDotY = (float *) malloc(MAXP * sizeof(float));
hm->VelDotZ = (float *) malloc(MAXP * sizeof(float));
hm->DensityDot = (float *) malloc(MAXP * sizeof(float));
hm->EnergyDot = (float *) malloc(MAXP * sizeof(float));
hm->PosX0 = (float *) malloc(MAXP * sizeof(float));
hm->PosY0 = (float *) malloc(MAXP * sizeof(float));
hm->PosZ0 = (float *) malloc(MAXP * sizeof(float));
hm->VelX0 = (float *) malloc(MAXP * sizeof(float));
hm->VelY0 = (float *) malloc(MAXP * sizeof(float));
hm->VelZ0 = (float *) malloc(MAXP * sizeof(float));
hm->Density0 = (float *) malloc(MAXP * sizeof(float));
hm->Energy0 = (float *) malloc(MAXP * sizeof(float));
hm->Hash = (int *) malloc(MAXP * sizeof(int));
hm->Index = (int *) malloc(MAXP * sizeof(int));
hm->List = (int *) malloc(MAXP * MAXN * sizeof(int));
hm->IntDummy = (int *) malloc(MAXP * sizeof(int));
hm->FloatDummy = (float *) malloc(MAXP * sizeof(float));
hm->SetStart = (int *) malloc(MAXG * sizeof(int));
hm->SetStop = (int *) malloc(MAXG * sizeof(int));
return 0;
}
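/*
 * All particle arrays are allocated at the fixed capacity MAXP (and the cell arrays at
 * MAXG) so that the inlet routines can append new particles by simply advancing pn,
 * without any reallocation on either the host or the device.
 */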
int initDevice(struct model *dm) {
size_t available, total;
cudaMalloc((void**) &(dm->Material), (MAXP * sizeof(int)));
cudaMalloc((void**) &(dm->Mass), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->Smooth), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->PosX), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->PosY), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->PosZ), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->VelX), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->VelY), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->VelZ), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->Density), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->Energy), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->Pressure), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->Sound), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->VelDotX), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->VelDotY), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->VelDotZ), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->DensityDot), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->EnergyDot), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->PosX0), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->PosY0), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->PosZ0), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->VelX0), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->VelY0), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->VelZ0), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->Density0), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->Energy0), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->Hash), (MAXP * sizeof(int)));
cudaMalloc((void**) &(dm->Index), (MAXP * sizeof(int)));
cudaMalloc((void**) &(dm->List), (MAXP * MAXN * sizeof(int)));
cudaMalloc((void**) &(dm->IntDummy), (MAXP * sizeof(int)));
cudaMalloc((void**) &(dm->FloatDummy), (MAXP * sizeof(float)));
cudaMalloc((void**) &(dm->SetStart), (MAXG * sizeof(int)));
cudaMalloc((void**) &(dm->SetStop), (MAXG * sizeof(int)));
cudaMemGetInfo(&available, &total);
printf("Available memory %d of %d MB\n", available/1024/1024, total/1024/1024);
return 0;
}
int copyHostToDevice(struct model *hm, struct model *dm) {
dm->pn = hm->pn;
cudaMemcpy(dm->Material, hm->Material, (MAXP * sizeof(int)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->Mass, hm->Mass, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->Smooth, hm->Smooth, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->PosX, hm->PosX, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->PosY, hm->PosY, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->PosZ, hm->PosZ, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->VelX, hm->VelX, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->VelY, hm->VelY, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->VelZ, hm->VelZ, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->Density, hm->Density, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->Energy, hm->Energy, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->Pressure, hm->Pressure, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->Sound, hm->Sound, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->VelDotX, hm->VelDotX, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->VelDotY, hm->VelDotY, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->VelDotZ, hm->VelDotZ, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->DensityDot, hm->DensityDot, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->EnergyDot, hm->EnergyDot, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->PosX0, hm->PosX0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->PosY0, hm->PosY0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->PosZ0, hm->PosZ0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->VelX0, hm->VelX0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->VelY0, hm->VelY0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->VelZ0, hm->VelZ0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->Density0, hm->Density0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->Energy0, hm->Energy0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->List, hm->List, (MAXP * MAXN * sizeof(int)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->Hash, hm->Hash, (MAXP * sizeof(int)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->Index, hm->Index, (MAXP * sizeof(int)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->IntDummy, hm->IntDummy, (MAXP * sizeof(int)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->FloatDummy, hm->FloatDummy, (MAXP * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->SetStart, hm->SetStart, (MAXG * sizeof(int)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->SetStop, hm->SetStop, (MAXG * sizeof(int)), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(dGrid, &hGrid, sizeof(struct grid));
cudaMemcpyToSymbol(dMatType, hMatType, 10 * sizeof(int));
cudaMemcpyToSymbol(dMatProp, hMatProp, 100 * sizeof(float));
cudaMemcpyToSymbol(dRun, &hRun, sizeof(struct simulation));
cudaMemcpyToSymbol(dLoad, &hLoad, 10 * sizeof(struct load));
cudaMemcpyToSymbol(dFix, &hFix, 10 * sizeof(struct fix));
cudaMemcpyToSymbol(dIn, &hIn, 10 * sizeof(struct inlet));
cudaMemcpyToSymbol(dOut, &hOut, 10 * sizeof(struct outlet));
return 0;
}
int copyDeviceToHost(struct model *dm, struct model *hm) {
hm->pn = dm->pn;
cudaMemcpy(hm->Material, dm->Material, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->Mass, dm->Mass, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->Smooth, dm->Smooth, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->PosX, dm->PosX, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->PosY, dm->PosY, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->PosZ, dm->PosZ, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->VelX, dm->VelX, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->VelY, dm->VelY, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->VelZ, dm->VelZ, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->Density, dm->Density, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->Energy, dm->Energy, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->Pressure, dm->Pressure, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->Sound, dm->Sound, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->VelDotX, dm->VelDotX, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->VelDotY, dm->VelDotY, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->VelDotZ, dm->VelDotZ, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->DensityDot, dm->DensityDot, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->EnergyDot, dm->EnergyDot, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->PosX0, dm->PosX0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->PosY0, dm->PosY0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->PosZ0, dm->PosZ0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->VelX0, dm->VelX0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->VelY0, dm->VelY0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->VelZ0, dm->VelZ0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->Density0, dm->Density0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->Energy0, dm->Energy0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->List, dm->List, (MAXP * MAXN * sizeof(int)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->Hash, dm->Hash, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->Index, dm->Index, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->IntDummy, dm->IntDummy, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->FloatDummy, dm->FloatDummy, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->SetStart, dm->SetStart, (MAXG * sizeof(int)), cudaMemcpyDeviceToHost);
cudaMemcpy(hm->SetStop, dm->SetStop, (MAXG * sizeof(int)), cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&hGrid, dGrid, sizeof(struct grid));
return 0;
}
int backupDataHost(struct model *hm) {
memcpy(hm->PosX0, hm->PosX, MAXP * sizeof(float));
memcpy(hm->PosY0, hm->PosY, MAXP * sizeof(float));
memcpy(hm->PosZ0, hm->PosZ, MAXP * sizeof(float));
memcpy(hm->VelX0, hm->VelX, MAXP * sizeof(float));
memcpy(hm->VelY0, hm->VelY, MAXP * sizeof(float));
memcpy(hm->VelZ0, hm->VelZ, MAXP * sizeof(float));
memcpy(hm->Density0, hm->Density, MAXP * sizeof(float));
memcpy(hm->Energy0, hm->Energy, MAXP * sizeof(float));
return 0;
}
int backupDataDevice(struct model *dm) {
cudaMemcpy(dm->PosX0, dm->PosX, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice);
cudaMemcpy(dm->PosY0, dm->PosY, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice);
cudaMemcpy(dm->PosZ0, dm->PosZ, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice);
cudaMemcpy(dm->VelX0, dm->VelX, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice);
cudaMemcpy(dm->VelY0, dm->VelY, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice);
cudaMemcpy(dm->VelZ0, dm->VelZ, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice);
cudaMemcpy(dm->Density0, dm->Density, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice);
cudaMemcpy(dm->Energy0, dm->Energy, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice);
return 0;
}
/*
int initRun() {
FILE *stream;
char tok[10];
int i, m, p, pn;
int iv;
float fv;
int mpn, mpp[10];
// Open stream file
stream = fopen("armando.run", "r");
while (!feof(stream)) {
sprintf(tok, " ");
fscanf(stream, "%s", tok);
if (strcmp(tok, "MAT") == 0) {
fscanf(stream, "%i", &iv);
if ((iv > 0) && (iv <= 50))
m = iv;
for (p = 0; p < 10; p++)
hMatProp[m][p] = 0.0;
if ((m > 0) && (m <= 10))
pn = 3;
if ((m > 10) && (m <= 20))
pn = 9;
if ((m > 20) && (m <= 30))
pn = 10;
if ((m > 30) && (m <= 40))
pn = 5;
if ((m > 40) && (m <= 50))
pn = 3;
for (p = 0; p < pn; p++) {
fscanf(stream, "%f", &fv);
hMatProp[m][p] = fv;
}
printf("Material %d\n", m);
printf("hMatProp: \n");
for (p = 0; p < pn; p++)
printf(" %f\n", hMatProp[m][p]);
printf("\n");
}
if (strcmp(tok, "TIME") == 0) {
fscanf(stream, "%f", &fv);
if (fv > 0.0)
hRun.dt = fv;
fscanf(stream, "%i", &iv);
if (iv > 0)
hRun.tsn = iv;
fscanf(stream, "%i", &iv);
if (iv > 0)
hRun.ssi = iv;
printf("Time step: %f\n", hRun.dt);
printf("Steps: %i\n", hRun.tsn);
printf("Save step: %i\n", hRun.ssi);
printf("\n");
}
if (strcmp(tok, "LIMITS") == 0) {
fscanf(stream, "%f", &fv);
hRun.minX = fv;
fscanf(stream, "%f", &fv);
hRun.maxX = fv;
fscanf(stream, "%f", &fv);
hRun.minY = fv;
fscanf(stream, "%f", &fv);
hRun.maxY = fv;
printf("Domain limits: \n");
printf("X: %+e - %+e \n", hRun.minX, hRun.maxX);
printf("Y: %+e - %+e \n", hRun.minY, hRun.maxY);
printf("\n");
}
if (strcmp(tok, "MONITORS") == 0) {
fscanf(stream, "%i", &iv);
mpn = iv;
for (i = 0; i < mpn; i++) {
fscanf(stream, "%i", &iv);
mpp[i] = iv;
}
printf("Monitored particles: %i \n", mpn);
if (mpn > 0) {
printf("Index:");
for (i = 0; i < mpn; i++)
printf(" %i", mpp[i]);
printf("\n");
printf("\n");
}
}
}
fclose(stream);
hSound = hSmooth / hRun.dt;
return 0;
}
*/
int scanData(struct model *hm) {
FILE *stream;
int i;
float fv1, fv2, fv3, fv4;
int iv;
// Stream file position
stream = fopen("pos.txt", "r");
for (i = 0; fscanf(stream, "%e %e %e", &fv1, &fv2, &fv3) == 3; i++) {
hm->PosX[i] = fv1;
hm->PosY[i] = fv2;
hm->PosZ[i] = fv3;
}
fclose(stream);
hm->pn = i;
// Stream file velocity
stream = fopen("vel.txt", "r");
for (i = 0; i < hm->pn; i++) {
fscanf(stream, "%e %e %e", &fv1, &fv2, &fv3);
hm->VelX[i] = fv1;
hm->VelY[i] = fv2;
hm->VelZ[i] = fv3;
}
fclose(stream);
// Stream file info
stream = fopen("info.txt", "r");
for (i = 0; i < hm->pn; i++) {
fscanf(stream, "%i %e %e ", &iv, &fv1, &fv2);
hm->Material[i] = iv;
hm->Mass[i] = fv1;
hm->Smooth[i] = fv2;
}
fclose(stream);
// Stream file field
stream = fopen("field.txt", "r");
for (i = 0; i < hm->pn; i++) {
fscanf(stream, "%e %e %e %e ", &fv1, &fv2, &fv3, &fv4);
hm->Density[i] = fv1;
hm->Energy[i] = fv2;
hm->Pressure[i] = fv3;
hm->Sound[i] = fv4;
}
fclose(stream);
return 0;
}
int printData(struct model *hm) {
/**
* \brief Particle data file output
*
* Saves particle data on a disk file
*
* \date Oct 21, 2010
* \author Luca Massidda
*/
FILE *stream;
int i;
// Stream file position
stream = fopen("pos.txt", "w");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->PosX[i], hm->PosY[i], hm->PosZ[i]);
fclose(stream);
// Stream file velocity
stream = fopen("vel.txt", "w");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->VelX[i], hm->VelY[i], hm->VelZ[i]);
fclose(stream);
// Stream file info
stream = fopen("info.txt", "w");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%i %+14.8e %+14.8e \n", hm->Material[i], hm->Mass[i], hm->Smooth[i]);
fclose(stream);
// Stream file field
stream = fopen("field.txt", "w");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+14.8e %+14.8e %+14.8e %+14.8e \n", hm->Density[i], hm->Energy[i], hm->Pressure[i], hm->Sound[i]);
fclose(stream);
/*
// Stream file debug
stream = fopen("debug.txt", "w");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%d %d %d %d %d\n", i, hm->Index[i], hm->Hash[i], hm->SetStart[hm->Hash[i]], hm->SetStop[hm->Hash[i]]);
fclose(stream);
// Stream file list
stream = fopen("list.txt", "w");
for (i = 0; i < hm->pn; i++) {
for (j = 0; j < 10; j++)
fprintf(stream, "%d ", hm->List[i*MAXN +j]);
fprintf(stream, "\n");
}
fclose(stream);
*/
return 0;
}
int outputVTK(struct model *hm, int ss) {
/**
* \brief Output Data file
*
* Saves vtk data file
*
* \date Oct 21, 2010
* \author Luca Massidda
*/
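// The cloud is written in the legacy ASCII VTK unstructured grid format:
// every particle becomes a VTK_VERTEX cell (cell type 1), so the CELLS
// section stores hm->pn pairs "1 i" (hence the declared size 2*pn),
// followed by the scalar and vector point data blocks.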
FILE *stream;
char filename[80];
int i;
// Stream position file
sprintf(filename, "out%05d.vtk", ss);
stream = fopen(filename, "w");
fprintf(stream, "# vtk DataFile Version 2.0\n");
fprintf(stream, "Unstructured Grid Example\n");
fprintf(stream, "ASCII\n");
fprintf(stream, "DATASET UNSTRUCTURED_GRID\n");
fprintf(stream, "POINTS %i float\n", hm->pn);
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+e %+e %+e \n", hm->PosX[i], hm->PosY[i], hm->PosZ[i]);
fprintf(stream, "CELLS %i %i \n", hm->pn, 2*hm->pn);
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%i %i \n", 1, i);
fprintf(stream, "CELL_TYPES %i \n", hm->pn);
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%i \n", 1);
fprintf(stream, "POINT_DATA %i \n", hm->pn);
fprintf(stream, "SCALARS material int 1 \n", hm->pn);
fprintf(stream, "LOOKUP_TABLE default\n");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+d \n", hm->Material[i]);
fprintf(stream, "SCALARS density float 1 \n", hm->pn);
fprintf(stream, "LOOKUP_TABLE default\n");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+e \n", hm->Density[i]);
fprintf(stream, "SCALARS pressure float 1 \n", hm->pn);
fprintf(stream, "LOOKUP_TABLE default\n");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+e \n", hm->Pressure[i]);
fprintf(stream, "SCALARS energy float 1 \n", hm->pn);
fprintf(stream, "LOOKUP_TABLE default\n");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+e \n", hm->Energy[i]);
fprintf(stream, "VECTORS velocity float\n");
for (i = 0; i < hm->pn; i++)
fprintf(stream, "%+e %+e %+e \n", hm->VelX[i], hm->VelY[i], hm->VelZ[i]);
fclose(stream);
/*
for (i = 0; i < hm->pn; i++) printf("%d %d \n", i, hm->Hash[i]);
printf("\n\n\n");
for (i = 0; i < hm->SetStart.size(); i++) printf("%d %d %d \n", i, hm->SetStart[i], hm->SetStop[i]);
for (i = 0; i < hm->pn; i++) {
printf("%d - ", i);
for (j = 0; j < MAXN; j++) printf("%d ", hm->List[i*MAXN +j]);
printf("\n");
}
*/
return 0;
}
void initFree(struct model *hm) {
int i, j, k, m, b, pi;
double rho, c0, pmin;
double dr;
m = 1;
b = 2;
rho = 1000.0f;
c0 = 50.0f;
pmin = -1.e12;
hMatType[m] = 4;
hMatProp[m][0] = rho;
hMatProp[m][1] = c0;
hMatProp[m][2] = pmin;
hMatType[b] = 0;
hMatProp[b][0] = rho;
hMatProp[b][1] = c0;
hMatProp[b][2] = pmin;
dr = 0.1; // x4
pi = 0;
for (k = 0; k < 10; k++) {
for (j = 0; j < 10; j++) {
for (i = 0; i < 10; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = m;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = 0; k < 10; k++) {
for (j = -2; j < -1; j++) {
for (i = 0; i < 10; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
hm->pn = pi;
for (i = 0; i < hm->pn; i++) {
hm->Mass[i] = rho * dr * dr * dr;
hm->Smooth[i] = 1.2f * dr;
hm->Sound[i] = c0;
}
hRun.minX = -2.5f;
hRun.maxX = 2.5f;
hRun.minY = -2.5f;
hRun.maxY = 2.5f;
hRun.minZ = -2.5f;
hRun.maxZ = 2.5f;
hRun.dt = 2.0e-3; //1.0e-3;
hRun.tsn = 600; //1000;
hRun.ssi = 200;
hGrid.oX = hRun.minX;
hGrid.oY = hRun.minY;
hGrid.oZ = hRun.minZ;
hGrid.size = 2.0f * 1.2f * dr;
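// The cell size equals twice the smoothing length (h = 1.2*dr), so every
// neighbour inside the 2h support radius is found in the 3x3x3 block of
// cells scanned by the neighbour list builder.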
hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1;
hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1;
hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1;
hLoad[0].minX = hRun.minX;
hLoad[0].maxX = hRun.maxX;
hLoad[0].minY = hRun.minY;
hLoad[0].maxY = hRun.maxY;
hLoad[0].minZ = hRun.minZ;
hLoad[0].maxZ = hRun.maxZ;
hLoad[0].gy = -9.81f;
printf("Freefall\n");
printf("Particles: %i \n", hm->pn);
}
void initBox(struct model *hm) {
int i, j, k, m, b, q, pi;
double rho, c0, pmin;
double dr;
q = 1;
m = 1;
b = 2;
rho = 1000.0f;
c0 = 40.0f;
pmin = -1.e12;
hMatType[m] = 4;
hMatProp[m][0] = rho;
hMatProp[m][1] = c0;
hMatProp[m][2] = pmin;
hMatType[b] = 0;
hMatProp[b][0] = rho;
hMatProp[b][1] = c0;
hMatProp[b][2] = pmin;
dr = 0.1f / q;
pi = 0;
for (k = 0; k < 10 * q; k++) {
for (j = 0; j < 10 * q; j++) {
for (i = 0; i < 15 * q; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = m;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -2; k < 20 * q +2; k++) {
for (j = -2; j < -1; j++) {
for (i = -2; i < 20 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -2; k < -1; k++) {
for (j = -1; j < 15 * q; j++) {
for (i = -2; i < 20 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = 20 * q +1; k < 20 * q +2 ; k++) {
for (j = -1; j < 15 * q; j++) {
for (i = -2; i < 20 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -1; k < 20 * q +1; k++) {
for (j = -1; j < 15 * q; j++) {
for (i = -2; i < -1; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -1; k < 20 * q +1; k++) {
for (j = -1; j < 15 * q; j++) {
for (i = 20 * q +1; i < 20 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
hm->pn = pi;
for (i = 0; i < hm->pn; i++) {
hm->Mass[i] = rho * dr * dr * dr;
hm->Smooth[i] = 1.2f * dr;
hm->Sound[i] = c0;
}
hRun.minX = -2.5f;
hRun.maxX = 2.5f;
hRun.minY = -2.5f;
hRun.maxY = 2.5f;
hRun.minZ = -2.5f;
hRun.maxZ = 2.5f;
hRun.dt = dr / c0;
hRun.tsn = 800 * q;
hRun.ssi = 20 * q;
hGrid.oX = hRun.minX;
hGrid.oY = hRun.minY;
hGrid.oZ = hRun.minZ;
hGrid.size = 2.0f * 1.2f * dr;
hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1;
hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1;
hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1;
hLoad[0].minX = hRun.minX;
hLoad[0].maxX = hRun.maxX;
hLoad[0].minY = hRun.minY;
hLoad[0].maxY = hRun.maxY;
hLoad[0].minZ = hRun.minZ;
hLoad[0].maxZ = hRun.maxZ;
hLoad[0].gy = -9.81f;
printf("Box\n");
printf("Particles: %i \n", hm->pn);
}
void initBath(struct model *hm) {
int i, j, k, m, b, q, pi;
double rho, c0, pmin;
double dr;
q = 15;
m = 1;
b = 2;
rho = 1000.0f;
c0 = 40.0f;
pmin = -1.e12;
hMatType[m] = 4;
hMatProp[m][0] = rho;
hMatProp[m][1] = c0;
hMatProp[m][2] = pmin;
hMatType[b] = 0;
hMatProp[b][0] = rho;
hMatProp[b][1] = c0;
hMatProp[b][2] = pmin;
dr = 0.1f / q;
pi = 0;
for (k = 0; k < 10 * q; k++) {
for (j = 0; j < 10 * q; j++) {
for (i = 0; i < 10 * q; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = m;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -2; k < 10 * q +2; k++) {
for (j = -2; j < -1; j++) {
for (i = -2; i < 10 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -2; k < -1; k++) {
for (j = -1; j < 12 * q; j++) {
for (i = -2; i < 10 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = 10 * q +1; k < 10 * q +2 ; k++) {
for (j = -1; j < 12 * q; j++) {
for (i = -2; i < 10 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -1; k < 10 * q +1; k++) {
for (j = -1; j < 12 * q; j++) {
for (i = -2; i < -1; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
for (k = -1; k < 10 * q +1; k++) {
for (j = -1; j < 12 * q; j++) {
for (i = 10 * q +1; i < 10 * q +2; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 1.0f;
pi++;
}
}
}
hm->pn = pi;
for (i = 0; i < hm->pn; i++) {
hm->Mass[i] = rho * dr * dr * dr;
hm->Smooth[i] = 1.2f * dr;
hm->Sound[i] = c0;
}
hRun.minX = -2.5f;
hRun.maxX = 2.5f;
hRun.minY = -2.5f;
hRun.maxY = 2.5f;
hRun.minZ = -2.5f;
hRun.maxZ = 2.5f;
hRun.dt = dr / c0;
hRun.tsn = 1000 * q;
hRun.ssi = 20 * q;
hGrid.oX = hRun.minX;
hGrid.oY = hRun.minY;
hGrid.oZ = hRun.minZ;
hGrid.size = 2.0f * 1.2f * dr;
hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1;
hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1;
hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1;
hLoad[0].minX = hRun.minX;
hLoad[0].maxX = hRun.maxX;
hLoad[0].minY = hRun.minY;
hLoad[0].maxY = hRun.maxY;
hLoad[0].minZ = hRun.minZ;
hLoad[0].maxZ = hRun.maxZ;
hLoad[0].gy = -9.81f;
hOut[0].oX = 5 * q * dr;
hOut[0].oY = dr;
hOut[0].oZ = 5 * q * dr;
hOut[0].nX = 0.0f;
hOut[0].nY = 1.0f;
hOut[0].nZ = 0.0f;
hOut[0].R = 2.0f*q*dr;
hIn[0].Material = m;
hIn[0].Mass = rho * dr * dr * dr;
hIn[0].Smooth = 1.2f * dr;
hIn[0].Density = 0.0f;
hIn[0].Energy = 0.0f;
hIn[0].oX = 0.f * q * dr;
hIn[0].oY = 15.f * q * dr;
hIn[0].oZ = 5.f * q * dr;
hIn[0].nX = 1.0f;
hIn[0].nY = 0.0f;
hIn[0].nZ = 0.0f;
hIn[0].Velocity = 1.5f;
hIn[0].R = 2.0f *q*dr;
printf("Bath\n");
printf("Particles: %i \n", hm->pn);
printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ);
}
void initChannel(struct model *hm) {
int i, j, k, m, b, q, pi;
double rho, c0, pmin;
double dr;
q = 1;
m = 1;
b = 2;
rho = 1000.0f;
c0 = 20.0f;
pmin = -1.e12;
hMatType[m] = 4;
hMatProp[m][0] = rho;
hMatProp[m][1] = c0;
hMatProp[m][2] = pmin;
hMatType[b] = 0;
hMatProp[b][0] = rho;
hMatProp[b][1] = c0;
hMatProp[b][2] = pmin;
dr = 0.1f / q;
pi = 0;
/*
for (k = 0; k < 10 * q; k++) {
for (j = 0; j < 10 * q; j++) {
for (i = 0; i < 10 * q; i++) {
hm->PosX[pi] = i * dr + 0.0 * dr;
hm->PosY[pi] = j * dr + 0.0 * dr;
hm->PosZ[pi] = k * dr + 0.0 * dr;
hm->VelX[pi] = 0.0;
hm->VelY[pi] = 0.0;
hm->VelZ[pi] = 0.0;
hm->Material[pi] = m;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0;
hm->Pressure[pi] = 1.0;
pi++;
}
}
}
*/
for (k = -10 * q -2; k <= 10 * q +2; k++) {
for (j = -2; j <= -2; j++) {
for (i = 0; i <= 100 * q; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = -10 * q -2; k <= -10 * q -2; k++) {
for (j = -1; j <= 15 * q; j++) {
for (i = 0; i < 100 * q; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = 10 * q +2; k <= 10 * q +2 ; k++) {
for (j = -1; j <= 15 * q; j++) {
for (i = 0; i <= 100 * q; i++) {
hm->PosX[pi] = i * dr + 0.0f * dr;
hm->PosY[pi] = j * dr + 0.0f * dr;
hm->PosZ[pi] = k * dr + 0.0f * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
hm->pn = pi;
for (i = 0; i < hm->pn; i++) {
hm->Mass[i] = rho * dr * dr * dr;
hm->Smooth[i] = 1.2f * dr;
hm->Sound[i] = c0;
}
hRun.minX = -0.5f;
hRun.maxX = 10.5f;
hRun.minY = -0.5f;
hRun.maxY = 3.0f;
hRun.minZ = -1.5f;
hRun.maxZ = 1.5f;
hRun.dt = dr / c0;
hRun.tsn = 10000 * q;
hRun.ssi = 200 * q;
hGrid.oX = hRun.minX;
hGrid.oY = hRun.minY;
hGrid.oZ = hRun.minZ;
hGrid.size = 2.0f * 1.2f * dr;
hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1;
hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1;
hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1;
hLoad[0].minX = hRun.minX;
hLoad[0].maxX = hRun.maxX;
hLoad[0].minY = hRun.minY;
hLoad[0].maxY = hRun.maxY;
hLoad[0].minZ = hRun.minZ;
hLoad[0].maxZ = hRun.maxZ;
hLoad[0].gy = -9.81f;
hFix[0].minX = hRun.minX;
hFix[0].maxX = 2.0f * q * dr;
hFix[0].minY = hRun.minY;
hFix[0].maxY = hRun.maxY;
hFix[0].minZ = hRun.minZ;
hFix[0].maxZ = hRun.maxZ;
hFix[0].vx = 2.0f;
hFix[0].vy = 0.0f;
hFix[0].vz = 0.0f;
hFix[0].tau = 10.0 * hRun.dt;
hFix[1].minX = 97 * q * dr;
hFix[1].maxX = hRun.maxX;
hFix[1].minY = hRun.minY;
hFix[1].maxY = hRun.maxY;
hFix[1].minZ = hRun.minZ;
hFix[1].maxZ = hRun.maxZ;
hFix[1].vx = 2.0f;
hFix[1].vy = 0.0f;
hFix[1].vz = 0.0f;
hFix[1].tau = 10.0 * hRun.dt;
hOut[0].oX = 100 * q * dr;
hOut[0].oY = 5 * q * dr;
hOut[0].oZ = 0.0f;
hOut[0].nX = -1.0f;
hOut[0].nY = 0.0f;
hOut[0].nZ = 0.0f;
hOut[0].R = 20.0f*q*dr;
hIn[0].Material = m;
hIn[0].Mass = rho * dr * dr * dr;
hIn[0].Smooth = 1.2f * dr;
hIn[0].Density = 0.0f;
hIn[0].Energy = 0.0f;
hIn[0].oX = 10.f * q * dr;
hIn[0].oY = 20.f * q * dr;
hIn[0].oZ = 0.f * q * dr;
hIn[0].nX = 0.5f;
hIn[0].nY = -0.5f;
hIn[0].nZ = 0.0f;
hIn[0].Velocity = 2.0f;
hIn[0].R = 10.0f *q*dr;
printf("Channel\n");
printf("Particles: %i \n", hm->pn);
printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ);
}
void initESS(struct model *hm) {
int i, m, b;
double rho, cm, cb, pmin;
double dr;
FILE *stream;
float fv1, fv2, fv3;
m = 1;
b = 2;
rho = 10388.0f;
cm = 17.4f;
cb = 20.0f;
pmin = -1.0e6;
dr = 4.0e-3;
hMatType[m] = 5;
hMatProp[m][0] = rho;
hMatProp[m][1] = cm;
hMatProp[m][2] = 2.66;
hMatProp[m][3] = pmin;
hMatType[b] = 0;
hMatProp[b][0] = rho;
hMatProp[b][1] = cb;
hMatProp[b][2] = pmin;
hRun.minX = -1.0f;
hRun.maxX = 2.0f;
hRun.minY = -0.2f;
hRun.maxY = 0.6f;
hRun.minZ = -0.2f;
hRun.maxZ = 0.2f;
hRun.dt = dr / cm;
hRun.dt = 0.2e-3;
hRun.tsn = 10000;
hRun.ssi = 200;
hGrid.oX = hRun.minX;
hGrid.oY = hRun.minY;
hGrid.oZ = hRun.minZ;
hGrid.size = 2.0f * 1.2f * dr;
hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1;
hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1;
hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1;
hLoad[0].minX = hRun.minX;
hLoad[0].maxX = hRun.maxX;
hLoad[0].minY = hRun.minY;
hLoad[0].maxY = hRun.maxY;
hLoad[0].minZ = hRun.minZ;
hLoad[0].maxZ = hRun.maxZ;
hLoad[0].gy = -9.81f;
hFix[0].minX = 0.90f;
hFix[0].maxX = 1.00f;
hFix[0].minY = hRun.minY;
hFix[0].maxY = hRun.maxY;
hFix[0].minZ = hRun.minZ;
hFix[0].maxZ = hRun.maxZ;
hFix[0].vx = -1.5f;
hFix[0].vy = 0.0f;
hFix[0].vz = 0.0f;
hFix[0].tau = 8.0 * hRun.dt;
hIn[0].Material = m;
hIn[0].Mass = rho * dr * dr * dr;
hIn[0].Smooth = 1.2f * dr;
hIn[0].Density = 0.0f;
hIn[0].Energy = 0.0f;
hIn[0].oX = 1.40f;
hIn[0].oY = 0.5f;
hIn[0].oZ = 0.05f;
hIn[0].nX = 0.0f;
hIn[0].nY = -1.0f;
hIn[0].nZ = 0.0f;
hIn[0].Velocity = 3.0f;
hIn[0].R = 0.04f;
hIn[1].Material = m;
hIn[1].Mass = rho * dr * dr * dr;
hIn[1].Smooth = 1.2f * dr;
hIn[1].Density = 0.0f;
hIn[1].Energy = 0.0f;
hIn[1].oX = 1.40f;
hIn[1].oY = 0.5f;
hIn[1].oZ = -0.05f;
hIn[1].nX = 0.0f;
hIn[1].nY = -1.0f;
hIn[1].nZ = 0.0f;
hIn[1].Velocity = 3.0f;
hIn[1].R = 0.04f;
// Stream file position
stream = fopen("surface.txt", "r");
for (i = 0; fscanf(stream, "%e %e %e", &fv1, &fv2, &fv3) == 3; i++) {
hm->PosX[i] = fv1;
hm->PosY[i] = fv2;
hm->PosZ[i] = fv3;
}
fclose(stream);
hm->pn = i;
for (i = 0; i < hm->pn; i++) {
hm->VelX[i] = 0.0f;
hm->VelY[i] = 0.0f;
hm->VelZ[i] = 0.0f;
hm->Material[i] = 2;
hm->Mass[i] = rho * powf(dr, 3);
hm->Smooth[i] = 1.2f*dr;
hm->Density[i] = 0.0f;
hm->Energy[i] = 0.0f;
hm->Pressure[i] = 0.0f;
hm->Sound[i] = cb;
}
printf("ESS\n");
printf("Particles: %i \n", hm->pn);
printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ);
}
void steadyESS(struct model *hm) {
int m, b, i;
double rho, cm, cb, pmin;
double dr;
m = 1;
b = 2;
rho = 10388.0f;
cm = 17.400f;
cb = 20.0f;
pmin = -150.0e3;
dr = 4.0e-3;
hMatType[m] = 5;
hMatProp[m][0] = rho;
hMatProp[m][1] = cm;
hMatProp[m][2] = 0.0f*2.66f;
hMatProp[m][3] = pmin;
hMatType[b] = 0;
hMatProp[b][0] = rho;
hMatProp[b][1] = cb;
hMatProp[b][2] = pmin;
hRun.minX = -1.0f;
hRun.maxX = 2.0f;
hRun.minY = -0.2f;
hRun.maxY = 0.6f;
hRun.minZ = -0.2f;
hRun.maxZ = 0.2f;
hRun.dt = dr / cm;
hRun.dt = 0.2e-3;
hRun.tsn = 10000;
hRun.ssi = 1000;
hGrid.oX = hRun.minX;
hGrid.oY = hRun.minY;
hGrid.oZ = hRun.minZ;
hGrid.size = 2.0f * 1.2f * dr;
hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1;
hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1;
hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1;
hLoad[0].minX = hRun.minX;
hLoad[0].maxX = hRun.maxX;
hLoad[0].minY = hRun.minY;
hLoad[0].maxY = hRun.maxY;
hLoad[0].minZ = hRun.minZ;
hLoad[0].maxZ = hRun.maxZ;
hLoad[0].gy = -9.81f;
hFix[0].minX = 0.90f;
hFix[0].maxX = 1.00f;
hFix[0].minY = hRun.minY;
hFix[0].maxY = hRun.maxY;
hFix[0].minZ = hRun.minZ;
hFix[0].maxZ = hRun.maxZ;
hFix[0].vx = -1.5f * 1.50f;
hFix[0].vy = 0.0f;
hFix[0].vz = 0.0f;
hFix[0].tau = 8.0 * hRun.dt;
hIn[0].Material = m;
hIn[0].Mass = rho * dr * dr * dr;
hIn[0].Smooth = 1.2f * dr;
hIn[0].Density = 0.0f;
hIn[0].Energy = 0.0f;
hIn[0].oX = 1.40f;
hIn[0].oY = 0.5f;
hIn[0].oZ = 0.05f;
hIn[0].nX = 0.0f;
hIn[0].nY = -1.0f;
hIn[0].nZ = 0.0f;
hIn[0].Velocity = 3.0f * 1.50f;
hIn[0].R = 0.04f;
hIn[1].Material = m;
hIn[1].Mass = rho * dr * dr * dr;
hIn[1].Smooth = 1.2f * dr;
hIn[1].Density = 0.0f;
hIn[1].Energy = 0.0f;
hIn[1].oX = 1.40f;
hIn[1].oY = 0.5f;
hIn[1].oZ = -0.05f;
hIn[1].nX = 0.0f;
hIn[1].nY = -1.0f;
hIn[1].nZ = 0.0f;
hIn[1].Velocity = 3.0f * 1.50f;
hIn[1].R = 0.04f;
scanData(hm);
for (i = 0; i < hm->pn; i++) {
/*
hm->Density[i] = 0.0f;
hm->Energy[i] = 0.0f;
hm->Pressure[i] = 0.0f;
hm->Sound[i] = cb;
*/
}
printf("ESS\n");
printf("Particles: %i \n", hm->pn);
printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ);
}
void initDamBreak(struct model *hm) {
int i, j, k, m, b, q, pi;
double rho, c0, pmin;
double dr;
q = 4;
m = 1;
b = 2;
rho = 1000.0f;
c0 = 20.0f;
pmin = -1.e12;
hMatType[m] = 4;
hMatProp[m][0] = rho;
hMatProp[m][1] = c0;
hMatProp[m][2] = pmin;
hMatType[b] = 0;
hMatProp[b][0] = rho;
hMatProp[b][1] = c0;
hMatProp[b][2] = pmin;
dr = 0.025f / q;
pi = 0;
for (k = -20 * q; k <= 20 * q; k++) {
for (j = 0; j <= 22 * q; j++) {
for (i = 0; i <= 49 * q; i++) {
hm->PosX[pi] = i * dr;
hm->PosY[pi] = j * dr;
hm->PosZ[pi] = k * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = m;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = -20 * q -2; k <= 20 * q +2; k++) {
for (j = -2; j <= -2; j++) {
for (i = -80 * q -2; i <= 49 * q +2; i++) {
hm->PosX[pi] = i * dr;
hm->PosY[pi] = j * dr;
hm->PosZ[pi] = k * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = -20 * q -2; k <= -20 * q -2; k++) {
for (j = -1; j <= 40 * q; j++) {
for (i = -80 * q -2; i <= 49 * q +2; i++) {
hm->PosX[pi] = i * dr;
hm->PosY[pi] = j * dr;
hm->PosZ[pi] = k * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = 20 * q +2; k <= 20 * q +2; k++) {
for (j = -1; j <= 40 * q; j++) {
for (i = -80 * q -2; i <= 49 * q +2; i++) {
hm->PosX[pi] = i * dr;
hm->PosY[pi] = j * dr;
hm->PosZ[pi] = k * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = -20 * q -1; k <= 20 * q +1; k++) {
for (j = -1; j <= 40 * q; j++) {
for (i = -80 * q -2; i <= -80 * q -2; i++) {
hm->PosX[pi] = i * dr;
hm->PosY[pi] = j * dr;
hm->PosZ[pi] = k * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = -20 * q -1; k <= 20 * q +1; k++) {
for (j = -1; j <= 40 * q; j++) {
for (i = 49 * q +2; i <= 49 * q +2; i++) {
hm->PosX[pi] = i * dr;
hm->PosY[pi] = j * dr;
hm->PosZ[pi] = k * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
for (k = -8 * q -1; k <= 8 * q +1; k++) {
for (j = -0; j <= 6 * q -1; j++) {
for (i = -53 * q +1; i <= -47 * q -1; i++) {
hm->PosX[pi] = i * dr;
hm->PosY[pi] = j * dr;
hm->PosZ[pi] = k * dr;
hm->VelX[pi] = 0.0f;
hm->VelY[pi] = 0.0f;
hm->VelZ[pi] = 0.0f;
hm->Material[pi] = b;
hm->Density[pi] = 0.0f;
hm->Energy[pi] = 0.0f;
hm->Pressure[pi] = 0.0f;
pi++;
}
}
}
hm->pn = pi;
for (i = 0; i < hm->pn; i++) {
hm->Mass[i] = rho * dr * dr * dr;
hm->Smooth[i] = 1.2f * dr;
hm->Sound[i] = c0;
}
hRun.minX = -2.5f;
hRun.maxX = 2.5f;
hRun.minY = -2.5f;
hRun.maxY = 2.5f;
hRun.minZ = -2.5f;
hRun.maxZ = 2.5f;
hRun.dt = dr / c0;
hRun.tsn = 2000 * q;
hRun.ssi = 40 * q;
hGrid.oX = hRun.minX;
hGrid.oY = hRun.minY;
hGrid.oZ = hRun.minZ;
hGrid.size = 2.0f * 1.2f * dr;
hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1;
hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1;
hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1;
hLoad[0].minX = hRun.minX;
hLoad[0].maxX = hRun.maxX;
hLoad[0].minY = hRun.minY;
hLoad[0].maxY = hRun.maxY;
hLoad[0].minZ = hRun.minZ;
hLoad[0].maxZ = hRun.maxZ;
hLoad[0].gy = -9.81f;
printf("Dam break\n");
printf("Particles: %i \n", hm->pn);
printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ);
}
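// iSort and fSort apply the permutation perm produced by the hash sort to
// an array in place, using a lazily allocated scratch buffer of MAXP
// elements that is reused across calls and never freed.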
int iSort(int *array, int *perm, int n) {
int i;
static int* dummy = NULL;
if (!dummy) dummy = (int *) malloc(MAXP * sizeof(int));
for (i = 0; i < n; i++) dummy[i] = array[i];
for (i = 0; i < n; i++) array[i] = dummy[perm[i]];
return 0;
}
int fSort(float *array, int *perm, int n) {
int i;
static float* dummy = NULL;
if (!dummy) dummy = (float *) malloc(MAXP * sizeof(float));
for (i = 0; i < n; i++) dummy[i] = array[i];
for (i = 0; i < n; i++) array[i] = dummy[perm[i]];
return 0;
}
int mapCompare(const void *a, const void *b)
{
int c;
struct pair m1, m2;
c = 0;
m1 = *(struct pair*)a;
m2 = *(struct pair*)b;
if (m1.key < m2.key) c = -1;
if (m1.key > m2.key) c = 1;
return c;
}
void updateHashHost(const int pn,
const float* PosX, const float* PosY, const float* PosZ,
int* Hash, const struct grid Grid) {
/**
* \brief Computes the grid cell hash of each particle
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
int ip, ix, iy, iz, ic;
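// The domain is covered by a uniform grid of cubic cells of edge
// Grid.size; cell coordinates (ix, iy, iz) are linearised into a single
// hash ic = ix + iy*nX + iz*nX*nY, clamped to the valid range, so each
// particle is assigned to one bucket of the search grid.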
for (ip = 0; ip < pn; ip++) {
ix = (int) truncf((PosX[ip] - Grid.oX) / Grid.size);
iy = (int) truncf((PosY[ip] - Grid.oY) / Grid.size);
iz = (int) truncf((PosZ[ip] - Grid.oZ) / Grid.size);
ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY;
if (ic < 0) ic = 0;
if (ic >= Grid.nX * Grid.nY * Grid.nZ) ic = Grid.nX * Grid.nY * Grid.nZ -1;
Hash[ip] = ic;
}
}
__global__ void updateHashDevice(const int pn,
const float* PosX, const float* PosY, const float* PosZ,
int* Hash, const struct grid Grid) {
/**
* \brief Computes the grid cell hash of each particle (one thread per particle)
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
int ip, ix, iy, iz, ic;
ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip < pn) {
ix = (int) truncf((PosX[ip] - Grid.oX) / Grid.size);
iy = (int) truncf((PosY[ip] - Grid.oY) / Grid.size);
iz = (int) truncf((PosZ[ip] - Grid.oZ) / Grid.size);
ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY;
if (ic < 0) ic = 0;
if (ic >= Grid.nX * Grid.nY * Grid.nZ) ic = Grid.nX * Grid.nY * Grid.nZ -1;
Hash[ip] = ic;
}
}
void checkOutHost(const int pn,
const float* PosX, const float* PosY, const float* PosZ,
int* Hash, const struct grid Grid) {
/**
* \brief Flags particles that left the domain or crossed an outlet
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
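// For each outlet, dn is the signed distance of the particle from the
// outlet plane along its normal and dr its radial distance from the
// outlet centre within the plane. Particles beyond the plane (dn < 0)
// and inside the outlet radius, or outside the domain box, receive the
// out-of-grid hash nX*nY*nZ and are later dropped by the sort.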
float dn, dr;
int ip, i;
for (ip = 0; ip < pn; ip++) {
for (i = 0; i < 10; i++) {
dn = 0.0f;
dn += (PosX[ip] - hOut[i].oX) * hOut[i].nX;
dn += (PosY[ip] - hOut[i].oY) * hOut[i].nY;
dn += (PosZ[ip] - hOut[i].oZ) * hOut[i].nZ;
dr = 0.0f;
dr += powf((PosX[ip] - hOut[i].oX) - dn * hOut[i].nX, 2);
dr += powf((PosY[ip] - hOut[i].oY) - dn * hOut[i].nY, 2);
dr += powf((PosZ[ip] - hOut[i].oZ) - dn * hOut[i].nZ, 2);
dr = sqrtf(dr);
if ((dn < 0.0f) && (dr < hOut[i].R)) {
Hash[ip] = Grid.nX * Grid.nY * Grid.nZ;
}
}
if ((PosX[ip] > hRun.maxX) ||
(PosX[ip] < hRun.minX) ||
(PosY[ip] > hRun.maxY) ||
(PosY[ip] < hRun.minY) ||
(PosZ[ip] > hRun.maxZ) ||
(PosZ[ip] < hRun.minZ)) Hash[ip] = Grid.nX * Grid.nY * Grid.nZ;
}
}
__global__ void checkOutDevice(const int pn,
const float* PosX, const float* PosY, const float* PosZ,
int* Hash, const struct grid Grid) {
/**
* \brief Flags particles that left the domain or crossed an outlet (one thread per particle)
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
float dn, dr;
int ip, i;
ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip < pn) {
for (i = 0; i < 10; i++) {
dn = 0.0f;
dn += (PosX[ip] - dOut[i].oX) * dOut[i].nX;
dn += (PosY[ip] - dOut[i].oY) * dOut[i].nY;
dn += (PosZ[ip] - dOut[i].oZ) * dOut[i].nZ;
dr = 0.0f;
dr += powf((PosX[ip] - dOut[i].oX) - dn * dOut[i].nX, 2);
dr += powf((PosY[ip] - dOut[i].oY) - dn * dOut[i].nY, 2);
dr += powf((PosZ[ip] - dOut[i].oZ) - dn * dOut[i].nZ, 2);
dr = sqrtf(dr);
if ((dn < 0.0f) && (dr < dOut[i].R)) {
Hash[ip] = Grid.nX * Grid.nY * Grid.nZ;
}
}
if ((PosX[ip] > dRun.maxX) ||
(PosX[ip] < dRun.minX) ||
(PosY[ip] > dRun.maxY) ||
(PosY[ip] < dRun.minY) ||
(PosZ[ip] > dRun.maxZ) ||
(PosZ[ip] < dRun.minZ)) Hash[ip] = Grid.nX * Grid.nY * Grid.nZ;
}
}
void inletHost(struct model *hm) {
int i, iu, iv, n, p;
float3 u, v, w, r;
int material[MAXPI];
float mass[MAXPI], smooth[MAXPI];
float posX[MAXPI], posY[MAXPI], posZ[MAXPI];
float velX[MAXPI], velY[MAXPI], velZ[MAXPI];
float density[MAXPI], energy[MAXPI];
p = 0;
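// Each active inlet accumulates the distance travelled at its feed
// velocity; every time a full particle spacing (Smooth/1.2) is covered,
// a new layer of particles is seeded on a disc of radius R around the
// inlet origin. The disc is spanned by u and v, two directions
// orthogonal to the inlet normal w, and the new particles move along w.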
for (i = 0; i < 10; i++) if (hIn[i].Material != 0) {
hIn[i].Distance += hIn[i].Velocity * hRun.dt;
if (hIn[i].Distance > hIn[i].Smooth / 1.2f) {
hIn[i].Distance = hIn[i].Distance - hIn[i].Smooth / 1.2f;
w = make_float3(hIn[i].nX, hIn[i].nY, hIn[i].nZ);
w = normalize(w);
// choose the coordinate axis least aligned with w (compare absolute
// components, as in inletDevice) so that cross(w, u) is never degenerate
if ((fabsf(w.x) <= fabsf(w.y)) && (fabsf(w.x) <= fabsf(w.z))) u = make_float3(1.f, 0.f, 0.f);
if ((fabsf(w.y) <= fabsf(w.x)) && (fabsf(w.y) <= fabsf(w.z))) u = make_float3(0.f, 1.f, 0.f);
if ((fabsf(w.z) <= fabsf(w.x)) && (fabsf(w.z) <= fabsf(w.y))) u = make_float3(0.f, 0.f, 1.f);
v = cross(w, u);
n = (int) roundf(1.2f * hIn[i].R / hIn[i].Smooth);
for (iv = -n; iv <= n; iv++) {
for (iu = -n; iu <= n; iu++) {
r = iu * u + iv * v;
r *= hIn[i].Smooth / 1.2f;
if (length(r) < hIn[i].R) {
material[p] = hIn[i].Material;
mass[p] = hIn[i].Mass;
smooth[p] = hIn[i].Smooth;
posX[p] = hIn[i].oX + r.x;
posY[p] = hIn[i].oY + r.y;
posZ[p] = hIn[i].oZ + r.z;
velX[p] = hIn[i].Velocity * w.x;
velY[p] = hIn[i].Velocity * w.y;
velZ[p] = hIn[i].Velocity * w.z;
density[p] = hIn[i].Density;
energy[p] = hIn[i].Energy;
p++;
}
}
}
}
}
cudaMemcpy(hm->Material + hm->pn, material, (p * sizeof(int)), cudaMemcpyHostToHost);
cudaMemcpy(hm->Mass + hm->pn, mass, (p * sizeof(float)), cudaMemcpyHostToHost);
cudaMemcpy(hm->Smooth + hm->pn, smooth, (p * sizeof(float)), cudaMemcpyHostToHost);
cudaMemcpy(hm->PosX + hm->pn, posX, (p * sizeof(float)), cudaMemcpyHostToHost);
cudaMemcpy(hm->PosY + hm->pn, posY, (p * sizeof(float)), cudaMemcpyHostToHost);
cudaMemcpy(hm->PosZ + hm->pn, posZ, (p * sizeof(float)), cudaMemcpyHostToHost);
cudaMemcpy(hm->VelX + hm->pn, velX, (p * sizeof(float)), cudaMemcpyHostToHost);
cudaMemcpy(hm->VelY + hm->pn, velY, (p * sizeof(float)), cudaMemcpyHostToHost);
cudaMemcpy(hm->VelZ + hm->pn, velZ, (p * sizeof(float)), cudaMemcpyHostToHost);
cudaMemcpy(hm->Density + hm->pn, density, (p * sizeof(float)), cudaMemcpyHostToHost);
cudaMemcpy(hm->Energy + hm->pn, energy, (p * sizeof(float)), cudaMemcpyHostToHost);
hm->pn += p;
}
void inletDevice(struct model *dm) {
int i, iu, iv, n, p;
float3 u, v, w, r;
int material[MAXPI];
float mass[MAXPI], smooth[MAXPI];
float posX[MAXPI], posY[MAXPI], posZ[MAXPI];
float velX[MAXPI], velY[MAXPI], velZ[MAXPI];
float density[MAXPI], energy[MAXPI];
p = 0;
for (i = 0; i < 10; i++) if (hIn[i].Material != 0) {
hIn[i].Distance += hIn[i].Velocity * hRun.dt;
if (hIn[i].Distance > hIn[i].Smooth / 1.2f) {
hIn[i].Distance = hIn[i].Distance - hIn[i].Smooth / 1.2f;
w = make_float3(hIn[i].nX, hIn[i].nY, hIn[i].nZ);
w = normalize(w);
if ((fabsf(w.x) <= fabsf(w.y)) && (fabsf(w.x) <= fabsf(w.z))) u = make_float3(1.f, 0.f, 0.f);
if ((fabsf(w.y) <= fabsf(w.x)) && (fabsf(w.y) <= fabsf(w.z))) u = make_float3(0.f, 1.f, 0.f);
if ((fabsf(w.z) <= fabsf(w.x)) && (fabsf(w.z) <= fabsf(w.y))) u = make_float3(0.f, 0.f, 1.f);
v = cross(w, u);
n = (int) roundf(1.2f * hIn[i].R / hIn[i].Smooth);
for (iv = -n; iv <= n; iv++) {
for (iu = -n; iu <= n; iu++) {
r = iu * u + iv * v;
r *= hIn[i].Smooth / 1.2f;
if (length(r) < hIn[i].R) {
material[p] = hIn[i].Material;
mass[p] = hIn[i].Mass;
smooth[p] = hIn[i].Smooth;
posX[p] = hIn[i].oX + r.x;
posY[p] = hIn[i].oY + r.y;
posZ[p] = hIn[i].oZ + r.z;
velX[p] = hIn[i].Velocity * w.x;
velY[p] = hIn[i].Velocity * w.y;
velZ[p] = hIn[i].Velocity * w.z;
density[p] = hIn[i].Density;
energy[p] = hIn[i].Energy;
p++;
}
}
}
}
}
cudaMemcpy(dm->Material + dm->pn, material, (p * sizeof(int)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->Mass + dm->pn, mass, (p * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->Smooth + dm->pn, smooth, (p * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->PosX + dm->pn, posX, (p * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->PosY + dm->pn, posY, (p * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->PosZ + dm->pn, posZ, (p * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->VelX + dm->pn, velX, (p * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->VelY + dm->pn, velY, (p * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->VelZ + dm->pn, velZ, (p * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->Density + dm->pn, density, (p * sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(dm->Energy + dm->pn, energy, (p * sizeof(float)), cudaMemcpyHostToDevice);
dm->pn += p;
}
void updateSetsHost(const int pn, int *SetStart, int *SetStop,
const int* Hash) {
/**
* \brief Finds the start and stop index of each grid cell in the hash-sorted particle list
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
int ip;
int hash, nextHash, prevHash;
for (ip = 0; ip < pn; ip++) {
hash = Hash[ip];
if (ip == 0) prevHash = -1;
else prevHash = Hash[ip -1];
if (ip == pn -1) nextHash = -1;
else nextHash = Hash[ip +1];
if (hash != prevHash) SetStart[hash] = ip;
if (hash != nextHash) SetStop[hash] = ip +1;
}
}
__global__ void updateSetsDevice(const int pn, int *SetStart, int *SetStop,
const int* Hash) {
/**
* \brief Finds the start and stop index of each grid cell in the hash-sorted particle list (one thread per particle)
*
* \date Jan 6, 2010
* \author Luca Massidda
*/
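// Particles are already sorted by hash, so a cell boundary occurs where
// the hash differs from that of the previous or next particle. Each
// thread publishes its hash in shared memory shifted by one position, so
// the comparison with the neighbouring particle needs no extra global
// reads; only the first and last threads of a block fetch the missing
// neighbour from global memory.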
__shared__ int prevHash[THREADS];
__shared__ int nextHash[THREADS];
int ip;
int hash;
ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip >= pn) return;
hash = Hash[ip];
if (threadIdx.x < THREADS -1) prevHash[threadIdx.x +1] = hash;
if (threadIdx.x > 0) nextHash[threadIdx.x -1] = hash;
if (threadIdx.x == 0) {
if (ip == 0) prevHash[threadIdx.x] = -1;
else prevHash[threadIdx.x] = Hash[ip -1];
}
if (threadIdx.x == THREADS -1) {
if (ip == pn -1) nextHash[threadIdx.x] = -1;
else nextHash[threadIdx.x] = Hash[ip +1];
}
__syncthreads();
if (hash != prevHash[threadIdx.x]) SetStart[hash] = ip;
if (hash != nextHash[threadIdx.x]) SetStop[hash] = ip +1;
}
void updateListHost(const int pn, int *List,
const int* SetStart, const int* SetStop,
const float* Smooth,
const float* PosX, const float* PosY, const float* PosZ,
const struct grid Grid) {
int ip, ic, ix, iy, iz, i, j, k, jp, jc, np;
float dx, dy, dz, dr;
// Particles list is filled
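// For every particle the 3x3x3 block of cells around its own cell is
// scanned and all particles closer than twice the smoothing length are
// recorded, up to MAXN entries; the remaining slots are padded with the
// particle's own index so the list always has fixed length MAXN.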
for (ip = 0; ip < pn; ip++) {
ix = (int) ((PosX[ip] - Grid.oX) / Grid.size);
iy = (int) ((PosY[ip] - Grid.oY) / Grid.size);
iz = (int) ((PosZ[ip] - Grid.oZ) / Grid.size);
ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY;
np = 0;
for (k = -1; k <= 1; k++) {
for (j = -1; j <= 1; j++) {
for (i = -1; i <= 1; i++) {
jc = ic + i + j * Grid.nX + k * Grid.nX * Grid.nY;
if (jc >= 0 && jc < Grid.nX * Grid.nY * Grid.nZ) {
for (jp = SetStart[jc]; jp < SetStop[jc]; jp++) {
dx = PosX[ip] - PosX[jp];
dy = PosY[ip] - PosY[jp];
dz = PosZ[ip] - PosZ[jp];
dr = sqrtf(dx * dx + dy * dy + dz * dz);
if ((dr < 2.0f * Smooth[ip]) && (np < MAXN)) {
List[ip * MAXN + np] = jp;
np++;
}
}
}
}
}
}
while (np < MAXN) {
List[ip * MAXN + np] = ip;
np++;
}
}
}
__global__ void updateListDevice(const int pn, int *List,
const int* SetStart, const int* SetStop,
const float* Smooth,
const float* PosX, const float* PosY, const float* PosZ,
const struct grid Grid) {
int ip, ic, ix, iy, iz, i, j, k, jp, jc, np;
float dx, dy, dz, dr;
// Particles list is filled
ip = threadIdx.x + blockDim.x * blockIdx.x;
if (ip >= pn) return;
ix = (int) ((PosX[ip] - Grid.oX) / Grid.size);
iy = (int) ((PosY[ip] - Grid.oY) / Grid.size);
iz = (int) ((PosZ[ip] - Grid.oZ) / Grid.size);
ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY;
np = 0;
for (k = -1; k <= 1; k++) {
for (j = -1; j <= 1; j++) {
for (i = -1; i <= 1; i++) {
jc = ic + i + j * Grid.nX + k * Grid.nX * Grid.nY;
if (jc >= 0 && jc < Grid.nX * Grid.nY * Grid.nZ) {
for (jp = SetStart[jc]; jp < SetStop[jc]; jp++) {
dx = PosX[ip] - PosX[jp];
dy = PosY[ip] - PosY[jp];
dz = PosZ[ip] - PosZ[jp];
dr = sqrtf(dx * dx + dy * dy + dz * dz);
if ((dr < 2.0f * Smooth[ip]) && (np < MAXN)) {
List[ip * MAXN + np] = jp;
np++;
}
}
}
}
}
}
while (np < MAXN) {
List[ip * MAXN + np] = ip;
np++;
}
}
int neighbourListHost(struct model *hm) {
struct pair map[MAXP];
int i, ip, pout;
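// Neighbour search in four steps: hash every particle to a grid cell
// (escaped particles get the extra hash nX*nY*nZ), sort all particle
// arrays by hash, drop the pout escaped particles that the sort has
// pushed to the tail, then rebuild the per-cell start/stop ranges and
// the fixed-size neighbour lists.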
updateHashHost(hm->pn, hm->PosX, hm->PosY, hm->PosZ, hm->Hash, hGrid);
checkOutHost(hm->pn, hm->PosX, hm->PosY, hm->PosZ, hm->Hash, hGrid);
for (ip = 0; ip < hm->pn; ip++) hm->Index[ip] = ip;
for (ip = 0; ip < hm->pn; ip++) {
map[ip].key = hm->Hash[ip];
map[ip].value = hm->Index[ip];
}
qsort(map, hm->pn, sizeof(struct pair), mapCompare);
for (ip = 0; ip < hm->pn; ip++) {
hm->Hash[ip] = map[ip].key;
hm->Index[ip] = map[ip].value;
}
iSort(hm->Material, hm->Index, hm->pn);
fSort(hm->Mass, hm->Index, hm->pn);
fSort(hm->Smooth, hm->Index, hm->pn);
fSort(hm->PosX, hm->Index, hm->pn);
fSort(hm->PosY, hm->Index, hm->pn);
fSort(hm->PosZ, hm->Index, hm->pn);
fSort(hm->VelX, hm->Index, hm->pn);
fSort(hm->VelY, hm->Index, hm->pn);
fSort(hm->VelZ, hm->Index, hm->pn);
fSort(hm->Density, hm->Index, hm->pn);
fSort(hm->Energy, hm->Index, hm->pn);
fSort(hm->Pressure, hm->Index, hm->pn);
fSort(hm->Sound, hm->Index, hm->pn);
pout = 0;
for (ip = 0; ip < hm->pn; ip++) if (hm->Hash[ip] == hGrid.nX * hGrid.nY * hGrid.nZ) pout++;
hm->pn -= pout;
for (i = 0; i < hGrid.nX * hGrid.nY * hGrid.nZ; i++) hm->SetStart[i] = 0;
for (i = 0; i < hGrid.nX * hGrid.nY * hGrid.nZ; i++) hm->SetStop[i] = 0;
updateSetsHost(hm->pn, hm->SetStart, hm->SetStop, hm->Hash);
updateListHost(hm->pn, hm->List, hm->SetStart, hm->SetStop, hm->Smooth,
hm->PosX, hm->PosY, hm->PosZ, hGrid);
return 0;
}
int neighbourListDevice(struct model *dm) {
int pout;
int blocks, threads;
blocks = (dm->pn + THREADS - 1) / THREADS;
threads = THREADS;
thrust::device_ptr<int> tIndex(dm->Index);
thrust::device_ptr<int> tHash(dm->Hash);
thrust::device_ptr<int> tMaterial(dm->Material);
thrust::device_ptr<float> tMass(dm->Mass);
thrust::device_ptr<float> tSmooth(dm->Smooth);
thrust::device_ptr<float> tPosX(dm->PosX);
thrust::device_ptr<float> tPosY(dm->PosY);
thrust::device_ptr<float> tPosZ(dm->PosZ);
thrust::device_ptr<float> tVelX(dm->VelX);
thrust::device_ptr<float> tVelY(dm->VelY);
thrust::device_ptr<float> tVelZ(dm->VelZ);
thrust::device_ptr<float> tDensity(dm->Density);
thrust::device_ptr<float> tEnergy(dm->Energy);
thrust::device_ptr<float> tPressure(dm->Pressure);
thrust::device_ptr<float> tSound(dm->Sound);
thrust::device_ptr<int> tIntDummy(dm->IntDummy);
thrust::device_ptr<float> tFloatDummy(dm->FloatDummy);
thrust::device_ptr<int> tSetStart(dm->SetStart);
thrust::device_ptr<int> tSetStop(dm->SetStop);
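// Same algorithm as the host version, written with Thrust: the key sort
// leaves the permutation in Index, and each field is reordered by
// copying it into the scratch buffer and gathering it back through
// Index, instead of sorting every array by key separately.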
updateHashDevice <<< blocks, threads >>>
(dm->pn, dm->PosX, dm->PosY, dm->PosZ, dm->Hash, hGrid);
checkOutDevice <<< blocks, threads >>>
(dm->pn, dm->PosX, dm->PosY, dm->PosZ, dm->Hash, hGrid);
thrust::sequence(tIndex, tIndex + dm->pn, 0);
thrust::sort_by_key(tHash, tHash + dm->pn, tIndex);
thrust::copy(tMaterial, tMaterial + dm->pn, tIntDummy);
thrust::gather(tIndex, tIndex + dm->pn, tIntDummy, tMaterial);
thrust::copy(tMass, tMass + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tMass);
thrust::copy(tSmooth, tSmooth + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tSmooth);
thrust::copy(tPosX, tPosX + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosX);
thrust::copy(tPosY, tPosY + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosY);
thrust::copy(tPosZ, tPosZ + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosZ);
thrust::copy(tVelX, tVelX + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelX);
thrust::copy(tVelY, tVelY + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelY);
thrust::copy(tVelZ, tVelZ + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelZ);
thrust::copy(tDensity, tDensity + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tDensity);
thrust::copy(tEnergy, tEnergy + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tEnergy);
thrust::copy(tPressure, tPressure + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPressure);
thrust::copy(tSound, tSound + dm->pn, tFloatDummy);
thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tSound);
pout = thrust::count(tHash, tHash + dm->pn, hGrid.nX * hGrid.nY * hGrid.nZ);
dm->pn -= pout;
thrust::fill(tSetStart, tSetStart + hGrid.nX * hGrid.nY * hGrid.nZ, 0);
thrust::fill(tSetStop, tSetStop + hGrid.nX * hGrid.nY * hGrid.nZ, 0);
updateSetsDevice <<< blocks, threads >>>
(dm->pn, dm->SetStart, dm->SetStop, dm->Hash);
updateListDevice <<< blocks, threads >>>
(dm->pn, dm->List, dm->SetStart, dm->SetStop, dm->Smooth,
dm->PosX, dm->PosY, dm->PosZ, hGrid);
return 0;
}
int RKstepHost(struct model *hm, float alpha) {
int ip;
for (ip = 0; ip < hm->pn; ip++) {
hm->VelDotX[ip] = 0.0f;
hm->VelDotY[ip] = 0.0f;
hm->VelDotZ[ip] = 0.0f;
hm->DensityDot[ip] = 0.0f;
hm->EnergyDot[ip] = 0.0f;
}
// External loads
updateLoadsHost(hm->pn, hm->Material,
hm->PosX, hm->PosY, hm->PosZ,
hm->VelX, hm->VelY, hm->VelZ,
hm->VelDotX, hm->VelDotY, hm->VelDotZ, hm->EnergyDot);
// Calculate particle interactions
balanceMassMomentumHost(hm->pn, hm->List, hm->Material, hm->Mass, hm->Smooth,
hm->PosX, hm->PosY, hm->PosZ,
hm->VelX, hm->VelY, hm->VelZ,
hm->Density, hm->Pressure, hm->Sound,
hm->DensityDot, hm->VelDotX, hm->VelDotY, hm->VelDotZ);
balanceEnergyHost(hm->pn, hm->Material, hm->Pressure, hm->Density,
hm->DensityDot, hm->EnergyDot);
// Update particles
updateParticlesHost(hm->pn, alpha, hm->Material,
hm->VelDotX, hm->VelDotY, hm->VelDotZ, hm->DensityDot, hm->EnergyDot,
hm->PosX0, hm->PosY0, hm->PosZ0,
hm->VelX0, hm->VelY0, hm->VelZ0, hm->Density0, hm->Energy0,
hm->PosX, hm->PosY, hm->PosZ, hm->VelX, hm->VelY, hm->VelZ,
hm->Density, hm->Energy, hm->Pressure, hm->Sound);
return 0;
}
int RKstepDevice(struct model *dm, float alpha, float time) {
int blocks, threads;
blocks = (dm->pn + THREADS - 1) / THREADS;
threads = THREADS;
thrust::device_ptr<float> tVelDotX(dm->VelDotX);
thrust::device_ptr<float> tVelDotY(dm->VelDotY);
thrust::device_ptr<float> tVelDotZ(dm->VelDotZ);
thrust::device_ptr<float> tDensityDot(dm->DensityDot);
thrust::device_ptr<float> tEnergyDot(dm->EnergyDot);
thrust::fill(tVelDotX, tVelDotX + dm->pn, 0.0f);
thrust::fill(tVelDotY, tVelDotY + dm->pn, 0.0f);
thrust::fill(tVelDotZ, tVelDotZ + dm->pn, 0.0f);
thrust::fill(tDensityDot, tDensityDot + dm->pn, 0.0f);
thrust::fill(tEnergyDot, tEnergyDot + dm->pn, 0.0f);
// External loads
updateLoadsDevice <<< blocks, threads >>>
(dm->pn, dm->Material,
dm->PosX, dm->PosY, dm->PosZ,
dm->VelX, dm->VelY, dm->VelZ,
dm->VelDotX, dm->VelDotY, dm->VelDotZ, dm->EnergyDot, time);
// Calculate particle interactions
balanceMassMomentumDevice <<< blocks, threads >>>
(dm->pn, dm->List, dm->Material, dm->Mass, dm->Smooth, dm->PosX, dm->PosY, dm->PosZ,
dm->VelX, dm->VelY, dm->VelZ, dm->Density, dm->Pressure, dm->Sound,
dm->DensityDot, dm->VelDotX, dm->VelDotY, dm->VelDotZ);
balanceEnergyDevice <<< blocks, threads >>>
(dm->pn, dm-> Material, dm->Pressure, dm->Density, dm->DensityDot, dm->EnergyDot);
// Update particles
updateParticlesDevice <<< blocks, threads >>>
(dm->pn, alpha, dm->Material, dm->VelDotX, dm->VelDotY, dm->VelDotZ, dm->DensityDot, dm->EnergyDot,
dm->PosX0, dm->PosY0, dm->PosZ0, dm->VelX0, dm->VelY0, dm->VelZ0, dm->Density0, dm->Energy0,
dm->PosX, dm->PosY, dm->PosZ, dm->VelX, dm->VelY, dm->VelZ, dm->Density, dm->Energy, dm->Pressure, dm->Sound);
return 0;
}
int RKintegrateHost(struct model *hm) {
/**
* \brief Runge Kutta 3rd order time integration
*
* Integrate the Navier Stokes equations in time with the
* Total Variation Diminishing Runge-Kutta algorithm of the 3rd order
*
* \date Dec 20, 2010
* \author Luca Massidda
*/
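// The three stage coefficients alpha = 1, 1/4, 2/3 are those of the
// Shu-Osher TVD Runge-Kutta scheme. Assuming updateParticles* blends the
// backed-up state with the advanced one as
// u <- (1 - alpha)*u0 + alpha*(u + dt*du/dt),
// the loop reproduces
// u1 = u0 + dt*L(u0)
// u2 = 3/4*u0 + 1/4*(u1 + dt*L(u1))
// u(n+1) = 1/3*u0 + 2/3*(u2 + dt*L(u2)).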
int ts;
// TIME CYCLE
for (ts = 0; ts <= hRun.tsn; ts++) {
// Output data
if ((ts % hRun.ssi) == 0) {
printf("Saving time: %g \n", ts * hRun.dt);
printf("Particles: %i \n", hm->pn);
printData(hm);
outputVTK(hm, ts / hRun.ssi);
}
// Inlet conditions
inletHost(hm);
// Calculate neighbour list
neighbourListHost(hm);
// Save initial condition
backupDataHost(hm);
// Step 1
RKstepHost(hm, 1.0);
// Step 2
RKstepHost(hm, 1.0 / 4.0);
// Step 3
RKstepHost(hm, 2.0 / 3.0);
}
return 0;
}
int RKintegrateDevice(struct model *hm, struct model *dm) {
/**
* \brief Runge Kutta 3rd order time integration
*
* Integrate the Navier Stokes equations in time with the
* Total Variation Diminishing Runge-Kutta algorithm of the 3rd order
*
* \date Dec 20, 2010
* \author Luca Massidda
*/
int ts;
// TIME CYCLE
for (ts = 0; ts <= hRun.tsn; ts++) {
// Output data
if ((ts % hRun.ssi) == 0) {
copyDeviceToHost(dm, hm);
printf("Saving time: %g \n", ts * hRun.dt);
printf("Particles: %i \n", hm->pn);
printData(hm);
outputVTK(hm, ts / hRun.ssi);
}
// Inlet conditions
inletDevice(dm);
// Calculate neighbour list
neighbourListDevice(dm);
// Save initial condition
backupDataDevice(dm);
// Step 1
RKstepDevice(dm, 1.0, ts * hRun.dt);
// Step 2
RKstepDevice(dm, 1.0 / 4.0, ts * hRun.dt);
// Step 3
RKstepDevice(dm, 2.0 / 3.0, ts * hRun.dt);
}
return 0;
}
int main() {
/**
* \brief armando2D v2.0
*
* An SPH code for non stationary fluid dynamics.
* This is the reviewed and improved C version of Armando v1.0
* developed at CERN in 2008
*
* \date Oct 20, 2010
* \author Luca Massidda
*/
struct model hModel, dModel;
int i;
initHost(&hModel);
for (i = 0; i < 10; i++) {
hLoad[i].gx = 0.0f;
hLoad[i].gy = 0.0f;
hLoad[i].gz = 0.0f;
hLoad[i].w = 0.0f;
hOut[i].nX = 0.0f;
hOut[i].nY = 0.0f;
hOut[i].nZ = 0.0f;
}
//initBox(&hModel);
//initBath(&hModel);
//initDamBreak(&hModel);
//initChannel(&hModel);
//initESS(&hModel);
steadyESS(&hModel);
initDevice(&dModel);
copyHostToDevice(&hModel, &dModel);
RKintegrateDevice(&hModel, &dModel);
//RKintegrateHost(&hModel);
return 0;
}
|
9f2a4b2fc1ab542e833a6e36166ca0544bcfc8a8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <typeinfo>
#include <color_spinor_field.h>
#include <blas_quda.h>
#include <string.h>
#include <iostream>
#include <misc_helpers.h>
#include <face_quda.h>
#include <dslash_quda.h>
#ifdef DEVICE_PACK
#define REORDER_LOCATION QUDA_CUDA_FIELD_LOCATION
#else
#define REORDER_LOCATION QUDA_CPU_FIELD_LOCATION
#endif
int zeroCopy = 0;
namespace quda {
int cudaColorSpinorField::bufferIndex = 0;
int cudaColorSpinorField::initGhostFaceBuffer = 0;
void* cudaColorSpinorField::ghostFaceBuffer[2]; //gpu memory
void* cudaColorSpinorField::fwdGhostFaceBuffer[2][QUDA_MAX_DIM]; //pointers to ghostFaceBuffer
void* cudaColorSpinorField::backGhostFaceBuffer[2][QUDA_MAX_DIM]; //pointers to ghostFaceBuffer
size_t cudaColorSpinorField::ghostFaceBytes = 0;
/*cudaColorSpinorField::cudaColorSpinorField() :
ColorSpinorField(), v(0), norm(0), alloc(false), init(false) {
}*/
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorParam ¶m) :
ColorSpinorField(param), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
// this must come before create
if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
v = param.v;
norm = param.norm;
}
create(param.create);
if (param.create == QUDA_NULL_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_ZERO_FIELD_CREATE) {
zero();
} else if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_COPY_FIELD_CREATE){
errorQuda("not implemented");
}
checkCudaError();
}
cudaColorSpinorField::cudaColorSpinorField(const cudaColorSpinorField &src) :
ColorSpinorField(src), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
create(QUDA_COPY_FIELD_CREATE);
copySpinorField(src);
}
// creates a copy of src, any differences defined in param
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorField &src,
const ColorSpinorParam ¶m) :
ColorSpinorField(src), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
// can only override if we are not using a reference or parity special case
if (param.create != QUDA_REFERENCE_FIELD_CREATE ||
(param.create == QUDA_REFERENCE_FIELD_CREATE &&
src.SiteSubset() == QUDA_FULL_SITE_SUBSET &&
param.siteSubset == QUDA_PARITY_SITE_SUBSET &&
typeid(src) == typeid(cudaColorSpinorField) ) ||
(param.create == QUDA_REFERENCE_FIELD_CREATE && param.eigv_dim > 0)) {
reset(param);
} else {
errorQuda("Undefined behaviour"); // else silent bug possible?
}
// This must be set before create is called
if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
if (typeid(src) == typeid(cudaColorSpinorField)) {
v = (void*)src.V();
norm = (void*)src.Norm();
} else {
errorQuda("Cannot reference a non-cuda field");
}
if (this->EigvDim() > 0)
{//setup eigenvector form the set
if(eigv_dim != this->EigvDim()) errorQuda("\nEigenvector set does not match..\n"); //for debug only.
if(eigv_id > -1)
{
//printfQuda("\nSetting pointers for vector id %d\n", eigv_id); //for debug only.
v = (void*)((char*)v + eigv_id*bytes);
norm = (void*)((char*)norm + eigv_id*norm_bytes);
}
//do nothing for the eigenvector subset...
}
}
create(param.create);
if (param.create == QUDA_NULL_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_ZERO_FIELD_CREATE) {
zero();
} else if (param.create == QUDA_COPY_FIELD_CREATE) {
copySpinorField(src);
} else if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
// do nothing
} else {
errorQuda("CreateType %d not implemented", param.create);
}
}
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorField &src)
: ColorSpinorField(src), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
create(QUDA_COPY_FIELD_CREATE);
copySpinorField(src);
}
ColorSpinorField& cudaColorSpinorField::operator=(const ColorSpinorField &src) {
if (typeid(src) == typeid(cudaColorSpinorField)) {
*this = (dynamic_cast<const cudaColorSpinorField&>(src));
} else if (typeid(src) == typeid(cpuColorSpinorField)) {
*this = (dynamic_cast<const cpuColorSpinorField&>(src));
} else {
errorQuda("Unknown input ColorSpinorField %s", typeid(src).name());
}
return *this;
}
cudaColorSpinorField& cudaColorSpinorField::operator=(const cudaColorSpinorField &src) {
if (&src != this) {
// keep current attributes unless unset
if (!ColorSpinorField::init) { // note this will turn a reference field into a regular field
destroy();
destroyComms(); // not sure if this necessary
ColorSpinorField::operator=(src);
create(QUDA_COPY_FIELD_CREATE);
}
copySpinorField(src);
}
return *this;
}
cudaColorSpinorField& cudaColorSpinorField::operator=(const cpuColorSpinorField &src) {
// keep current attributes unless unset
if (!ColorSpinorField::init) { // note this will turn a reference field into a regular field
destroy();
ColorSpinorField::operator=(src);
create(QUDA_COPY_FIELD_CREATE);
}
loadSpinorField(src);
return *this;
}
cudaColorSpinorField::~cudaColorSpinorField() {
destroyComms();
destroy();
}
bool cudaColorSpinorField::isNative() const {
if (precision == QUDA_DOUBLE_PRECISION) {
if (fieldOrder == QUDA_FLOAT2_FIELD_ORDER) return true;
} else if (precision == QUDA_SINGLE_PRECISION) {
if (nSpin == 4) {
if (fieldOrder == QUDA_FLOAT4_FIELD_ORDER) return true;
} else if (nSpin == 1) {
if (fieldOrder == QUDA_FLOAT2_FIELD_ORDER) return true;
}
} else if (precision == QUDA_HALF_PRECISION) {
if (nSpin == 4) {
if (fieldOrder == QUDA_FLOAT4_FIELD_ORDER) return true;
} else if (nSpin == 1) {
if (fieldOrder == QUDA_FLOAT2_FIELD_ORDER) return true;
}
}
return false;
}
void cudaColorSpinorField::create(const QudaFieldCreate create) {
if (siteSubset == QUDA_FULL_SITE_SUBSET && siteOrder != QUDA_EVEN_ODD_SITE_ORDER) {
errorQuda("Subset not implemented");
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
v = device_malloc(bytes);
if (precision == QUDA_HALF_PRECISION) {
norm = device_malloc(norm_bytes);
}
alloc = true;
}
if (siteSubset == QUDA_FULL_SITE_SUBSET) {
if(eigv_dim != 0) errorQuda("Eigenvectors must be parity fields!");
// create the associated even and odd subsets
ColorSpinorParam param;
param.siteSubset = QUDA_PARITY_SITE_SUBSET;
param.nDim = nDim;
memcpy(param.x, x, nDim*sizeof(int));
param.x[0] /= 2; // set single parity dimensions
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.v = v;
param.norm = norm;
even = new cudaColorSpinorField(*this, param);
odd = new cudaColorSpinorField(*this, param);
// need this hackery for the moment (need to locate the odd pointers half way into the full field)
(dynamic_cast<cudaColorSpinorField*>(odd))->v = (void*)((char*)v + bytes/2);
if (precision == QUDA_HALF_PRECISION)
(dynamic_cast<cudaColorSpinorField*>(odd))->norm = (void*)((char*)norm + norm_bytes/2);
for(int i=0; i<nDim; ++i){
if(commDimPartitioned(i)){
(dynamic_cast<cudaColorSpinorField*>(odd))->ghost[i] =
static_cast<char*>((dynamic_cast<cudaColorSpinorField*>(odd))->ghost[i]) + bytes/2;
if(precision == QUDA_HALF_PRECISION)
(dynamic_cast<cudaColorSpinorField*>(odd))->ghostNorm[i] =
static_cast<char*>((dynamic_cast<cudaColorSpinorField*>(odd))->ghostNorm[i]) + norm_bytes/2;
}
}
#ifdef USE_TEXTURE_OBJECTS
dynamic_cast<cudaColorSpinorField*>(even)->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(even)->createTexObject();
dynamic_cast<cudaColorSpinorField*>(odd)->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(odd)->createTexObject();
#endif
}
else{//siteSubset == QUDA_PARITY_SITE_SUBSET
//! setup an object for selected eigenvector (the 1st one as a default):
if ((eigv_dim > 0) && (create != QUDA_REFERENCE_FIELD_CREATE) && (eigv_id == -1))
{
//if(bytes > 1811939328) warningQuda("\nCUDA API probably won't be able to create texture object for the eigenvector set... Object size is : %u bytes\n", bytes);
if (getVerbosity() == QUDA_DEBUG_VERBOSE) printfQuda("\nEigenvector set constructor...\n");
// create the associated even and odd subsets
ColorSpinorParam param;
param.siteSubset = QUDA_PARITY_SITE_SUBSET;
param.nDim = nDim;
memcpy(param.x, x, nDim*sizeof(int));
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.v = v;
param.norm = norm;
param.eigv_dim = eigv_dim;
//reserve eigvector set
eigenvectors.reserve(eigv_dim);
//setup volume, [real_]length and stride for a single eigenvector
for(int id = 0; id < eigv_dim; id++)
{
param.eigv_id = id;
eigenvectors.push_back(new cudaColorSpinorField(*this, param));
#ifdef USE_TEXTURE_OBJECTS //(a lot of texture objects...)
dynamic_cast<cudaColorSpinorField*>(eigenvectors[id])->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(eigenvectors[id])->createTexObject();
#endif
}
}
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
if (siteSubset != QUDA_FULL_SITE_SUBSET) {
zeroPad();
} else {
(dynamic_cast<cudaColorSpinorField*>(even))->zeroPad();
(dynamic_cast<cudaColorSpinorField*>(odd))->zeroPad();
}
}
#ifdef USE_TEXTURE_OBJECTS
if((eigv_dim == 0) || (eigv_dim > 0 && eigv_id > -1))
createTexObject();
#endif
// initialize the ghost pointers
if(siteSubset == QUDA_PARITY_SITE_SUBSET) {
for(int i=0; i<nDim; ++i){
if(commDimPartitioned(i)){
ghost[i] = (char*)v + (stride + ghostOffset[i])*nColor*nSpin*2*precision;
if(precision == QUDA_HALF_PRECISION)
ghostNorm[i] = (char*)norm + (stride + ghostNormOffset[i])*QUDA_SINGLE_PRECISION;
}
}
}
checkCudaError();
}
#ifdef USE_TEXTURE_OBJECTS
void cudaColorSpinorField::createTexObject() {
if (isNative()) {
if (texInit) errorQuda("Already bound textures");
// create the texture for the field components
hipChannelFormatDesc desc;
memset(&desc, 0, sizeof(hipChannelFormatDesc));
if (precision == QUDA_SINGLE_PRECISION) desc.f = hipChannelFormatKindFloat;
else desc.f = hipChannelFormatKindSigned; // half is short, double is int2
// staggered fields in half and single are always two component
if (nSpin == 1 && (precision == QUDA_HALF_PRECISION || precision == QUDA_SINGLE_PRECISION)) {
desc.x = 8*precision;
desc.y = 8*precision;
desc.z = 0;
desc.w = 0;
} else { // all others are four component
desc.x = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.y = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.z = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.w = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
}
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = v;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = bytes;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
if (precision == QUDA_HALF_PRECISION) texDesc.readMode = hipReadModeNormalizedFloat;
else texDesc.readMode = hipReadModeElementType;
hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
checkCudaError();
// create the texture for the norm components
if (precision == QUDA_HALF_PRECISION) {
hipChannelFormatDesc desc;
memset(&desc, 0, sizeof(hipChannelFormatDesc));
desc.f = hipChannelFormatKindFloat;
desc.x = 8*QUDA_SINGLE_PRECISION; desc.y = 0; desc.z = 0; desc.w = 0;
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = norm;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = norm_bytes;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = hipReadModeElementType;
hipCreateTextureObject(&texNorm, &resDesc, &texDesc, NULL);
checkCudaError();
}
texInit = true;
}
}
void cudaColorSpinorField::destroyTexObject() {
if (isNative() && texInit) {
hipDestroyTextureObject(tex);
if (precision == QUDA_HALF_PRECISION) hipDestroyTextureObject(texNorm);
texInit = false;
checkCudaError();
}
}
#endif
void cudaColorSpinorField::destroy() {
if (alloc) {
device_free(v);
if (precision == QUDA_HALF_PRECISION) device_free(norm);
if (siteSubset != QUDA_FULL_SITE_SUBSET) {
//! for deflated solvers:
if (eigv_dim > 0)
{
std::vector<ColorSpinorField*>::iterator vec;
for(vec = eigenvectors.begin(); vec != eigenvectors.end(); vec++) delete *vec;
}
}
alloc = false;
}
if (siteSubset == QUDA_FULL_SITE_SUBSET) {
delete even;
delete odd;
}
#ifdef USE_TEXTURE_OBJECTS
if((eigv_dim == 0) || (eigv_dim > 0 && eigv_id > -1))
destroyTexObject();
#endif
}
cudaColorSpinorField& cudaColorSpinorField::Even() const {
if (siteSubset == QUDA_FULL_SITE_SUBSET) {
return *(dynamic_cast<cudaColorSpinorField*>(even));
}
errorQuda("Cannot return even subset of %d subset", siteSubset);
exit(-1);
}
cudaColorSpinorField& cudaColorSpinorField::Odd() const {
if (siteSubset == QUDA_FULL_SITE_SUBSET) {
return *(dynamic_cast<cudaColorSpinorField*>(odd));
}
errorQuda("Cannot return odd subset of %d subset", siteSubset);
exit(-1);
}
// cuda's floating point format, IEEE-754, represents the floating point
// zero as 4 zero bytes
void cudaColorSpinorField::zero() {
hipMemsetAsync(v, 0, bytes, streams[Nstream-1]);
if (precision == QUDA_HALF_PRECISION) hipMemsetAsync(norm, 0, norm_bytes, streams[Nstream-1]);
}
void cudaColorSpinorField::zeroPad() {
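// zero the padding region of the field: each of the Npad chunks of fieldOrder
// real components has (stride - volume) unused pad sites, and a single strided
// 2D memset (pitch = one chunk, height = Npad) clears all of them at once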
size_t pad_bytes = (stride - volume) * precision * fieldOrder;
int Npad = nColor * nSpin * 2 / fieldOrder;
if (eigv_dim > 0 && eigv_id == -1){//we consider the whole eigenvector set:
Npad *= eigv_dim;
pad_bytes /= eigv_dim;
}
size_t pitch = ((eigv_dim == 0 || eigv_id != -1) ? stride : eigv_stride)*fieldOrder*precision;
char *dst = (char*)v + ((eigv_dim == 0 || eigv_id != -1) ? volume : eigv_volume)*fieldOrder*precision;
if(pad_bytes) hipMemset2D(dst, pitch, 0, pad_bytes, Npad);
//for (int i=0; i<Npad; i++) {
// if (pad_bytes) hipMemset((char*)v + (volume + i*stride)*fieldOrder*precision, 0, pad_bytes);
//}
}
void cudaColorSpinorField::copy(const cudaColorSpinorField &src) {
checkField(*this, src);
copyCuda(*this, src);
}
void cudaColorSpinorField::copySpinorField(const ColorSpinorField &src) {
// src is on the device and is native
if (typeid(src) == typeid(cudaColorSpinorField) &&
isNative() && dynamic_cast<const cudaColorSpinorField &>(src).isNative()) {
copy(dynamic_cast<const cudaColorSpinorField&>(src));
} else if (typeid(src) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION);
} else if (typeid(src) == typeid(cpuColorSpinorField)) { // src is on the host
loadSpinorField(src);
} else {
errorQuda("Unknown input ColorSpinorField %s", typeid(src).name());
}
}
void cudaColorSpinorField::loadSpinorField(const ColorSpinorField &src) {
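// three paths: (i) for cpu sources with REORDER_LOCATION on the CPU, reorder on
// the host into a pinned buffer and then hipMemcpy the result; (ii) if the source
// is already a cudaColorSpinorField, copy/reorder entirely on the device;
// (iii) otherwise reorder on the device, staging the host data either through
// bufferDevice or through a zero-copy mapped pinned buffer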
if (REORDER_LOCATION == QUDA_CPU_FIELD_LOCATION &&
typeid(src) == typeid(cpuColorSpinorField)) {
for(int b=0; b<2; ++b){
resizeBufferPinned(bytes + norm_bytes, b);
memset(bufferPinned[b], 0, bytes+norm_bytes); // FIXME (temporary?) bug fix for padding
}
copyGenericColorSpinor(*this, src, QUDA_CPU_FIELD_LOCATION,
bufferPinned[bufferIndex], 0, (char*)bufferPinned[bufferIndex]+bytes, 0);
hipMemcpy(v, bufferPinned[bufferIndex], bytes, hipMemcpyHostToDevice);
hipMemcpy(norm, (char*)bufferPinned[bufferIndex]+bytes, norm_bytes, hipMemcpyHostToDevice);
} else if (typeid(src) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION);
} else {
void *Src, *srcNorm;
if (!zeroCopy) {
resizeBufferDevice(src.Bytes()+src.NormBytes());
Src = bufferDevice;
srcNorm = (char*)bufferDevice + src.Bytes();
hipMemcpy(Src, src.V(), src.Bytes(), hipMemcpyHostToDevice);
hipMemcpy(srcNorm, src.Norm(), src.NormBytes(), hipMemcpyHostToDevice);
} else {
for(int b=0; b<2; ++b){
resizeBufferPinned(src.Bytes()+src.NormBytes(), b);
}
memcpy(bufferPinned[bufferIndex], src.V(), src.Bytes());
memcpy((char*)bufferPinned[bufferIndex]+src.Bytes(), src.Norm(), src.NormBytes());
hipHostGetDevicePointer(&Src, bufferPinned[bufferIndex], 0);
srcNorm = (void*)((char*)Src + src.Bytes());
}
hipMemset(v, 0, bytes); // FIXME (temporary?) bug fix for padding
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION, 0, Src, 0, srcNorm);
}
checkCudaError();
return;
}
void cudaColorSpinorField::saveSpinorField(ColorSpinorField &dest) const {
if (REORDER_LOCATION == QUDA_CPU_FIELD_LOCATION &&
typeid(dest) == typeid(cpuColorSpinorField)) {
for(int b=0; b<2; ++b) resizeBufferPinned(bytes+norm_bytes,b);
hipMemcpy(bufferPinned[bufferIndex], v, bytes, hipMemcpyDeviceToHost);
hipMemcpy((char*)bufferPinned[bufferIndex]+bytes, norm, norm_bytes, hipMemcpyDeviceToHost);
copyGenericColorSpinor(dest, *this, QUDA_CPU_FIELD_LOCATION,
0, bufferPinned[bufferIndex], 0, (char*)bufferPinned[bufferIndex]+bytes);
} else if (typeid(dest) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(dest, *this, QUDA_CUDA_FIELD_LOCATION);
} else {
void *dst, *dstNorm;
if (!zeroCopy) {
resizeBufferDevice(dest.Bytes()+dest.NormBytes());
dst = bufferDevice;
dstNorm = (char*)bufferDevice+dest.Bytes();
} else {
for(int b=0; b<2; ++b) resizeBufferPinned(dest.Bytes()+dest.NormBytes(),b);
hipHostGetDevicePointer(&dst, bufferPinned[bufferIndex], 0);
dstNorm = (char*)dst+dest.Bytes();
}
copyGenericColorSpinor(dest, *this, QUDA_CUDA_FIELD_LOCATION, dst, v, dstNorm, 0);
if (!zeroCopy) {
hipMemcpy(dest.V(), dst, dest.Bytes(), hipMemcpyDeviceToHost);
hipMemcpy(dest.Norm(), dstNorm, dest.NormBytes(), hipMemcpyDeviceToHost);
} else {
memcpy(dest.V(), bufferPinned[bufferIndex], dest.Bytes());
memcpy(dest.Norm(), (char*)bufferPinned[bufferIndex]+dest.Bytes(), dest.NormBytes());
}
}
checkCudaError();
return;
}
void cudaColorSpinorField::allocateGhostBuffer(int nFace) {
int Nint = nColor * nSpin * 2; // number of internal degrees of freedom
if (nSpin == 4) Nint /= 2; // spin projection for Wilson
// compute size of buffer required
size_t faceBytes = 0;
for (int i=0; i<4; i++) {
if(!commDimPartitioned(i)) continue;
faceBytes += 2*nFace*ghostFace[i]*Nint*precision;
// add extra space for the norms for half precision
if (precision == QUDA_HALF_PRECISION) faceBytes += 2*nFace*ghostFace[i]*sizeof(float);
}
// only allocate if not already allocated or buffer required is bigger than previously
if(initGhostFaceBuffer == 0 || faceBytes > ghostFaceBytes){
if (initGhostFaceBuffer){
for(int b=0; b<2; ++b) device_free(ghostFaceBuffer[b]);
}
if (faceBytes > 0) {
for(int b=0; b<2; ++b) ghostFaceBuffer[b] = device_malloc(faceBytes);
initGhostFaceBuffer = 1;
ghostFaceBytes = faceBytes;
}
}
size_t offset = 0;
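// lay out the buffer: for each partitioned dimension the backwards faces come
// first, then the forwards faces, each nFace*ghostFace[i]*Nint*precision bytes
// (plus the norm floats when running in half precision)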
for (int i=0; i<4; i++) {
if(!commDimPartitioned(i)) continue;
for(int b=0; b<2; ++b) backGhostFaceBuffer[b][i] = (void*)(((char*)ghostFaceBuffer[b]) + offset);
offset += nFace*ghostFace[i]*Nint*precision;
if (precision == QUDA_HALF_PRECISION) offset += nFace*ghostFace[i]*sizeof(float);
for(int b=0; b<2; ++b) fwdGhostFaceBuffer[b][i] = (void*)(((char*)ghostFaceBuffer[b]) + offset);
offset += nFace*ghostFace[i]*Nint*precision;
if (precision == QUDA_HALF_PRECISION) offset += nFace*ghostFace[i]*sizeof(float);
}
}
void cudaColorSpinorField::freeGhostBuffer(void)
{
if (!initGhostFaceBuffer) return;
for(int b=0; b<2; ++b) device_free(ghostFaceBuffer[b]);
for(int i=0;i < 4; i++){
if(!commDimPartitioned(i)) continue;
for(int b=0; b<2; ++b){
backGhostFaceBuffer[b][i] = NULL;
fwdGhostFaceBuffer[b][i] = NULL;
}
}
initGhostFaceBuffer = 0;
}
// pack the ghost zone into a contiguous buffer for communications
void cudaColorSpinorField::packGhost(const int nFace, const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, hipStream_t *stream,
void *buffer, double a, double b)
{
#ifdef MULTI_GPU
int face_num;
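// face_num selects which face(s) the packing kernel writes:
// 0 = backwards, 1 = forwards, 2 = both directions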
if(dir == QUDA_BACKWARDS){
face_num = 0;
}else if(dir == QUDA_FORWARDS){
face_num = 1;
}else{
face_num = 2;
}
void *packBuffer = buffer ? buffer : ghostFaceBuffer[bufferIndex];
packFace(packBuffer, *this, nFace, dagger, parity, dim, face_num, *stream, a, b);
#else
errorQuda("packGhost not built on single-GPU build");
#endif
}
// send the ghost zone to the host
void cudaColorSpinorField::sendGhost(void *ghost_spinor, const int nFace, const int dim,
const QudaDirection dir, const int dagger,
hipStream_t *stream) {
#ifdef MULTI_GPU
int Nvec = (nSpin == 1 || precision == QUDA_DOUBLE_PRECISION) ? 2 : 4;
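// Nvec is the vector width of the native field order: FLOAT2 for double
// precision and staggered fields, FLOAT4 otherwise (cf. isNative above)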
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
if (dim !=3 || getKernelPackT() || getTwistPack()) { // use kernels to pack into contiguous buffers then a single hipMemcpy
size_t bytes = nFace*Nint*ghostFace[dim]*precision;
if (precision == QUDA_HALF_PRECISION) bytes += nFace*ghostFace[dim]*sizeof(float);
void* gpu_buf =
(dir == QUDA_BACKWARDS) ? this->backGhostFaceBuffer[bufferIndex][dim] : this->fwdGhostFaceBuffer[bufferIndex][dim];
hipMemcpyAsync(ghost_spinor, gpu_buf, bytes, hipMemcpyDeviceToHost, *stream);
} else if(this->TwistFlavor() != QUDA_TWIST_NONDEG_DOUBLET){ // do multiple cudaMemcpys
int Npad = Nint / Nvec; // number Nvec buffers we have
int Nt_minus1_offset = (volume - nFace*ghostFace[3]); // N_t -1 = Vh-Vsh
int offset = 0;
if (nSpin == 1) {
offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset;
} else if (nSpin == 4) {
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false; // Fwd is !Back
if (dir == QUDA_FORWARDS) upper = !upper;
int lower_spin_offset = Npad*stride;
if (upper) offset = (dir == QUDA_BACKWARDS ? 0 : Nt_minus1_offset);
else offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? 0 : Nt_minus1_offset);
}
// QUDA Memcpy NPad's worth.
// -- Dest will point to the right beginning PAD.
// -- Each Pad has size Nvec*Vsh Floats.
// -- There is Nvec*Stride Floats from the start of one PAD to the start of the next
void *dst = (char*)ghost_spinor;
void *src = (char*)v + offset*Nvec*precision;
size_t len = nFace*ghostFace[3]*Nvec*precision;
size_t spitch = stride*Nvec*precision;
hipMemcpy2DAsync(dst, len, src, spitch, len, Npad, hipMemcpyDeviceToHost, *stream);
if (precision == QUDA_HALF_PRECISION) {
int norm_offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset*sizeof(float);
void *dst = (char*)ghost_spinor + nFace*Nint*ghostFace[3]*precision;
void *src = (char*)norm + norm_offset;
hipMemcpyAsync(dst, src, nFace*ghostFace[3]*sizeof(float), hipMemcpyDeviceToHost, *stream);
}
}else{
int flavorVolume = volume / 2;
int flavorTFace = ghostFace[3] / 2;
int Npad = Nint / Nvec; // number Nvec buffers we have
int flavor1_Nt_minus1_offset = (flavorVolume - flavorTFace);
int flavor2_Nt_minus1_offset = (volume - flavorTFace);
int flavor1_offset = 0;
int flavor2_offset = 0;
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false; // Fwd is !Back
if (dir == QUDA_FORWARDS) upper = !upper;
int lower_spin_offset = Npad*stride;//ndeg tm: stride=2*flavor_volume+pad
if (upper){
flavor1_offset = (dir == QUDA_BACKWARDS ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = (dir == QUDA_BACKWARDS ? flavorVolume : flavor2_Nt_minus1_offset);
}else{
flavor1_offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? flavorVolume : flavor2_Nt_minus1_offset);
}
// QUDA Memcpy NPad's worth.
// -- Dest will point to the right beginning PAD.
// -- Each Pad has size Nvec*Vsh Floats.
// -- There is Nvec*Stride Floats from the start of one PAD to the start of the next
void *dst = (char*)ghost_spinor;
void *src = (char*)v + flavor1_offset*Nvec*precision;
size_t len = flavorTFace*Nvec*precision;
size_t spitch = stride*Nvec*precision;//ndeg tm: stride=2*flavor_volume+pad
size_t dpitch = 2*len;
hipMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, hipMemcpyDeviceToHost, *stream);
dst = (char*)ghost_spinor+len;
src = (char*)v + flavor2_offset*Nvec*precision;
hipMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, hipMemcpyDeviceToHost, *stream);
if (precision == QUDA_HALF_PRECISION) {
int Nt_minus1_offset = (flavorVolume - flavorTFace);
int norm_offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset*sizeof(float);
void *dst = (char*)ghost_spinor + Nint*ghostFace[3]*precision;
void *src = (char*)norm + norm_offset;
size_t dpitch = flavorTFace*sizeof(float);
size_t spitch = flavorVolume*sizeof(float);
hipMemcpy2DAsync(dst, dpitch, src, spitch, flavorTFace*sizeof(float), 2, hipMemcpyDeviceToHost, *stream);
}
}
#else
errorQuda("sendGhost not built on single-GPU build");
#endif
}
void cudaColorSpinorField::unpackGhost(const void* ghost_spinor, const int nFace,
const int dim, const QudaDirection dir,
const int dagger, hipStream_t* stream)
{
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int len = nFace*ghostFace[dim]*Nint;
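// the ghost zone sits after the padded body of the field: skip the length real
// components of the body plus this dimension's ghostOffset; the backwards ghost
// is stored before the forwards ghost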
int offset = length + ghostOffset[dim]*nColor*nSpin*2;
offset += (dir == QUDA_BACKWARDS) ? 0 : len;
void *dst = (char*)v + precision*offset;
const void *src = ghost_spinor;
hipMemcpyAsync(dst, src, len*precision, hipMemcpyHostToDevice, *stream);
if (precision == QUDA_HALF_PRECISION) {
// norm region of host ghost zone is at the end of the ghost_spinor
int normlen = nFace*ghostFace[dim];
int norm_offset = stride + ghostNormOffset[dim];
norm_offset += (dir == QUDA_BACKWARDS) ? 0 : normlen;
void *dst = static_cast<char*>(norm) + norm_offset*sizeof(float);
const void *src = static_cast<const char*>(ghost_spinor)+nFace*Nint*ghostFace[dim]*precision;
hipMemcpyAsync(dst, src, normlen*sizeof(float), hipMemcpyHostToDevice, *stream);
}
}
// pack the ghost zone into a contiguous buffer for communications
void cudaColorSpinorField::packGhostExtended(const int nFace, const int R[], const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, hipStream_t *stream,
void *buffer)
{
#ifdef MULTI_GPU
int face_num;
if(dir == QUDA_BACKWARDS){
face_num = 0;
}else if(dir == QUDA_FORWARDS){
face_num = 1;
}else{
face_num = 2;
}
void *packBuffer = buffer ? buffer : ghostFaceBuffer[bufferIndex];
packFaceExtended(packBuffer, *this, nFace, R, dagger, parity, dim, face_num, *stream);
#else
errorQuda("packGhostExtended not built on single-GPU build");
#endif
}
// copy data from host buffer into boundary region of device field
void cudaColorSpinorField::unpackGhostExtended(const void* ghost_spinor, const int nFace, const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, hipStream_t* stream)
{
// First call the regular unpackGhost routine to copy data into the `usual' ghost-zone region
// of the data array
unpackGhost(ghost_spinor, nFace, dim, dir, dagger, stream);
// Next step is to copy data from the ghost zone back to the interior region
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int len = nFace*ghostFace[dim]*Nint;
int offset = length + ghostOffset[dim]*nColor*nSpin*2;
offset += (dir == QUDA_BACKWARDS) ? 0 : len;
#ifdef MULTI_GPU
const int face_num = 2;
const bool unpack = true;
const int R[4] = {0,0,0,0};
packFaceExtended(ghostFaceBuffer[bufferIndex], *this, nFace, R, dagger, parity, dim, face_num, *stream, unpack);
#else
errorQuda("unpackGhostExtended not built on single-GPU build");
#endif
}
hipStream_t *stream;
void cudaColorSpinorField::createComms(int nFace) {
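// the message handles reference the pinned host buffers, so if those buffers
// have been reallocated since the handles were created (tracked via
// bufferPinnedResizeCount) the stale handles must be destroyed and rebuilt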
if(bufferMessageHandler != bufferPinnedResizeCount) destroyComms();
if (!initComms || nFaceComms != nFace) {
// if we are requesting a new number of faces destroy and start over
if(nFace != nFaceComms) destroyComms();
if (siteSubset != QUDA_PARITY_SITE_SUBSET)
errorQuda("Only supports single parity fields");
#ifdef GPU_COMMS
bool comms = false;
for (int i=0; i<nDimComms; i++) if (commDimPartitioned(i)) comms = true;
#endif
if (nFace > maxNface)
errorQuda("Requested number of faces %d in communicator is greater than supported %d",
nFace, maxNface);
// faceBytes is the sum of all face sizes
size_t faceBytes = 0;
// nbytes is the size in bytes of each face
size_t nbytes[QUDA_MAX_DIM];
// The number of degrees of freedom per site for the given
// field. Currently assumes spin projection of a Wilson-like
// field (so half the number of degrees of freedom).
int Ndof = (2 * nSpin * nColor) / (nSpin==4 ? 2 : 1);
for (int i=0; i<nDimComms; i++) {
nbytes[i] = maxNface*surfaceCB[i]*Ndof*precision;
if (precision == QUDA_HALF_PRECISION) nbytes[i] += maxNface*surfaceCB[i]*sizeof(float);
if (!commDimPartitioned(i)) continue;
faceBytes += 2*nbytes[i];
}
#ifndef GPU_COMMS
// use static pinned memory for face buffers
for(int b=0; b<2; ++b){
resizeBufferPinned(2*faceBytes, b); // oversizes for GPU_COMMS case
my_face[b] = bufferPinned[b];
from_face[b] = static_cast<char*>(bufferPinned[b]) + faceBytes;
}
// assign pointers for each face - it's ok to alias for different Nface parameters
size_t offset = 0;
#endif
for (int i=0; i<nDimComms; i++) {
if (!commDimPartitioned(i)) continue;
#ifdef GPU_COMMS
for(int b=0; b<2; ++b){
my_back_face[b][i] = backGhostFaceBuffer[b][i];
from_back_face[b][i] = ghost[i];
if(precision == QUDA_HALF_PRECISION){
my_back_norm_face[b][i] = static_cast<char*>(backGhostFaceBuffer[b][i]) + nFace*ghostFace[i]*Ndof*precision;
from_back_norm_face[b][i] = ghostNorm[i];
}
} // loop over b
#else
for(int b=0; b<2; ++b){
my_back_face[b][i] = static_cast<char*>(my_face[b]) + offset;
from_back_face[b][i] = static_cast<char*>(from_face[b]) + offset;
}
offset += nbytes[i];
#endif
#ifdef GPU_COMMS
for(int b=0; b<2; ++b){
my_fwd_face[b][i] = fwdGhostFaceBuffer[b][i];
from_fwd_face[b][i] = ghost[i] + nFace*ghostFace[i]*Ndof*precision;
if(precision == QUDA_HALF_PRECISION){
my_fwd_norm_face[b][i] = static_cast<char*>(fwdGhostFaceBuffer[b][i]) + nFace*ghostFace[i]*Ndof*precision;
from_fwd_norm_face[b][i] = static_cast<char*>(ghostNorm[i]) + nFace*ghostFace[i]*sizeof(float);
}
} // loop over b
#else
for(int b=0; b<2; ++b){
my_fwd_face[b][i] = static_cast<char*>(my_face[b]) + offset;
from_fwd_face[b][i] = static_cast<char*>(from_face[b]) + offset;
}
offset += nbytes[i];
#endif
}
// create a different message handler for each direction and Nface
for(int b=0; b<2; ++b){
mh_send_fwd[b] = new MsgHandle**[maxNface];
mh_send_back[b] = new MsgHandle**[maxNface];
mh_recv_fwd[b] = new MsgHandle**[maxNface];
mh_recv_back[b] = new MsgHandle**[maxNface];
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
mh_send_norm_fwd[b] = new MsgHandle**[maxNface];
mh_send_norm_back[b] = new MsgHandle**[maxNface];
mh_recv_norm_fwd[b] = new MsgHandle**[maxNface];
mh_recv_norm_back[b] = new MsgHandle**[maxNface];
}
#endif
} // loop over b
for (int j=0; j<maxNface; j++) {
for(int b=0; b<2; ++b){
mh_send_fwd[b][j] = new MsgHandle*[2*nDimComms];
mh_send_back[b][j] = new MsgHandle*[2*nDimComms];
mh_recv_fwd[b][j] = new MsgHandle*[nDimComms];
mh_recv_back[b][j] = new MsgHandle*[nDimComms];
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
mh_send_norm_fwd[b][j] = new MsgHandle*[2*nDimComms];
mh_send_norm_back[b][j] = new MsgHandle*[2*nDimComms];
mh_recv_norm_fwd[b][j] = new MsgHandle*[nDimComms];
mh_recv_norm_back[b][j] = new MsgHandle*[nDimComms];
}
#endif
} // loop over b
for (int i=0; i<nDimComms; i++) {
if (!commDimPartitioned(i)) continue;
#ifdef GPU_COMMS
size_t nbytes_Nface = surfaceCB[i]*Ndof*precision*(j+1);
size_t nbytes_Nface_norm = surfaceCB[i]*(j+1)*sizeof(float);
if (i != 3 || getKernelPackT() || getTwistPack()) {
#else
size_t nbytes_Nface = (nbytes[i] / maxNface) * (j+1);
#endif
for(int b=0; b<2; ++b){
mh_send_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_fwd_face[b][i], i, +1, nbytes_Nface) : NULL;
mh_send_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_back_face[b][i], i, -1, nbytes_Nface) : NULL;
mh_send_fwd[b][j][2*i+1] = mh_send_fwd[b][j][2*i]; // alias pointers
mh_send_back[b][j][2*i+1] = mh_send_back[b][j][2*i]; // alias pointers
}
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
for(int b=0; b<2; ++b){
mh_send_norm_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_fwd_norm_face[b][i], i, +1, nbytes_Nface_norm) : NULL;
mh_send_norm_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_back_norm_face[b][i], i, -1, nbytes_Nface_norm) : NULL;
mh_send_norm_fwd[b][j][2*i+1] = mh_send_norm_fwd[b][j][2*i];
mh_send_norm_back[b][j][2*i+1] = mh_send_norm_back[b][j][2*i];
}
}
} else if (this->TwistFlavor() == QUDA_TWIST_NONDEG_DOUBLET) {
errorQuda("GPU_COMMS for non-degenerate doublet only supported with time-dimension kernel packing enabled.");
} else {
/*
use a strided communicator, here we can't really use
the previously declared my_fwd_face and my_back_face
pointers since they don't really map 1-to-1 so let's
just compute the required base pointers and pass these
directly into the communicator construction
*/
int Nblocks = Ndof / Nvec(); // number of Nvec buffers we have
// start of last time slice chunk we are sending forwards
int endOffset = (volume - (j+1)*ghostFace[i]);
size_t offset[4];
void *base[4];
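// offset/base are indexed as 2*dir + dagger, where dir = 0 is the backwards
// face and dir = 1 is the forwards face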
if (nSpin == 1) { // staggered is invariant with dagger
offset[2*0 + 0] = 0;
offset[2*1 + 0] = endOffset;
offset[2*0 + 1] = offset[2*0 + 0];
offset[2*1 + 1] = offset[2*1 + 0];
} else if (nSpin == 4) {
// !dagger: send last components backwards, send first components forwards
offset[2*0 + 0] = Nblocks*stride;
offset[2*1 + 0] = endOffset;
// dagger: send first components backwards, send last components forwards
offset[2*0 + 1] = 0;
offset[2*1 + 1] = Nblocks*stride + endOffset;
} else {
errorQuda("Unsupported number of spin components");
}
for (int k=0; k<4; k++) {
base[k] = static_cast<char*>(v) + offset[k]*Nvec()*precision; // total offset in bytes
}
size_t blksize = (j+1)*ghostFace[i]*Nvec()*precision; // (j+1) is number of faces
size_t Stride = stride*Nvec()*precision;
if (blksize * Nblocks != nbytes_Nface)
errorQuda("Total strided message size does not match expected size");
//printf("%d strided sends with Nface=%d Nblocks=%d blksize=%d Stride=%d\n", i, j+1, Nblocks, blksize, Stride);
for(int b=0; b<2; ++b){
// only allocate a communicator for the present face (this needs to be cleaned up)
mh_send_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[2], i, +1, blksize, Nblocks, Stride) : NULL;
mh_send_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[0], i, -1, blksize, Nblocks, Stride) : NULL;
if (nSpin ==4) { // dagger communicators
mh_send_fwd[b][j][2*i+1] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[3], i, +1, blksize, Nblocks, Stride) : NULL;
mh_send_back[b][j][2*i+1] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[1], i, -1, blksize, Nblocks, Stride) : NULL;
} else {
mh_send_fwd[b][j][2*i+1] = mh_send_fwd[b][j][2*i+0];
mh_send_back[b][j][2*i+1] = mh_send_back[b][j][2*i+0];
}
} // loop over b
if(precision == QUDA_HALF_PRECISION){
int Nt_minus1_offset = (volume - nFace*ghostFace[3]); // The space-time coordinate of the start of the last time slice
void *norm_fwd = static_cast<float*>(norm) + Nt_minus1_offset;
void *norm_back = norm; // the first time slice has zero offset
for(int b=0; b<2; ++b){
mh_send_norm_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(norm_fwd, i, +1, surfaceCB[i]*(j+1)*sizeof(float)) : NULL;
mh_send_norm_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(norm_back, i, -1, surfaceCB[i]*(j+1)*sizeof(float)) : NULL;
mh_send_norm_fwd[b][j][2*i+1] = mh_send_norm_fwd[b][j][2*i];
mh_send_norm_back[b][j][2*i+1] = mh_send_norm_back[b][j][2*i];
}
}
}
if(precision == QUDA_HALF_PRECISION){
for(int b=0; b<2; ++b){
mh_recv_norm_fwd[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_fwd_norm_face[b][i], i, +1, nbytes_Nface_norm) : NULL;
mh_recv_norm_back[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_back_norm_face[b][i], i, -1, nbytes_Nface_norm) : NULL;
}
}
#endif // GPU_COMMS
for(int b=0; b<2; ++b){
mh_recv_fwd[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_fwd_face[b][i], i, +1, nbytes_Nface) : NULL;
mh_recv_back[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_back_face[b][i], i, -1, nbytes_Nface) : NULL;
}
} // loop over dimension
}
bufferMessageHandler = bufferPinnedResizeCount;
initComms = true;
nFaceComms = nFace;
}
checkCudaError();
}
void cudaColorSpinorField::destroyComms() {
if (initComms) {
for(int b=0; b<2; ++b){
for (int j=0; j<maxNface; j++) {
for (int i=0; i<nDimComms; i++) {
if (commDimPartitioned(i)) {
if (mh_recv_fwd[b][j][i]) comm_free(mh_recv_fwd[b][j][i]);
if (mh_recv_back[b][j][i]) comm_free(mh_recv_back[b][j][i]);
if (mh_send_fwd[b][j][2*i]) comm_free(mh_send_fwd[b][j][2*i]);
if (mh_send_back[b][j][2*i]) comm_free(mh_send_back[b][j][2*i]);
// only in a special case are these not aliasing pointers
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
if (mh_recv_norm_fwd[b][j][i]) comm_free(mh_recv_norm_fwd[b][j][i]);
if (mh_recv_norm_back[b][j][i]) comm_free(mh_recv_norm_back[b][j][i]);
if (mh_send_norm_fwd[b][j][2*i]) comm_free(mh_send_norm_fwd[b][j][2*i]);
if (mh_send_norm_back[b][j][2*i]) comm_free(mh_send_norm_back[b][j][2*i]);
}
if (i == 3 && !getKernelPackT() && nSpin == 4) {
if (mh_send_fwd[b][j][2*i+1]) comm_free(mh_send_fwd[b][j][2*i+1]);
if (mh_send_back[b][j][2*i+1]) comm_free(mh_send_back[b][j][2*i+1]);
}
#endif // GPU_COMMS
}
}
delete []mh_recv_fwd[b][j];
delete []mh_recv_back[b][j];
delete []mh_send_fwd[b][j];
delete []mh_send_back[b][j];
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
delete []mh_recv_norm_fwd[b][j];
delete []mh_recv_norm_back[b][j];
delete []mh_send_norm_fwd[b][j];
delete []mh_send_norm_back[b][j];
}
#endif
}
delete []mh_recv_fwd[b];
delete []mh_recv_back[b];
delete []mh_send_fwd[b];
delete []mh_send_back[b];
for (int i=0; i<nDimComms; i++) {
my_fwd_face[b][i] = NULL;
my_back_face[b][i] = NULL;
from_fwd_face[b][i] = NULL;
from_back_face[b][i] = NULL;
}
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
delete []mh_recv_norm_fwd[b];
delete []mh_recv_norm_back[b];
delete []mh_send_norm_fwd[b];
delete []mh_send_norm_back[b];
}
for(int i=0; i<nDimComms; i++){
my_fwd_norm_face[b][i] = NULL;
my_back_norm_face[b][i] = NULL;
from_fwd_norm_face[b][i] = NULL;
from_back_norm_face[b][i] = NULL;
}
#endif
} // loop over b
initComms = false;
checkCudaError();
}
}
void cudaColorSpinorField::streamInit(hipStream_t *stream_p){
stream = stream_p;
}
void cudaColorSpinorField::pack(int nFace, int parity, int dagger, hipStream_t *stream_p,
bool zeroCopyPack, double a, double b) {
allocateGhostBuffer(nFace); // allocate the ghost buffer if not yet allocated
createComms(nFace); // must call this first
stream = stream_p;
const int dim=-1; // pack all partitioned dimensions
if (zeroCopyPack) {
void *my_face_d;
hipHostGetDevicePointer(&my_face_d, my_face[bufferIndex], 0); // set the matching device pointer
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[0], my_face_d, a, b);
} else {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[Nstream-1], 0, a, b);
}
}
void cudaColorSpinorField::pack(int nFace, int parity, int dagger, int stream_idx,
bool zeroCopyPack, double a, double b) {
allocateGhostBuffer(nFace); // allocate the ghost buffer if not yet allocated
createComms(nFace); // must call this first
const int dim=-1; // pack all partitioned dimensions
if (zeroCopyPack) {
void *my_face_d;
hipHostGetDevicePointer(&my_face_d, my_face[bufferIndex], 0); // set the matching device pointer
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[stream_idx], my_face_d, a, b);
} else {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[stream_idx], 0, a, b);
}
}
void cudaColorSpinorField::packExtended(const int nFace, const int R[], const int parity,
const int dagger, const int dim,
hipStream_t *stream_p, const bool zeroCopyPack){
allocateGhostBuffer(nFace); // allocate the ghost buffer if not yet allocated
createComms(nFace); // must call this first
stream = stream_p;
void *my_face_d = NULL;
if(zeroCopyPack){
hipHostGetDevicePointer(&my_face_d, my_face[bufferIndex], 0);
packGhostExtended(nFace, R, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[0], my_face_d);
}else{
packGhostExtended(nFace, R, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[Nstream-1], my_face_d);
}
}
void cudaColorSpinorField::gather(int nFace, int dagger, int dir, hipStream_t* stream_p)
{
int dim = dir/2;
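// dir encodes both the dimension and the direction: dim = dir/2, and
// dir%2 == 0 means the backwards face, dir%2 == 1 the forwards face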
// If stream_p != 0, use pack_stream, else use the stream array
hipStream_t *pack_stream = (stream_p) ? stream_p : stream+dir;
if(dir%2 == 0){
// backwards copy to host
sendGhost(my_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, pack_stream);
} else {
// forwards copy to host
sendGhost(my_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, pack_stream);
}
}
void cudaColorSpinorField::recvStart(int nFace, int dir, int dagger) {
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_fwd[bufferIndex][nFace-1][dim]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_back[bufferIndex][nFace-1][dim]);
}
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_norm_back[bufferIndex][nFace-1][dim]);
}
#endif
}
void cudaColorSpinorField::sendStart(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return;
if (dir%2 == 0) { // sending backwards
comm_start(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
comm_start(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION) return;
if (dir%2 == 0) { // sending backwards
comm_start(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
comm_start(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#endif
}
void cudaColorSpinorField::commsStart(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_fwd[bufferIndex][nFace-1][dim]);
comm_start(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_back[bufferIndex][nFace-1][dim]);
// Begin forward send
comm_start(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]);
comm_start(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_norm_back[bufferIndex][nFace-1][dim]);
// Begin forward send
comm_start(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#endif
}
int cudaColorSpinorField::commsQuery(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return 0;
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION){
#endif
if(dir%2==0) {
if (comm_query(mh_recv_fwd[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_back[bufferIndex][nFace-1][2*dim+dagger])) return 1;
} else {
if (comm_query(mh_recv_back[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger])) return 1;
}
#ifdef GPU_COMMS
}else{ // half precision
if(dir%2==0) {
if (comm_query(mh_recv_fwd[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]) &&
comm_query(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger])) return 1;
} else {
if (comm_query(mh_recv_back[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]) &&
comm_query(mh_recv_norm_back[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger])) return 1;
}
} // half precision
#endif
return 0;
}
void cudaColorSpinorField::commsWait(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return;
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION){
#endif
if (dir%2==0) {
comm_wait(mh_recv_fwd[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
} else {
comm_wait(mh_recv_back[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#ifdef GPU_COMMS
} else { // half precision
if (dir%2==0) {
comm_wait(mh_recv_fwd[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
comm_wait(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger]);
} else {
comm_wait(mh_recv_back[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
comm_wait(mh_recv_norm_back[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
} // half precision
#endif
return;
}
void cudaColorSpinorField::scatter(int nFace, int dagger, int dir, hipStream_t* stream_p)
{
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
// both scattering occurrences now go through the same stream
if (dir%2==0) {// receive from forwards
unpackGhost(from_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, stream_p);
} else { // receive from backwards
unpackGhost(from_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, stream_p);
}
}
void cudaColorSpinorField::scatter(int nFace, int dagger, int dir)
{
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
// both scattering occurrences now go through the same stream
if (dir%2==0) {// receive from forwards
unpackGhost(from_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, &stream[2*dim/*+0*/]);
} else { // receive from backwards
unpackGhost(from_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, &stream[2*dim/*+1*/]);
}
}
void cudaColorSpinorField::scatterExtended(int nFace, int parity, int dagger, int dir)
{
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
if (dir%2==0) {// receive from forwards
unpackGhostExtended(from_fwd_face[bufferIndex][dim], nFace, static_cast<QudaParity>(parity), dim, QUDA_FORWARDS, dagger, &stream[2*dim/*+0*/]);
} else { // receive from backwards
unpackGhostExtended(from_back_face[bufferIndex][dim], nFace, static_cast<QudaParity>(parity), dim, QUDA_BACKWARDS, dagger, &stream[2*dim/*+1*/]);
}
}
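// a typical halo-exchange sequence built from these methods (a sketch only;
// the actual driver code lives outside this file) is:
//   pack() -> gather() -> commsStart() -> poll commsQuery() or commsWait() -> scatter()
// applied per partitioned dimension and direction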
// Return the location of the field
QudaFieldLocation cudaColorSpinorField::Location() const { return QUDA_CUDA_FIELD_LOCATION; }
std::ostream& operator<<(std::ostream &out, const cudaColorSpinorField &a) {
out << (const ColorSpinorField&)a;
out << "v = " << a.v << std::endl;
out << "norm = " << a.norm << std::endl;
out << "alloc = " << a.alloc << std::endl;
out << "init = " << a.init << std::endl;
return out;
}
//! for deflated solvers:
cudaColorSpinorField& cudaColorSpinorField::Eigenvec(const int idx) const {
if (siteSubset == QUDA_PARITY_SITE_SUBSET && this->EigvId() == -1) {
if (idx < this->EigvDim()) {//setup eigenvector from the set
return *(dynamic_cast<cudaColorSpinorField*>(eigenvectors[idx]));
}
else{
errorQuda("Incorrect eigenvector index...");
}
}
errorQuda("Eigenvector must be a parity spinor");
exit(-1);
}
//copyCuda currently cannot work with a set of spinor fields..
void cudaColorSpinorField::CopyEigenvecSubset(cudaColorSpinorField &dst, const int range, const int first_element) const{
#if 0
if(first_element < 0) errorQuda("\nError: trying to set negative first element.\n");
if (siteSubset == QUDA_PARITY_SITE_SUBSET && this->EigvId() == -1) {
if (first_element == 0 && range == this->EigvDim())
{
if(range != dst.EigvDim()) errorQuda("\nError: eigenvector range too big.\n");
checkField(dst, *this);
copyCuda(dst, *this);
}
else if ((first_element+range) < this->EigvDim())
{//setup eigenvector subset
cudaColorSpinorField *eigv_subset;
ColorSpinorParam param;
param.nColor = nColor;
param.nSpin = nSpin;
param.twistFlavor = twistFlavor;
param.precision = precision;
param.nDim = nDim;
param.pad = pad;
param.siteSubset = siteSubset;
param.siteOrder = siteOrder;
param.fieldOrder = fieldOrder;
param.gammaBasis = gammaBasis;
memcpy(param.x, x, nDim*sizeof(int));
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.eigv_dim = range;
param.eigv_id = -1;
param.v = (void*)((char*)v + first_element*eigv_bytes);
param.norm = (void*)((char*)norm + first_element*eigv_norm_bytes);
eigv_subset = new cudaColorSpinorField(param);
//Not really needed:
eigv_subset->eigenvectors.reserve(param.eigv_dim);
for(int id = first_element; id < (first_element+range); id++)
{
param.eigv_id = id;
eigv_subset->eigenvectors.push_back(new cudaColorSpinorField(*this, param));
}
checkField(dst, *eigv_subset);
copyCuda(dst, *eigv_subset);
delete eigv_subset;
}
else{
errorQuda("Incorrect eigenvector dimension...");
}
}
else{
errorQuda("Eigenvector must be a parity spinor");
exit(-1);
}
#endif
}
void cudaColorSpinorField::getTexObjectInfo() const
{
#ifdef USE_TEXTURE_OBJECTS
printfQuda("\nPrint texture info for the field:\n");
std::cout << *this;
hipResourceDesc resDesc;
//memset(&resDesc, 0, sizeof(resDesc));
hipGetTextureObjectResourceDesc(&resDesc, this->Tex());
printfQuda("\nDevice pointer: %p\n", resDesc.res.linear.devPtr);
printfQuda("\nVolume (in bytes): %d\n", resDesc.res.linear.sizeInBytes);
if (resDesc.resType == hipResourceTypeLinear) printfQuda("\nResource type: linear \n");
checkCudaError();
#endif
}
} // namespace quda
| 9f2a4b2fc1ab542e833a6e36166ca0544bcfc8a8.cu | #include <stdlib.h>
#include <stdio.h>
#include <typeinfo>
#include <color_spinor_field.h>
#include <blas_quda.h>
#include <string.h>
#include <iostream>
#include <misc_helpers.h>
#include <face_quda.h>
#include <dslash_quda.h>
#ifdef DEVICE_PACK
#define REORDER_LOCATION QUDA_CUDA_FIELD_LOCATION
#else
#define REORDER_LOCATION QUDA_CPU_FIELD_LOCATION
#endif
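// REORDER_LOCATION controls where basis/order conversion between host and
// device fields is performed: on the device when DEVICE_PACK is defined,
// otherwise on the host through pinned staging buffers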
int zeroCopy = 0;
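// when zeroCopy is non-zero, host<->device reordering reads/writes mapped
// pinned memory directly instead of staging through bufferDevice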
namespace quda {
int cudaColorSpinorField::bufferIndex = 0;
int cudaColorSpinorField::initGhostFaceBuffer = 0;
void* cudaColorSpinorField::ghostFaceBuffer[2]; //gpu memory
void* cudaColorSpinorField::fwdGhostFaceBuffer[2][QUDA_MAX_DIM]; //pointers to ghostFaceBuffer
void* cudaColorSpinorField::backGhostFaceBuffer[2][QUDA_MAX_DIM]; //pointers to ghostFaceBuffer
size_t cudaColorSpinorField::ghostFaceBytes = 0;
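// the ghost face buffers are static: a single double-buffered allocation
// (indexed by bufferIndex) shared by all cudaColorSpinorField instances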
/*cudaColorSpinorField::cudaColorSpinorField() :
ColorSpinorField(), v(0), norm(0), alloc(false), init(false) {
}*/
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorParam ¶m) :
ColorSpinorField(param), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
// this must come before create
if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
v = param.v;
norm = param.norm;
}
create(param.create);
if (param.create == QUDA_NULL_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_ZERO_FIELD_CREATE) {
zero();
} else if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_COPY_FIELD_CREATE){
errorQuda("not implemented");
}
checkCudaError();
}
cudaColorSpinorField::cudaColorSpinorField(const cudaColorSpinorField &src) :
ColorSpinorField(src), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
create(QUDA_COPY_FIELD_CREATE);
copySpinorField(src);
}
// creates a copy of src, any differences defined in param
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorField &src,
const ColorSpinorParam ¶m) :
ColorSpinorField(src), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
// can only override if we are not using a reference or parity special case
if (param.create != QUDA_REFERENCE_FIELD_CREATE ||
(param.create == QUDA_REFERENCE_FIELD_CREATE &&
src.SiteSubset() == QUDA_FULL_SITE_SUBSET &&
param.siteSubset == QUDA_PARITY_SITE_SUBSET &&
typeid(src) == typeid(cudaColorSpinorField) ) ||
(param.create == QUDA_REFERENCE_FIELD_CREATE && param.eigv_dim > 0)) {
reset(param);
} else {
errorQuda("Undefined behaviour"); // else silent bug possible?
}
// This must be set before create is called
if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
if (typeid(src) == typeid(cudaColorSpinorField)) {
v = (void*)src.V();
norm = (void*)src.Norm();
} else {
errorQuda("Cannot reference a non-cuda field");
}
if (this->EigvDim() > 0)
{//setup eigenvector from the set
if(eigv_dim != this->EigvDim()) errorQuda("\nEigenvector set does not match..\n") ;//for debug only.
if(eigv_id > -1)
{
//printfQuda("\nSetting pointers for vector id %d\n", eigv_id); //for debug only.
v = (void*)((char*)v + eigv_id*bytes);
norm = (void*)((char*)norm + eigv_id*norm_bytes);
}
//do nothing for the eigenvector subset...
}
}
create(param.create);
if (param.create == QUDA_NULL_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_ZERO_FIELD_CREATE) {
zero();
} else if (param.create == QUDA_COPY_FIELD_CREATE) {
copySpinorField(src);
} else if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
// do nothing
} else {
errorQuda("CreateType %d not implemented", param.create);
}
}
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorField &src)
: ColorSpinorField(src), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
create(QUDA_COPY_FIELD_CREATE);
copySpinorField(src);
}
ColorSpinorField& cudaColorSpinorField::operator=(const ColorSpinorField &src) {
if (typeid(src) == typeid(cudaColorSpinorField)) {
*this = (dynamic_cast<const cudaColorSpinorField&>(src));
} else if (typeid(src) == typeid(cpuColorSpinorField)) {
*this = (dynamic_cast<const cpuColorSpinorField&>(src));
} else {
errorQuda("Unknown input ColorSpinorField %s", typeid(src).name());
}
return *this;
}
cudaColorSpinorField& cudaColorSpinorField::operator=(const cudaColorSpinorField &src) {
if (&src != this) {
// keep current attributes unless unset
if (!ColorSpinorField::init) { // note this will turn a reference field into a regular field
destroy();
destroyComms(); // not sure if this necessary
ColorSpinorField::operator=(src);
create(QUDA_COPY_FIELD_CREATE);
}
copySpinorField(src);
}
return *this;
}
cudaColorSpinorField& cudaColorSpinorField::operator=(const cpuColorSpinorField &src) {
// keep current attributes unless unset
if (!ColorSpinorField::init) { // note this will turn a reference field into a regular field
destroy();
ColorSpinorField::operator=(src);
create(QUDA_COPY_FIELD_CREATE);
}
loadSpinorField(src);
return *this;
}
cudaColorSpinorField::~cudaColorSpinorField() {
destroyComms();
destroy();
}
bool cudaColorSpinorField::isNative() const {
if (precision == QUDA_DOUBLE_PRECISION) {
if (fieldOrder == QUDA_FLOAT2_FIELD_ORDER) return true;
} else if (precision == QUDA_SINGLE_PRECISION) {
if (nSpin == 4) {
if (fieldOrder == QUDA_FLOAT4_FIELD_ORDER) return true;
} else if (nSpin == 1) {
if (fieldOrder == QUDA_FLOAT2_FIELD_ORDER) return true;
}
} else if (precision == QUDA_HALF_PRECISION) {
if (nSpin == 4) {
if (fieldOrder == QUDA_FLOAT4_FIELD_ORDER) return true;
} else if (nSpin == 1) {
if (fieldOrder == QUDA_FLOAT2_FIELD_ORDER) return true;
}
}
return false;
}
void cudaColorSpinorField::create(const QudaFieldCreate create) {
if (siteSubset == QUDA_FULL_SITE_SUBSET && siteOrder != QUDA_EVEN_ODD_SITE_ORDER) {
errorQuda("Subset not implemented");
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
v = device_malloc(bytes);
if (precision == QUDA_HALF_PRECISION) {
norm = device_malloc(norm_bytes);
}
alloc = true;
}
if (siteSubset == QUDA_FULL_SITE_SUBSET) {
if(eigv_dim != 0) errorQuda("Eigenvectors must be parity fields!");
// create the associated even and odd subsets
ColorSpinorParam param;
param.siteSubset = QUDA_PARITY_SITE_SUBSET;
param.nDim = nDim;
memcpy(param.x, x, nDim*sizeof(int));
param.x[0] /= 2; // set single parity dimensions
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.v = v;
param.norm = norm;
even = new cudaColorSpinorField(*this, param);
odd = new cudaColorSpinorField(*this, param);
// need this hackery for the moment (need to locate the odd pointers halfway into the full field)
(dynamic_cast<cudaColorSpinorField*>(odd))->v = (void*)((char*)v + bytes/2);
if (precision == QUDA_HALF_PRECISION)
(dynamic_cast<cudaColorSpinorField*>(odd))->norm = (void*)((char*)norm + norm_bytes/2);
for(int i=0; i<nDim; ++i){
if(commDimPartitioned(i)){
(dynamic_cast<cudaColorSpinorField*>(odd))->ghost[i] =
static_cast<char*>((dynamic_cast<cudaColorSpinorField*>(odd))->ghost[i]) + bytes/2;
if(precision == QUDA_HALF_PRECISION)
(dynamic_cast<cudaColorSpinorField*>(odd))->ghostNorm[i] =
static_cast<char*>((dynamic_cast<cudaColorSpinorField*>(odd))->ghostNorm[i]) + norm_bytes/2;
}
}
#ifdef USE_TEXTURE_OBJECTS
dynamic_cast<cudaColorSpinorField*>(even)->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(even)->createTexObject();
dynamic_cast<cudaColorSpinorField*>(odd)->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(odd)->createTexObject();
#endif
}
else{//siteSubset == QUDA_PARITY_SITE_SUBSET
//! setup an object for selected eigenvector (the 1st one as a default):
if ((eigv_dim > 0) && (create != QUDA_REFERENCE_FIELD_CREATE) && (eigv_id == -1))
{
//if(bytes > 1811939328) warningQuda("\nCUDA API probably won't be able to create texture object for the eigenvector set... Object size is : %u bytes\n", bytes);
if (getVerbosity() == QUDA_DEBUG_VERBOSE) printfQuda("\nEigenvector set constructor...\n");
// create the associated even and odd subsets
ColorSpinorParam param;
param.siteSubset = QUDA_PARITY_SITE_SUBSET;
param.nDim = nDim;
memcpy(param.x, x, nDim*sizeof(int));
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.v = v;
param.norm = norm;
param.eigv_dim = eigv_dim;
//reserve eigvector set
eigenvectors.reserve(eigv_dim);
//setup volume, [real_]length and stride for a single eigenvector
for(int id = 0; id < eigv_dim; id++)
{
param.eigv_id = id;
eigenvectors.push_back(new cudaColorSpinorField(*this, param));
#ifdef USE_TEXTURE_OBJECTS //(a lot of texture objects...)
dynamic_cast<cudaColorSpinorField*>(eigenvectors[id])->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(eigenvectors[id])->createTexObject();
#endif
}
}
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
if (siteSubset != QUDA_FULL_SITE_SUBSET) {
zeroPad();
} else {
(dynamic_cast<cudaColorSpinorField*>(even))->zeroPad();
(dynamic_cast<cudaColorSpinorField*>(odd))->zeroPad();
}
}
#ifdef USE_TEXTURE_OBJECTS
if((eigv_dim == 0) || (eigv_dim > 0 && eigv_id > -1))
createTexObject();
#endif
// initialize the ghost pointers
if(siteSubset == QUDA_PARITY_SITE_SUBSET) {
for(int i=0; i<nDim; ++i){
if(commDimPartitioned(i)){
ghost[i] = (char*)v + (stride + ghostOffset[i])*nColor*nSpin*2*precision;
if(precision == QUDA_HALF_PRECISION)
ghostNorm[i] = (char*)norm + (stride + ghostNormOffset[i])*QUDA_SINGLE_PRECISION;
}
}
}
checkCudaError();
}
#ifdef USE_TEXTURE_OBJECTS
void cudaColorSpinorField::createTexObject() {
if (isNative()) {
if (texInit) errorQuda("Already bound textures");
// create the texture for the field components
cudaChannelFormatDesc desc;
memset(&desc, 0, sizeof(cudaChannelFormatDesc));
if (precision == QUDA_SINGLE_PRECISION) desc.f = cudaChannelFormatKindFloat;
else desc.f = cudaChannelFormatKindSigned; // half is short, double is int2
// staggered fields in half and single are always two component
if (nSpin == 1 && (precision == QUDA_HALF_PRECISION || precision == QUDA_SINGLE_PRECISION)) {
desc.x = 8*precision;
desc.y = 8*precision;
desc.z = 0;
desc.w = 0;
} else { // all others are four component
desc.x = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.y = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.z = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.w = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
}
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = v;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = bytes;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
if (precision == QUDA_HALF_PRECISION) texDesc.readMode = cudaReadModeNormalizedFloat;
else texDesc.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
checkCudaError();
// create the texture for the norm components
if (precision == QUDA_HALF_PRECISION) {
cudaChannelFormatDesc desc;
memset(&desc, 0, sizeof(cudaChannelFormatDesc));
desc.f = cudaChannelFormatKindFloat;
desc.x = 8*QUDA_SINGLE_PRECISION; desc.y = 0; desc.z = 0; desc.w = 0;
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = norm;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = norm_bytes;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&texNorm, &resDesc, &texDesc, NULL);
checkCudaError();
}
texInit = true;
}
}
void cudaColorSpinorField::destroyTexObject() {
if (isNative() && texInit) {
cudaDestroyTextureObject(tex);
if (precision == QUDA_HALF_PRECISION) cudaDestroyTextureObject(texNorm);
texInit = false;
checkCudaError();
}
}
#endif
void cudaColorSpinorField::destroy() {
if (alloc) {
device_free(v);
if (precision == QUDA_HALF_PRECISION) device_free(norm);
if (siteSubset != QUDA_FULL_SITE_SUBSET) {
//! for deflated solvers:
if (eigv_dim > 0)
{
std::vector<ColorSpinorField*>::iterator vec;
for(vec = eigenvectors.begin(); vec != eigenvectors.end(); vec++) delete *vec;
}
}
alloc = false;
}
if (siteSubset == QUDA_FULL_SITE_SUBSET) {
delete even;
delete odd;
}
#ifdef USE_TEXTURE_OBJECTS
if((eigv_dim == 0) || (eigv_dim > 0 && eigv_id > -1))
destroyTexObject();
#endif
}
cudaColorSpinorField& cudaColorSpinorField::Even() const {
if (siteSubset == QUDA_FULL_SITE_SUBSET) {
return *(dynamic_cast<cudaColorSpinorField*>(even));
}
errorQuda("Cannot return even subset of %d subset", siteSubset);
exit(-1);
}
cudaColorSpinorField& cudaColorSpinorField::Odd() const {
if (siteSubset == QUDA_FULL_SITE_SUBSET) {
return *(dynamic_cast<cudaColorSpinorField*>(odd));
}
errorQuda("Cannot return odd subset of %d subset", siteSubset);
exit(-1);
}
// cuda's floating point format, IEEE-754, represents the floating point
// zero as 4 zero bytes
void cudaColorSpinorField::zero() {
cudaMemsetAsync(v, 0, bytes, streams[Nstream-1]);
if (precision == QUDA_HALF_PRECISION) cudaMemsetAsync(norm, 0, norm_bytes, streams[Nstream-1]);
}
void cudaColorSpinorField::zeroPad() {
size_t pad_bytes = (stride - volume) * precision * fieldOrder;
int Npad = nColor * nSpin * 2 / fieldOrder;
if (eigv_dim > 0 && eigv_id == -1){//we consider the whole eigenvector set:
Npad *= eigv_dim;
pad_bytes /= eigv_dim;
}
size_t pitch = ((eigv_dim == 0 || eigv_id != -1) ? stride : eigv_stride)*fieldOrder*precision;
char *dst = (char*)v + ((eigv_dim == 0 || eigv_id != -1) ? volume : eigv_volume)*fieldOrder*precision;
if(pad_bytes) cudaMemset2D(dst, pitch, 0, pad_bytes, Npad);
//for (int i=0; i<Npad; i++) {
// if (pad_bytes) cudaMemset((char*)v + (volume + i*stride)*fieldOrder*precision, 0, pad_bytes);
//}
}
void cudaColorSpinorField::copy(const cudaColorSpinorField &src) {
checkField(*this, src);
copyCuda(*this, src);
}
void cudaColorSpinorField::copySpinorField(const ColorSpinorField &src) {
// src is on the device and is native
if (typeid(src) == typeid(cudaColorSpinorField) &&
isNative() && dynamic_cast<const cudaColorSpinorField &>(src).isNative()) {
copy(dynamic_cast<const cudaColorSpinorField&>(src));
} else if (typeid(src) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION);
} else if (typeid(src) == typeid(cpuColorSpinorField)) { // src is on the host
loadSpinorField(src);
} else {
errorQuda("Unknown input ColorSpinorField %s", typeid(src).name());
}
}
void cudaColorSpinorField::loadSpinorField(const ColorSpinorField &src) {
if (REORDER_LOCATION == QUDA_CPU_FIELD_LOCATION &&
typeid(src) == typeid(cpuColorSpinorField)) {
for(int b=0; b<2; ++b){
resizeBufferPinned(bytes + norm_bytes, b);
memset(bufferPinned[b], 0, bytes+norm_bytes); // FIXME (temporary?) bug fix for padding
}
copyGenericColorSpinor(*this, src, QUDA_CPU_FIELD_LOCATION,
bufferPinned[bufferIndex], 0, (char*)bufferPinned[bufferIndex]+bytes, 0);
cudaMemcpy(v, bufferPinned[bufferIndex], bytes, cudaMemcpyHostToDevice);
cudaMemcpy(norm, (char*)bufferPinned[bufferIndex]+bytes, norm_bytes, cudaMemcpyHostToDevice);
} else if (typeid(src) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION);
} else {
void *Src, *srcNorm;
if (!zeroCopy) {
resizeBufferDevice(src.Bytes()+src.NormBytes());
Src = bufferDevice;
srcNorm = (char*)bufferDevice + src.Bytes();
cudaMemcpy(Src, src.V(), src.Bytes(), cudaMemcpyHostToDevice);
cudaMemcpy(srcNorm, src.Norm(), src.NormBytes(), cudaMemcpyHostToDevice);
} else {
for(int b=0; b<2; ++b){
resizeBufferPinned(src.Bytes()+src.NormBytes(), b);
}
memcpy(bufferPinned[bufferIndex], src.V(), src.Bytes());
memcpy((char*)bufferPinned[bufferIndex]+src.Bytes(), src.Norm(), src.NormBytes());
cudaHostGetDevicePointer(&Src, bufferPinned[bufferIndex], 0);
srcNorm = (void*)((char*)Src + src.Bytes());
}
cudaMemset(v, 0, bytes); // FIXME (temporary?) bug fix for padding
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION, 0, Src, 0, srcNorm);
}
checkCudaError();
return;
}
void cudaColorSpinorField::saveSpinorField(ColorSpinorField &dest) const {
if (REORDER_LOCATION == QUDA_CPU_FIELD_LOCATION &&
typeid(dest) == typeid(cpuColorSpinorField)) {
for(int b=0; b<2; ++b) resizeBufferPinned(bytes+norm_bytes,b);
cudaMemcpy(bufferPinned[bufferIndex], v, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy((char*)bufferPinned[bufferIndex]+bytes, norm, norm_bytes, cudaMemcpyDeviceToHost);
copyGenericColorSpinor(dest, *this, QUDA_CPU_FIELD_LOCATION,
0, bufferPinned[bufferIndex], 0, (char*)bufferPinned[bufferIndex]+bytes);
} else if (typeid(dest) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(dest, *this, QUDA_CUDA_FIELD_LOCATION);
} else {
void *dst, *dstNorm;
if (!zeroCopy) {
resizeBufferDevice(dest.Bytes()+dest.NormBytes());
dst = bufferDevice;
dstNorm = (char*)bufferDevice+dest.Bytes();
} else {
for(int b=0; b<2; ++b) resizeBufferPinned(dest.Bytes()+dest.NormBytes(),b);
cudaHostGetDevicePointer(&dst, bufferPinned[bufferIndex], 0);
dstNorm = (char*)dst+dest.Bytes();
}
copyGenericColorSpinor(dest, *this, QUDA_CUDA_FIELD_LOCATION, dst, v, dstNorm, 0);
if (!zeroCopy) {
cudaMemcpy(dest.V(), dst, dest.Bytes(), cudaMemcpyDeviceToHost);
cudaMemcpy(dest.Norm(), dstNorm, dest.NormBytes(), cudaMemcpyDeviceToHost);
} else {
memcpy(dest.V(), bufferPinned[bufferIndex], dest.Bytes());
memcpy(dest.Norm(), (char*)bufferPinned[bufferIndex]+dest.Bytes(), dest.NormBytes());
}
}
checkCudaError();
return;
}
void cudaColorSpinorField::allocateGhostBuffer(int nFace) {
int Nint = nColor * nSpin * 2; // number of internal degrees of freedom
if (nSpin == 4) Nint /= 2; // spin projection for Wilson
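// e.g. a Wilson-like field (nColor = 3, nSpin = 4) gives Nint = 3*4*2/2 = 12 real numbers per face site after spin projection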
// compute size of buffer required
size_t faceBytes = 0;
for (int i=0; i<4; i++) {
if(!commDimPartitioned(i)) continue;
faceBytes += 2*nFace*ghostFace[i]*Nint*precision;
// add extra space for the norms for half precision
if (precision == QUDA_HALF_PRECISION) faceBytes += 2*nFace*ghostFace[i]*sizeof(float);
}
// only allocate if not already allocated, or if the required buffer is bigger than the previous allocation
if(initGhostFaceBuffer == 0 || faceBytes > ghostFaceBytes){
if (initGhostFaceBuffer){
for(int b=0; b<2; ++b) device_free(ghostFaceBuffer[b]);
}
if (faceBytes > 0) {
for(int b=0; b<2; ++b) ghostFaceBuffer[b] = device_malloc(faceBytes);
initGhostFaceBuffer = 1;
ghostFaceBytes = faceBytes;
}
}
size_t offset = 0;
for (int i=0; i<4; i++) {
if(!commDimPartitioned(i)) continue;
for(int b=0; b<2; ++b) backGhostFaceBuffer[b][i] = (void*)(((char*)ghostFaceBuffer[b]) + offset);
offset += nFace*ghostFace[i]*Nint*precision;
if (precision == QUDA_HALF_PRECISION) offset += nFace*ghostFace[i]*sizeof(float);
for(int b=0; b<2; ++b) fwdGhostFaceBuffer[b][i] = (void*)(((char*)ghostFaceBuffer[b]) + offset);
offset += nFace*ghostFace[i]*Nint*precision;
if (precision == QUDA_HALF_PRECISION) offset += nFace*ghostFace[i]*sizeof(float);
}
}
void cudaColorSpinorField::freeGhostBuffer(void)
{
if (!initGhostFaceBuffer) return;
for(int b=0; b<2; ++b) device_free(ghostFaceBuffer[b]);
for(int i=0;i < 4; i++){
if(!commDimPartitioned(i)) continue;
for(int b=0; b<2; ++b){
backGhostFaceBuffer[b][i] = NULL;
fwdGhostFaceBuffer[b][i] = NULL;
}
}
initGhostFaceBuffer = 0;
}
// pack the ghost zone into a contiguous buffer for communications
void cudaColorSpinorField::packGhost(const int nFace, const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, cudaStream_t *stream,
void *buffer, double a, double b)
{
#ifdef MULTI_GPU
int face_num;
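// face_num selects which face(s) the pack kernel fills: 0 = backwards, 1 = forwards,
// 2 = both directions (the QUDA_BOTH_DIRS case used by pack() below)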
if(dir == QUDA_BACKWARDS){
face_num = 0;
}else if(dir == QUDA_FORWARDS){
face_num = 1;
}else{
face_num = 2;
}
void *packBuffer = buffer ? buffer : ghostFaceBuffer[bufferIndex];
packFace(packBuffer, *this, nFace, dagger, parity, dim, face_num, *stream, a, b);
#else
errorQuda("packGhost not built on single-GPU build");
#endif
}
// send the ghost zone to the host
void cudaColorSpinorField::sendGhost(void *ghost_spinor, const int nFace, const int dim,
const QudaDirection dir, const int dagger,
cudaStream_t *stream) {
#ifdef MULTI_GPU
int Nvec = (nSpin == 1 || precision == QUDA_DOUBLE_PRECISION) ? 2 : 4;
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
if (dim !=3 || getKernelPackT() || getTwistPack()) { // use kernels to pack into contiguous buffers then a single cudaMemcpy
size_t bytes = nFace*Nint*ghostFace[dim]*precision;
if (precision == QUDA_HALF_PRECISION) bytes += nFace*ghostFace[dim]*sizeof(float);
void* gpu_buf =
(dir == QUDA_BACKWARDS) ? this->backGhostFaceBuffer[bufferIndex][dim] : this->fwdGhostFaceBuffer[bufferIndex][dim];
cudaMemcpyAsync(ghost_spinor, gpu_buf, bytes, cudaMemcpyDeviceToHost, *stream);
} else if(this->TwistFlavor() != QUDA_TWIST_NONDEG_DOUBLET){ // do multiple cudaMemcpys
int Npad = Nint / Nvec; // number of Nvec buffers we have
int Nt_minus1_offset = (volume - nFace*ghostFace[3]); // N_t -1 = Vh-Vsh
int offset = 0;
if (nSpin == 1) {
offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset;
} else if (nSpin == 4) {
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false; // Fwd is !Back
if (dir == QUDA_FORWARDS) upper = !upper;
int lower_spin_offset = Npad*stride;
if (upper) offset = (dir == QUDA_BACKWARDS ? 0 : Nt_minus1_offset);
else offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? 0 : Nt_minus1_offset);
}
// QUDA Memcpy NPad's worth.
// -- Dest will point to the right beginning PAD.
// -- Each Pad has size Nvec*Vsh Floats.
// -- There is Nvec*Stride Floats from the start of one PAD to the start of the next
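// Worked example (assuming a double-precision Wilson-like field, nSpin = 4, nColor = 3):
// Nint = 12 and Nvec = 2, so Npad = 6 rows are copied; each row moves
// len = nFace*ghostFace[3]*16 bytes and consecutive rows are spitch = stride*16 bytes apart.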
void *dst = (char*)ghost_spinor;
void *src = (char*)v + offset*Nvec*precision;
size_t len = nFace*ghostFace[3]*Nvec*precision;
size_t spitch = stride*Nvec*precision;
cudaMemcpy2DAsync(dst, len, src, spitch, len, Npad, cudaMemcpyDeviceToHost, *stream);
if (precision == QUDA_HALF_PRECISION) {
int norm_offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset*sizeof(float);
void *dst = (char*)ghost_spinor + nFace*Nint*ghostFace[3]*precision;
void *src = (char*)norm + norm_offset;
cudaMemcpyAsync(dst, src, nFace*ghostFace[3]*sizeof(float), cudaMemcpyDeviceToHost, *stream);
}
}else{
int flavorVolume = volume / 2;
int flavorTFace = ghostFace[3] / 2;
int Npad = Nint / Nvec; // number of Nvec buffers we have
int flavor1_Nt_minus1_offset = (flavorVolume - flavorTFace);
int flavor2_Nt_minus1_offset = (volume - flavorTFace);
int flavor1_offset = 0;
int flavor2_offset = 0;
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false; // Fwd is !Back
if (dir == QUDA_FORWARDS) upper = !upper;
int lower_spin_offset = Npad*stride;//ndeg tm: stride=2*flavor_volume+pad
if (upper){
flavor1_offset = (dir == QUDA_BACKWARDS ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = (dir == QUDA_BACKWARDS ? flavorVolume : flavor2_Nt_minus1_offset);
}else{
flavor1_offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? flavorVolume : flavor2_Nt_minus1_offset);
}
// QUDA Memcpy NPad's worth.
// -- Dest will point to the right beginning PAD.
// -- Each Pad has size Nvec*Vsh Floats.
// -- There is Nvec*Stride Floats from the start of one PAD to the start of the next
void *dst = (char*)ghost_spinor;
void *src = (char*)v + flavor1_offset*Nvec*precision;
size_t len = flavorTFace*Nvec*precision;
size_t spitch = stride*Nvec*precision;//ndeg tm: stride=2*flavor_volume+pad
size_t dpitch = 2*len;
cudaMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, cudaMemcpyDeviceToHost, *stream);
dst = (char*)ghost_spinor+len;
src = (char*)v + flavor2_offset*Nvec*precision;
cudaMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, cudaMemcpyDeviceToHost, *stream);
if (precision == QUDA_HALF_PRECISION) {
int Nt_minus1_offset = (flavorVolume - flavorTFace);
int norm_offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset*sizeof(float);
void *dst = (char*)ghost_spinor + Nint*ghostFace[3]*precision;
void *src = (char*)norm + norm_offset;
size_t dpitch = flavorTFace*sizeof(float);
size_t spitch = flavorVolume*sizeof(float);
cudaMemcpy2DAsync(dst, dpitch, src, spitch, flavorTFace*sizeof(float), 2, cudaMemcpyDeviceToHost, *stream);
}
}
#else
errorQuda("sendGhost not built on single-GPU build");
#endif
}
void cudaColorSpinorField::unpackGhost(const void* ghost_spinor, const int nFace,
const int dim, const QudaDirection dir,
const int dagger, cudaStream_t* stream)
{
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int len = nFace*ghostFace[dim]*Nint;
int offset = length + ghostOffset[dim]*nColor*nSpin*2;
offset += (dir == QUDA_BACKWARDS) ? 0 : len;
void *dst = (char*)v + precision*offset;
const void *src = ghost_spinor;
cudaMemcpyAsync(dst, src, len*precision, cudaMemcpyHostToDevice, *stream);
if (precision == QUDA_HALF_PRECISION) {
// norm region of host ghost zone is at the end of the ghost_spinor
int normlen = nFace*ghostFace[dim];
int norm_offset = stride + ghostNormOffset[dim];
norm_offset += (dir == QUDA_BACKWARDS) ? 0 : normlen;
void *dst = static_cast<char*>(norm) + norm_offset*sizeof(float);
const void *src = static_cast<const char*>(ghost_spinor)+nFace*Nint*ghostFace[dim]*precision;
cudaMemcpyAsync(dst, src, normlen*sizeof(float), cudaMemcpyHostToDevice, *stream);
}
}
// pack the ghost zone into a contiguous buffer for communications
void cudaColorSpinorField::packGhostExtended(const int nFace, const int R[], const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, cudaStream_t *stream,
void *buffer)
{
#ifdef MULTI_GPU
int face_num;
if(dir == QUDA_BACKWARDS){
face_num = 0;
}else if(dir == QUDA_FORWARDS){
face_num = 1;
}else{
face_num = 2;
}
void *packBuffer = buffer ? buffer : ghostFaceBuffer[bufferIndex];
packFaceExtended(packBuffer, *this, nFace, R, dagger, parity, dim, face_num, *stream);
#else
errorQuda("packGhostExtended not built on single-GPU build");
#endif
}
// copy data from host buffer into boundary region of device field
void cudaColorSpinorField::unpackGhostExtended(const void* ghost_spinor, const int nFace, const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, cudaStream_t* stream)
{
// First call the regular unpackGhost routine to copy data into the `usual' ghost-zone region
// of the data array
unpackGhost(ghost_spinor, nFace, dim, dir, dagger, stream);
// Next step is to copy data from the ghost zone back to the interior region
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int len = nFace*ghostFace[dim]*Nint;
int offset = length + ghostOffset[dim]*nColor*nSpin*2;
offset += (dir == QUDA_BACKWARDS) ? 0 : len;
#ifdef MULTI_GPU
const int face_num = 2;
const bool unpack = true;
const int R[4] = {0,0,0,0};
packFaceExtended(ghostFaceBuffer[bufferIndex], *this, nFace, R, dagger, parity, dim, face_num, *stream, unpack);
#else
errorQuda("unpackGhostExtended not built on single-GPU build");
#endif
}
cudaStream_t *stream;
void cudaColorSpinorField::createComms(int nFace) {
if(bufferMessageHandler != bufferPinnedResizeCount) destroyComms();
if (!initComms || nFaceComms != nFace) {
// if we are requesting a new number of faces destroy and start over
if(nFace != nFaceComms) destroyComms();
if (siteSubset != QUDA_PARITY_SITE_SUBSET)
errorQuda("Only supports single parity fields");
#ifdef GPU_COMMS
bool comms = false;
for (int i=0; i<nDimComms; i++) if (commDimPartitioned(i)) comms = true;
#endif
if (nFace > maxNface)
errorQuda("Requested number of faces %d in communicator is greater than supported %d",
nFace, maxNface);
// faceBytes is the sum of all face sizes
size_t faceBytes = 0;
// nbytes is the size in bytes of each face
size_t nbytes[QUDA_MAX_DIM];
// The number of degrees of freedom per site for the given
// field. Currently assumes spin projection of a Wilson-like
// field (so half the number of degrees of freedom).
int Ndof = (2 * nSpin * nColor) / (nSpin==4 ? 2 : 1);
for (int i=0; i<nDimComms; i++) {
nbytes[i] = maxNface*surfaceCB[i]*Ndof*precision;
if (precision == QUDA_HALF_PRECISION) nbytes[i] += maxNface*surfaceCB[i]*sizeof(float);
if (!commDimPartitioned(i)) continue;
faceBytes += 2*nbytes[i];
}
#ifndef GPU_COMMS
// use static pinned memory for face buffers
for(int b=0; b<2; ++b){
resizeBufferPinned(2*faceBytes, b); // oversizes for GPU_COMMS case
my_face[b] = bufferPinned[b];
from_face[b] = static_cast<char*>(bufferPinned[b]) + faceBytes;
}
// assign pointers for each face - it's ok to alias for different Nface parameters
size_t offset = 0;
#endif
for (int i=0; i<nDimComms; i++) {
if (!commDimPartitioned(i)) continue;
#ifdef GPU_COMMS
for(int b=0; b<2; ++b){
my_back_face[b][i] = backGhostFaceBuffer[b][i];
from_back_face[b][i] = ghost[i];
if(precision == QUDA_HALF_PRECISION){
my_back_norm_face[b][i] = static_cast<char*>(backGhostFaceBuffer[b][i]) + nFace*ghostFace[i]*Ndof*precision;
from_back_norm_face[b][i] = ghostNorm[i];
}
} // loop over b
#else
for(int b=0; b<2; ++b){
my_back_face[b][i] = static_cast<char*>(my_face[b]) + offset;
from_back_face[b][i] = static_cast<char*>(from_face[b]) + offset;
}
offset += nbytes[i];
#endif
#ifdef GPU_COMMS
for(int b=0; b<2; ++b){
my_fwd_face[b][i] = fwdGhostFaceBuffer[b][i];
from_fwd_face[b][i] = ghost[i] + nFace*ghostFace[i]*Ndof*precision;
if(precision == QUDA_HALF_PRECISION){
my_fwd_norm_face[b][i] = static_cast<char*>(fwdGhostFaceBuffer[b][i]) + nFace*ghostFace[i]*Ndof*precision;
from_fwd_norm_face[b][i] = static_cast<char*>(ghostNorm[i]) + nFace*ghostFace[i]*sizeof(float);
}
} // loop over b
#else
for(int b=0; b<2; ++b){
my_fwd_face[b][i] = static_cast<char*>(my_face[b]) + offset;
from_fwd_face[b][i] = static_cast<char*>(from_face[b]) + offset;
}
offset += nbytes[i];
#endif
}
// create a different message handler for each direction and Nface
for(int b=0; b<2; ++b){
mh_send_fwd[b] = new MsgHandle**[maxNface];
mh_send_back[b] = new MsgHandle**[maxNface];
mh_recv_fwd[b] = new MsgHandle**[maxNface];
mh_recv_back[b] = new MsgHandle**[maxNface];
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
mh_send_norm_fwd[b] = new MsgHandle**[maxNface];
mh_send_norm_back[b] = new MsgHandle**[maxNface];
mh_recv_norm_fwd[b] = new MsgHandle**[maxNface];
mh_recv_norm_back[b] = new MsgHandle**[maxNface];
}
#endif
} // loop over b
for (int j=0; j<maxNface; j++) {
for(int b=0; b<2; ++b){
mh_send_fwd[b][j] = new MsgHandle*[2*nDimComms];
mh_send_back[b][j] = new MsgHandle*[2*nDimComms];
mh_recv_fwd[b][j] = new MsgHandle*[nDimComms];
mh_recv_back[b][j] = new MsgHandle*[nDimComms];
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
mh_send_norm_fwd[b][j] = new MsgHandle*[2*nDimComms];
mh_send_norm_back[b][j] = new MsgHandle*[2*nDimComms];
mh_recv_norm_fwd[b][j] = new MsgHandle*[nDimComms];
mh_recv_norm_back[b][j] = new MsgHandle*[nDimComms];
}
#endif
} // loop over b
for (int i=0; i<nDimComms; i++) {
if (!commDimPartitioned(i)) continue;
#ifdef GPU_COMMS
size_t nbytes_Nface = surfaceCB[i]*Ndof*precision*(j+1);
size_t nbytes_Nface_norm = surfaceCB[i]*(j+1)*sizeof(float);
if (i != 3 || getKernelPackT() || getTwistPack()) {
#else
size_t nbytes_Nface = (nbytes[i] / maxNface) * (j+1);
#endif
for(int b=0; b<2; ++b){
mh_send_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_fwd_face[b][i], i, +1, nbytes_Nface) : NULL;
mh_send_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_back_face[b][i], i, -1, nbytes_Nface) : NULL;
mh_send_fwd[b][j][2*i+1] = mh_send_fwd[b][j][2*i]; // alias pointers
mh_send_back[b][j][2*i+1] = mh_send_back[b][j][2*i]; // alias pointers
}
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
for(int b=0; b<2; ++b){
mh_send_norm_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_fwd_norm_face[b][i], i, +1, nbytes_Nface_norm) : NULL;
mh_send_norm_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_back_norm_face[b][i], i, -1, nbytes_Nface_norm) : NULL;
mh_send_norm_fwd[b][j][2*i+1] = mh_send_norm_fwd[b][j][2*i];
mh_send_norm_back[b][j][2*i+1] = mh_send_norm_back[b][j][2*i];
}
}
} else if (this->TwistFlavor() == QUDA_TWIST_NONDEG_DOUBLET) {
errorQuda("GPU_COMMS for non-degenerate doublet only supported with time-dimension kernel packing enabled.");
} else {
/*
use a strided communicator, here we can't really use
the previously declared my_fwd_face and my_back_face
pointers since they don't really map 1-to-1 so let's
just compute the required base pointers and pass these
directly into the communicator construction
*/
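// For example, with spin projection (Ndof = 12) and double precision (Nvec() = 2) this gives
// Nblocks = 6 strided blocks per message; the blksize*Nblocks == nbytes_Nface check below
// confirms the strided layout covers exactly the same bytes as a contiguous message.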
int Nblocks = Ndof / Nvec(); // number of Nvec buffers we have
// start of last time slice chunk we are sending forwards
int endOffset = (volume - (j+1)*ghostFace[i]);
size_t offset[4];
void *base[4];
if (nSpin == 1) { // staggered is invariant with dagger
offset[2*0 + 0] = 0;
offset[2*1 + 0] = endOffset;
offset[2*0 + 1] = offset[2*0 + 0];
offset[2*1 + 1] = offset[2*1 + 0];
} else if (nSpin == 4) {
// !dagger: send last components backwards, send first components forwards
offset[2*0 + 0] = Nblocks*stride;
offset[2*1 + 0] = endOffset;
// dagger: send first components backwards, send last components forwards
offset[2*0 + 1] = 0;
offset[2*1 + 1] = Nblocks*stride + endOffset;
} else {
errorQuda("Unsupported number of spin components");
}
for (int k=0; k<4; k++) {
base[k] = static_cast<char*>(v) + offset[k]*Nvec()*precision; // total offset in bytes
}
size_t blksize = (j+1)*ghostFace[i]*Nvec()*precision; // (j+1) is number of faces
size_t Stride = stride*Nvec()*precision;
if (blksize * Nblocks != nbytes_Nface)
errorQuda("Total strided message size does not match expected size");
//printf("%d strided sends with Nface=%d Nblocks=%d blksize=%d Stride=%d\n", i, j+1, Nblocks, blksize, Stride);
for(int b=0; b<2; ++b){
// only allocate a communicator for the present face (this needs to be cleaned up)
mh_send_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[2], i, +1, blksize, Nblocks, Stride) : NULL;
mh_send_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[0], i, -1, blksize, Nblocks, Stride) : NULL;
if (nSpin ==4) { // dagger communicators
mh_send_fwd[b][j][2*i+1] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[3], i, +1, blksize, Nblocks, Stride) : NULL;
mh_send_back[b][j][2*i+1] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[1], i, -1, blksize, Nblocks, Stride) : NULL;
} else {
mh_send_fwd[b][j][2*i+1] = mh_send_fwd[b][j][2*i+0];
mh_send_back[b][j][2*i+1] = mh_send_back[b][j][2*i+0];
}
} // loop over b
if(precision == QUDA_HALF_PRECISION){
int Nt_minus1_offset = (volume - nFace*ghostFace[3]); // The space-time coordinate of the start of the last time slice
void *norm_fwd = static_cast<float*>(norm) + Nt_minus1_offset;
void *norm_back = norm; // the first time slice has zero offset
for(int b=0; b<2; ++b){
mh_send_norm_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(norm_fwd, i, +1, surfaceCB[i]*(j+1)*sizeof(float)) : NULL;
mh_send_norm_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(norm_back, i, -1, surfaceCB[i]*(j+1)*sizeof(float)) : NULL;
mh_send_norm_fwd[b][j][2*i+1] = mh_send_norm_fwd[b][j][2*i];
mh_send_norm_back[b][j][2*i+1] = mh_send_norm_back[b][j][2*i];
}
}
}
if(precision == QUDA_HALF_PRECISION){
for(int b=0; b<2; ++b){
mh_recv_norm_fwd[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_fwd_norm_face[b][i], i, +1, nbytes_Nface_norm) : NULL;
mh_recv_norm_back[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_back_norm_face[b][i], i, -1, nbytes_Nface_norm) : NULL;
}
}
#endif // GPU_COMMS
for(int b=0; b<2; ++b){
mh_recv_fwd[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_fwd_face[b][i], i, +1, nbytes_Nface) : NULL;
mh_recv_back[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_back_face[b][i], i, -1, nbytes_Nface) : NULL;
}
} // loop over dimension
}
bufferMessageHandler = bufferPinnedResizeCount;
initComms = true;
nFaceComms = nFace;
}
checkCudaError();
}
void cudaColorSpinorField::destroyComms() {
if (initComms) {
for(int b=0; b<2; ++b){
for (int j=0; j<maxNface; j++) {
for (int i=0; i<nDimComms; i++) {
if (commDimPartitioned(i)) {
if (mh_recv_fwd[b][j][i]) comm_free(mh_recv_fwd[b][j][i]);
if (mh_recv_back[b][j][i]) comm_free(mh_recv_back[b][j][i]);
if (mh_send_fwd[b][j][2*i]) comm_free(mh_send_fwd[b][j][2*i]);
if (mh_send_back[b][j][2*i]) comm_free(mh_send_back[b][j][2*i]);
// only in a special case are these not aliasing pointers
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
if (mh_recv_norm_fwd[b][j][i]) comm_free(mh_recv_norm_fwd[b][j][i]);
if (mh_recv_norm_back[b][j][i]) comm_free(mh_recv_norm_back[b][j][i]);
if (mh_send_norm_fwd[b][j][2*i]) comm_free(mh_send_norm_fwd[b][j][2*i]);
if (mh_send_norm_back[b][j][2*i]) comm_free(mh_send_norm_back[b][j][2*i]);
}
if (i == 3 && !getKernelPackT() && nSpin == 4) {
if (mh_send_fwd[b][j][2*i+1]) comm_free(mh_send_fwd[b][j][2*i+1]);
if (mh_send_back[b][j][2*i+1]) comm_free(mh_send_back[b][j][2*i+1]);
}
#endif // GPU_COMMS
}
}
delete []mh_recv_fwd[b][j];
delete []mh_recv_back[b][j];
delete []mh_send_fwd[b][j];
delete []mh_send_back[b][j];
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
delete []mh_recv_norm_fwd[b][j];
delete []mh_recv_norm_back[b][j];
delete []mh_send_norm_fwd[b][j];
delete []mh_send_norm_back[b][j];
}
#endif
}
delete []mh_recv_fwd[b];
delete []mh_recv_back[b];
delete []mh_send_fwd[b];
delete []mh_send_back[b];
for (int i=0; i<nDimComms; i++) {
my_fwd_face[b][i] = NULL;
my_back_face[b][i] = NULL;
from_fwd_face[b][i] = NULL;
from_back_face[b][i] = NULL;
}
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
delete []mh_recv_norm_fwd[b];
delete []mh_recv_norm_back[b];
delete []mh_send_norm_fwd[b];
delete []mh_send_norm_back[b];
}
for(int i=0; i<nDimComms; i++){
my_fwd_norm_face[b][i] = NULL;
my_back_norm_face[b][i] = NULL;
from_fwd_norm_face[b][i] = NULL;
from_back_norm_face[b][i] = NULL;
}
#endif
} // loop over b
initComms = false;
checkCudaError();
}
}
void cudaColorSpinorField::streamInit(cudaStream_t *stream_p){
stream = stream_p;
}
void cudaColorSpinorField::pack(int nFace, int parity, int dagger, cudaStream_t *stream_p,
bool zeroCopyPack, double a, double b) {
allocateGhostBuffer(nFace); // allocate the ghost buffer if not yet allocated
createComms(nFace); // must call this first
stream = stream_p;
const int dim=-1; // pack all partitioned dimensions
if (zeroCopyPack) {
void *my_face_d;
cudaHostGetDevicePointer(&my_face_d, my_face[bufferIndex], 0); // set the matching device pointer
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[0], my_face_d, a, b);
} else {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[Nstream-1], 0, a, b);
}
}
void cudaColorSpinorField::pack(int nFace, int parity, int dagger, int stream_idx,
bool zeroCopyPack, double a, double b) {
allocateGhostBuffer(nFace); // allocate the ghost buffer if not yet allocated
createComms(nFace); // must call this first
const int dim=-1; // pack all partitioned dimensions
if (zeroCopyPack) {
void *my_face_d;
cudaHostGetDevicePointer(&my_face_d, my_face[bufferIndex], 0); // set the matching device pointer
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[stream_idx], my_face_d, a, b);
} else {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[stream_idx], 0, a, b);
}
}
void cudaColorSpinorField::packExtended(const int nFace, const int R[], const int parity,
const int dagger, const int dim,
cudaStream_t *stream_p, const bool zeroCopyPack){
allocateGhostBuffer(nFace); // allocate the ghost buffer if not yet allocated
createComms(nFace); // must call this first
stream = stream_p;
void *my_face_d = NULL;
if(zeroCopyPack){
cudaHostGetDevicePointer(&my_face_d, my_face[bufferIndex], 0);
packGhostExtended(nFace, R, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[0], my_face_d);
}else{
packGhostExtended(nFace, R, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[Nstream-1], my_face_d);
}
}
void cudaColorSpinorField::gather(int nFace, int dagger, int dir, cudaStream_t* stream_p)
{
int dim = dir/2;
// If stream_p != 0, use pack_stream, else use the stream array
cudaStream_t *pack_stream = (stream_p) ? stream_p : stream+dir;
if(dir%2 == 0){
// backwards copy to host
sendGhost(my_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, pack_stream);
} else {
// forwards copy to host
sendGhost(my_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, pack_stream);
}
}
void cudaColorSpinorField::recvStart(int nFace, int dir, int dagger) {
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_fwd[bufferIndex][nFace-1][dim]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_back[bufferIndex][nFace-1][dim]);
}
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_norm_back[bufferIndex][nFace-1][dim]);
}
#endif
}
void cudaColorSpinorField::sendStart(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return;
if (dir%2 == 0) { // sending backwards
comm_start(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
comm_start(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION) return;
if (dir%2 == 0) { // sending backwards
comm_start(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
comm_start(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#endif
}
void cudaColorSpinorField::commsStart(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_fwd[bufferIndex][nFace-1][dim]);
comm_start(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_back[bufferIndex][nFace-1][dim]);
// Begin forward send
comm_start(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]);
comm_start(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_norm_back[bufferIndex][nFace-1][dim]);
// Begin forward send
comm_start(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#endif
}
int cudaColorSpinorField::commsQuery(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return 0;
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION){
#endif
if(dir%2==0) {
if (comm_query(mh_recv_fwd[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_back[bufferIndex][nFace-1][2*dim+dagger])) return 1;
} else {
if (comm_query(mh_recv_back[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger])) return 1;
}
#ifdef GPU_COMMS
}else{ // half precision
if(dir%2==0) {
if (comm_query(mh_recv_fwd[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]) &&
comm_query(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger])) return 1;
} else {
if (comm_query(mh_recv_back[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]) &&
comm_query(mh_recv_norm_back[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger])) return 1;
}
} // half precision
#endif
return 0;
}
void cudaColorSpinorField::commsWait(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return;
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION){
#endif
if (dir%2==0) {
comm_wait(mh_recv_fwd[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
} else {
comm_wait(mh_recv_back[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#ifdef GPU_COMMS
} else { // half precision
if (dir%2==0) {
comm_wait(mh_recv_fwd[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
comm_wait(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger]);
} else {
comm_wait(mh_recv_back[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
comm_wait(mh_recv_norm_back[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
} // half precision
#endif
return;
}
void cudaColorSpinorField::scatter(int nFace, int dagger, int dir, cudaStream_t* stream_p)
{
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
// both scattering occurrences now go through the same stream
if (dir%2==0) {// receive from forwards
unpackGhost(from_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, stream_p);
} else { // receive from backwards
unpackGhost(from_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, stream_p);
}
}
void cudaColorSpinorField::scatter(int nFace, int dagger, int dir)
{
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
// both scattering occurrences now go through the same stream
if (dir%2==0) {// receive from forwards
unpackGhost(from_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, &stream[2*dim/*+0*/]);
} else { // receive from backwards
unpackGhost(from_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, &stream[2*dim/*+1*/]);
}
}
void cudaColorSpinorField::scatterExtended(int nFace, int parity, int dagger, int dir)
{
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
if (dir%2==0) {// receive from forwards
unpackGhostExtended(from_fwd_face[bufferIndex][dim], nFace, static_cast<QudaParity>(parity), dim, QUDA_FORWARDS, dagger, &stream[2*dim/*+0*/]);
} else { // receive from backwards
unpackGhostExtended(from_back_face[bufferIndex][dim], nFace, static_cast<QudaParity>(parity), dim, QUDA_BACKWARDS, dagger, &stream[2*dim/*+1*/]);
}
}
// Return the location of the field
QudaFieldLocation cudaColorSpinorField::Location() const { return QUDA_CUDA_FIELD_LOCATION; }
std::ostream& operator<<(std::ostream &out, const cudaColorSpinorField &a) {
out << (const ColorSpinorField&)a;
out << "v = " << a.v << std::endl;
out << "norm = " << a.norm << std::endl;
out << "alloc = " << a.alloc << std::endl;
out << "init = " << a.init << std::endl;
return out;
}
//! for deflated solvers:
cudaColorSpinorField& cudaColorSpinorField::Eigenvec(const int idx) const {
if (siteSubset == QUDA_PARITY_SITE_SUBSET && this->EigvId() == -1) {
if (idx < this->EigvDim()) {//set up an eigenvector from the set
return *(dynamic_cast<cudaColorSpinorField*>(eigenvectors[idx]));
}
else{
errorQuda("Incorrect eigenvector index...");
}
}
errorQuda("Eigenvector must be a parity spinor");
exit(-1);
}
//copyCuda currently cannot work with a set of spinor fields..
void cudaColorSpinorField::CopyEigenvecSubset(cudaColorSpinorField &dst, const int range, const int first_element) const{
#if 0
if(first_element < 0) errorQuda("\nError: trying to set negative first element.\n");
if (siteSubset == QUDA_PARITY_SITE_SUBSET && this->EigvId() == -1) {
if (first_element == 0 && range == this->EigvDim())
{
if(range != dst.EigvDim()) errorQuda("\nError: eigenvector range too big.\n");
checkField(dst, *this);
copyCuda(dst, *this);
}
else if ((first_element+range) < this->EigvDim())
{//setup eigenvector subset
cudaColorSpinorField *eigv_subset;
ColorSpinorParam param;
param.nColor = nColor;
param.nSpin = nSpin;
param.twistFlavor = twistFlavor;
param.precision = precision;
param.nDim = nDim;
param.pad = pad;
param.siteSubset = siteSubset;
param.siteOrder = siteOrder;
param.fieldOrder = fieldOrder;
param.gammaBasis = gammaBasis;
memcpy(param.x, x, nDim*sizeof(int));
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.eigv_dim = range;
param.eigv_id = -1;
param.v = (void*)((char*)v + first_element*eigv_bytes);
param.norm = (void*)((char*)norm + first_element*eigv_norm_bytes);
eigv_subset = new cudaColorSpinorField(param);
//Not really needed:
eigv_subset->eigenvectors.reserve(param.eigv_dim);
for(int id = first_element; id < (first_element+range); id++)
{
param.eigv_id = id;
eigv_subset->eigenvectors.push_back(new cudaColorSpinorField(*this, param));
}
checkField(dst, *eigv_subset);
copyCuda(dst, *eigv_subset);
delete eigv_subset;
}
else{
errorQuda("Incorrect eigenvector dimension...");
}
}
else{
errorQuda("Eigenvector must be a parity spinor");
exit(-1);
}
#endif
}
void cudaColorSpinorField::getTexObjectInfo() const
{
#ifdef USE_TEXTURE_OBJECTS
printfQuda("\nPrint texture info for the field:\n");
std::cout << *this;
cudaResourceDesc resDesc;
//memset(&resDesc, 0, sizeof(resDesc));
cudaGetTextureObjectResourceDesc(&resDesc, this->Tex());
printfQuda("\nDevice pointer: %p\n", resDesc.res.linear.devPtr);
printfQuda("\nVolume (in bytes): %d\n", resDesc.res.linear.sizeInBytes);
if (resDesc.resType == cudaResourceTypeLinear) printfQuda("\nResource type: linear \n");
checkCudaError();
#endif
}
} // namespace quda
|
6f987e0c9b00aeec01200a35d92f45936f0d7993.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@precisions normal z -> s d c
@author Stan Tomov
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define COMPLEX
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
#define dB(i_, j_) (dB + (i_) + (j_)*lddb)
#ifdef COMPLEX
void magmablas_zlacgv( magma_int_t n, magmaDoubleComplex *x, magma_int_t incx, magma_queue_t queue );
#endif
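// Fused helper kernels for the solve phase: magma_zgeru_1 / magma_zgeru_2 apply the
// right-hand-side updates for a 1x1 / 2x2 diagonal block while folding in the k <-> kp row
// swap, and magma_zswap_scal / magma_zswap_scal_inverseblock_lower then perform the swap and
// multiply by the inverse of the diagonal block.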
__global__ void
magma_zgeru_1(
int n, int k, int kp, int nrhs,
magmaDoubleComplex *A, magmaDoubleComplex *B, int lddb)
{
int tx = threadIdx.x + 64 * blockIdx.x;
if (k+1+tx < n)
if (k!=kp && k+1+tx == kp)
// if k <-> kp pivoting, B[k] holds the result for B[kp]
B[k] -= A[kp]*B[kp];
else
B[k+1+tx] -= A[k+1+tx]*B[kp];
}
__global__ void
magma_zswap_scal(
int k, int kp, int nrhs,
magmaDoubleComplex *A, magmaDoubleComplex *B, int lddb)
{
magmaDoubleComplex tmp;
if (k != kp){
tmp = B[k];
B[k] = B[kp];
B[kp] = tmp;
}
B[k] *= MAGMA_Z_DIV(MAGMA_Z_ONE, A[k]);
}
__global__ void
magma_zgeru_2(
int n, int k, int kp, int nrhs,
magmaDoubleComplex *A, int ldda, magmaDoubleComplex *B, int lddb)
{
int tx = threadIdx.x + 64 * blockIdx.x;
if (k+2+tx < n)
if (k+1!=kp && k+2+tx == kp)
// if k+1 <-> kp pivoting, B[k+1] holds the result for B[kp]
B[k+1] -= A[kp]*B[k] + A[kp+ldda]*B[kp];
else
B[k+2+tx] -= A[k+2+tx]*B[k] + A[k+2+tx+ldda]*B[kp];
}
__global__ void
magma_zswap_scal_inverseblock_lower(
int k, int kp, int nrhs,
magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb)
{
int tx = threadIdx.x;
magmaDoubleComplex tmp;
if (k+1 != kp){
tmp = *dB(k+1,tx);
*dB(k+1,tx) = *dB(kp ,tx);
*dB( kp,tx) = tmp;
}
magmaDoubleComplex AKM1K = *dA(1,0);
magmaDoubleComplex AKM1 = MAGMA_Z_DIV(*dA(0,0), MAGMA_Z_CONJ( AKM1K ) );
magmaDoubleComplex AK = MAGMA_Z_DIV(*dA(1,1), AKM1K );
magmaDoubleComplex DENOM = AKM1*AK - MAGMA_Z_ONE;
magmaDoubleComplex BKM1 = MAGMA_Z_DIV( *dB(k,tx), MAGMA_Z_CONJ(AKM1K));
magmaDoubleComplex BK = MAGMA_Z_DIV( *dB(k+1,tx), AKM1K );
*dB(k,tx) = MAGMA_Z_DIV( AK*BKM1-BK , DENOM );
*dB(k+1,tx) = MAGMA_Z_DIV( AKM1*BK-BKM1, DENOM );
}
// This kernel scales the array B by 1/alpha.
// The kernel is called on one thread block with the number of threads equal to the
// length of B, so that each thread scales just one element of B.
__global__ void
magmablas_zdscal_inverse(
magmaDoubleComplex *alpha,
magmaDoubleComplex *B, int ldb)
{
int tx = threadIdx.x;
magmaDoubleComplex scale = MAGMA_Z_DIV(MAGMA_Z_ONE, *alpha);
B[tx*ldb] *= scale;
}
// Multiply array dB of size 2 by the inverse of the 2x2 diagonal block at dA.
// This is a batch operation where each thread handles one right-hand-side column of dB.
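// The 2x2 diagonal block is Hermitian, D = [ d11, d12 ; conj(d12), d22 ]; as in LAPACK's zhetrs,
// each column of dB is solved against D by first dividing through by the off-diagonal d12
// (AKM1, AK, BKM1, BK below) and then combining with the scaled determinant DENOM.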
__global__ void
magmablas_zdscal_inverseblock_upper(
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb)
{
int tx = threadIdx.x;
magmaDoubleComplex AKM1K = *dA(0,1);
magmaDoubleComplex AKM1 = MAGMA_Z_DIV(*dA(0,0), AKM1K);
magmaDoubleComplex AK = MAGMA_Z_DIV(*dA(1,1), MAGMA_Z_CONJ( AKM1K ));
magmaDoubleComplex DENOM = AKM1*AK - MAGMA_Z_ONE;
magmaDoubleComplex BKM1 = MAGMA_Z_DIV( *dB(0,tx), AKM1K);
magmaDoubleComplex BK = MAGMA_Z_DIV( *dB(1,tx), MAGMA_Z_CONJ(AKM1K) );
*dB(0,tx) = MAGMA_Z_DIV( AK*BKM1-BK , DENOM );
*dB(1,tx) = MAGMA_Z_DIV( AKM1*BK-BKM1, DENOM );
}
__global__ void
magmablas_zdscal_inverseblock_lower(
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb)
{
int tx = threadIdx.x;
magmaDoubleComplex AKM1K = *dA(1,0);
magmaDoubleComplex AKM1 = MAGMA_Z_DIV(*dA(0,0), MAGMA_Z_CONJ( AKM1K ) );
magmaDoubleComplex AK = MAGMA_Z_DIV(*dA(1,1), AKM1K );
magmaDoubleComplex DENOM = AKM1*AK - MAGMA_Z_ONE;
magmaDoubleComplex BKM1 = MAGMA_Z_DIV( *dB(0,tx), MAGMA_Z_CONJ(AKM1K));
magmaDoubleComplex BK = MAGMA_Z_DIV( *dB(1,tx), AKM1K );
*dB(0,tx) = MAGMA_Z_DIV( AK*BKM1-BK , DENOM );
*dB(1,tx) = MAGMA_Z_DIV( AKM1*BK-BKM1, DENOM );
}
/***************************************************************************//**
Purpose
-------
ZHETRS solves a system of linear equations dA*dX = dB with a complex
Hermitian matrix dA using the factorization dA = dU*dD*dU**H or
dA = dL*dD*dL**H computed by ZHETRF_GPU.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies whether the details of the factorization are stored
as an upper or lower triangular matrix.
= MagmaUpper: Upper triangular, form is A = U*D*U**H;
= MagmaLower: Lower triangular, form is A = L*D*L**H.
@param[in]
n INTEGER
The order of the matrix dA. N >= 0.
@param[in]
nrhs INTEGER
The number of right hand sides, i.e., the number of columns
of the matrix dB. NRHS >= 0.
@param[in]
dA COMPLEX_16 array, dimension (LDA,N)
The block diagonal matrix D and the multipliers used to
obtain the factor U or L as computed by ZHETRF_GPU.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,N).
@param[in]
ipiv INTEGER array, dimension (N)
Details of the interchanges and the block structure of D
as determined by ZHETRF.
@param[in,out]
dB COMPLEX_16 array, dimension (LDDB, NRHS)
On entry, the right hand side matrix dB.
On exit, the solution matrix dX.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,N).
@param[out]
info INTEGER
= 0: successful exit
< 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_hetrs
*******************************************************************************/
extern "C" magma_int_t
magma_zhetrs_gpu(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs,
magmaDoubleComplex *dA, magma_int_t ldda,
magma_int_t *ipiv,
magmaDoubleComplex *dB, magma_int_t lddb,
magma_int_t *info,
magma_queue_t queue )
{
/* Constants */
const magmaDoubleComplex c_one = MAGMA_Z_ONE;
const magmaDoubleComplex c_neg_one = MAGMA_Z_NEG_ONE;
/* Local variables */
int k, kp;
bool upper = (uplo == MagmaUpper);
/* Test the input parameters. */
*info = 0;
if ( ! upper && uplo != MagmaLower ) {
*info = -1;
} else if ( n < 0 ) {
*info = -2;
} else if ( nrhs < 0 ) {
*info = -3;
} else if ( ldda < max( 1, n ) ) {
*info = -5;
} else if ( lddb < max( 1, n ) ) {
*info = -8;
}
if ( *info != 0 ) {
magma_xerbla( __func__, -(*info) );
return *info;
}
/* Quick return if possible */
if (n == 0 || nrhs == 0) {
return *info;
}
if (upper) {
/* Solve A*X = B, where A = U*D*U'.
First solve U*D*X = B, overwriting B with X.
K is the main loop index, decreasing from N to 1 in steps of
1 or 2, depending on the size of the diagonal blocks. */
for(k = n-1; k > -1; k--) {
if ( ipiv[k] > 0 ) {
/* 1 x 1 diagonal block.
Interchange rows k and ipiv(k). */
kp = ipiv[k]-1;
if ( kp != k )
magma_zswap(nrhs, dB(k, 0), lddb, dB(kp, 0), lddb, queue);
/* Multiply by inv(U(K)), where U(K) is the transformation
stored in column K of A. */
magma_zgeru(k, nrhs, c_neg_one, dA(0,k), 1, dB(k,0), lddb, dB(0,0), lddb, queue);
/* Multiply by the inverse of the diagonal block. */
hipLaunchKernelGGL(( magmablas_zdscal_inverse), dim3(1), dim3(nrhs), 0, queue->cuda_stream(),
dA(k,k), dB(k,0), lddb);
}
else {
/* 2 x 2 diagonal block
Interchange rows K-1 and -IPIV(K). */
kp = -ipiv[k]-1;
if ( kp != k-1 )
magma_zswap(nrhs, dB(k-1,0), lddb, dB(kp,0), lddb, queue);
/* Multiply by inv(U(K)), where U(K) is the transformation
stored in columns K-1 and K of A. */
magma_zgeru(k-1, nrhs, c_neg_one, dA(0,k ), 1, dB(k ,0), lddb, dB, lddb, queue);
magma_zgeru(k-1, nrhs, c_neg_one, dA(0,k-1), 1, dB(k-1,0), lddb, dB, lddb, queue);
/* Multiply by the inverse of the diagonal block. */
hipLaunchKernelGGL(( magmablas_zdscal_inverseblock_upper), dim3(1), dim3(nrhs), 0, queue->cuda_stream(),
dA(k-1,k-1), ldda, dB(k-1,0), lddb);
/* reduce k once more for the 2 x 2 block */
k--;
}
}
/* Next solve U'*X = B, overwriting B with X.
K is the main loop index, increasing from 1 to N in steps of
1 or 2, depending on the size of the diagonal blocks. */
for(k=0; k<n; k++) {
if ( ipiv[k] > 0) {
/* 1 x 1 diagonal block
Multiply by inv(U'(K)), where U(K) is the transformation
stored in column K of A. */
if (k > 0) {
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k,0), lddb, queue);
#endif
magma_zgemv(MagmaConjTrans, k, nrhs, c_neg_one, dB, lddb,
dA(0, k), 1, c_one, dB(k, 0), lddb, queue);
#ifdef COMPLEX
magmablas_zlacgv( nrhs, dB(k,0), lddb, queue);
#endif
}
/* Interchange rows K and IPIV(K). */
kp = ipiv[k]-1;
if( kp != k )
magma_zswap(nrhs, dB(k, 0), lddb, dB(kp, 0), lddb, queue);
}
else {
/* 2 x 2 diagonal block
Multiply by inv(U'(K+1)), where U(K+1) is the transformation
stored in columns K and K+1 of A. */
if (k > 0) {
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k,0), lddb, queue);
#endif
magma_zgemv(MagmaConjTrans, k, nrhs, c_neg_one, dB, lddb,
dA(0, k), 1, c_one, dB(k,0), lddb, queue);
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k,0), lddb, queue);
#endif
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k+1,0), lddb, queue);
#endif
magma_zgemv(MagmaConjTrans, k, nrhs, c_neg_one, dB, lddb,
dA(0, k+1), 1, c_one, dB(k+1, 0), lddb, queue);
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k+1,0), lddb, queue);
#endif
}
/* Interchange rows K and -IPIV(K). */
kp = -ipiv[k]-1;
if ( kp != k )
magma_zswap(nrhs, dB(k, 0), lddb, dB(kp, 0), lddb, queue);
/* increase k one more for the 2 x 2 block */
k++;
}
}
} else {
/* Solve A*X = B, where A = L*D*L'.
First solve L*D*X = B, overwriting B with X.
K is the main loop index, increasing from 1 to N in steps of
1 or 2, depending on the size of the diagonal blocks. */
for(k=0; k<n; k++) {
if ( ipiv[k] > 0) {
/* 1 x 1 diagonal block
Interchange rows K and IPIV(K). */
kp = ipiv[k]-1;
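// The disabled branch below keeps the reference path (separate zswap, zgeru and scaling
// kernels); the active else-branch fuses the same work into magma_zgeru_1 + magma_zswap_scal.
// The 2x2 block case further down follows the same pattern with magma_zgeru_2.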
if (0){
if ( kp != k )
magma_zswap(nrhs, dB(k,0), lddb, dB(kp,0), lddb, queue);
/* Multiply by inv(L(K)), where L(K) is the transformation
stored in column K of A. */
if (k < n-1)
magma_zgeru(n-k-1, nrhs, c_neg_one, dA(k+1, k), 1, dB(k,0), lddb,
dB(k+1, 0), lddb, queue);
/* Multiply by the inverse of the diagonal block. */
hipLaunchKernelGGL(( magmablas_zdscal_inverse), dim3(1), dim3(nrhs), 0, queue->cuda_stream(),
dA(k,k), dB(k,0), lddb);
}
else {
hipLaunchKernelGGL(( magma_zgeru_1), dim3(magma_ceildiv(n-k-1,64)), dim3(64), 0, queue->cuda_stream(),
n, k, kp, nrhs, dA(0,k), dB, lddb);
hipLaunchKernelGGL(( magma_zswap_scal), dim3(1), dim3(nrhs), 0, queue->cuda_stream(),
k, kp, nrhs, dA(0,k), dB, lddb);
}
}
else {
/* 2 x 2 diagonal block
Interchange rows K+1 and -IPIV(K). */
kp = -ipiv[k]-1;
if (0) {
if ( kp != k+1 )
magma_zswap(nrhs, dB(k+1,0), lddb, dB(kp,0), lddb, queue);
/* Multiply by inv(L(K)), where L(K) is the transformation
stored in columns K and K+1 of A. */
if ( k < n-2 ) {
magma_zgeru(n-k-2, nrhs, c_neg_one, dA(k+2,k), 1, dB(k,0), lddb,
dB(k+2,0), lddb, queue);
magma_zgeru(n-k-2, nrhs, c_neg_one, dA(k+2,k+1), 1, dB(k+1,0), lddb,
dB(k+2,0), lddb, queue);
}
/* Multiply by the inverse of the diagonal block. */
hipLaunchKernelGGL(( magmablas_zdscal_inverseblock_lower), dim3(1), dim3(nrhs), 0, queue->cuda_stream(),
dA(k,k), ldda, dB(k,0), lddb);
}
else {
hipLaunchKernelGGL(( magma_zgeru_2), dim3(magma_ceildiv(n-k-2,64)), dim3(64), 0, queue->cuda_stream(),
n, k, kp, nrhs, dA(0,k), ldda, dB, lddb);
hipLaunchKernelGGL(( magma_zswap_scal_inverseblock_lower), dim3(1), dim3(nrhs), 0, queue->cuda_stream(),
k, kp, nrhs, dA(k,k), ldda, dB(0, 0), lddb);
}
/* increase k one more for the 2 x 2 block */
k++;
}
}
/* Next solve L'*X = B, overwriting B with X.
K is the main loop index, decreasing from N to 1 in steps of
1 or 2, depending on the size of the diagonal blocks. */
for(k = n-1; k > -1; k--) {
if ( ipiv[k] > 0 ) {
/* 1 x 1 diagonal block.
Multiply by inv(L'(K)), where L(K) is the transformation
stored in column K of A. */
if (1){
if ( k < n-1 ) {
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k,0), lddb, queue);
#endif
magma_zgemv(MagmaConjTrans, n-k-1, nrhs, c_neg_one,
dB(k+1,0), lddb, dA(k+1,k), 1, c_one,
dB(k,0), lddb, queue);
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k,0), lddb, queue);
#endif
}
/* Interchange rows K and IPIV(K). */
kp = ipiv[k]-1;
if ( kp!=k )
magma_zswap(nrhs, dB(k,0), lddb, dB(kp,0), lddb, queue);
}
else {
}
}
else {
/* 2 x 2 diagonal block
Multiply by inv(L'(K-1)), where L(K-1) is the transformation
stored in columns K-1 and K of A. */
if ( k < n-1 ) {
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k,0), lddb, queue);
#endif
magma_zgemv(MagmaConjTrans, n-k-1, nrhs, c_neg_one,
dB(k+1,0), lddb, dA(k+1,k), 1, c_one,
dB(k,0), lddb, queue);
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k,0), lddb, queue);
#endif
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k-1,0), lddb, queue);
#endif
magma_zgemv(MagmaConjTrans, n-k-1, nrhs, c_neg_one,
dB(k+1,0), lddb, dA(k+1,k-1), 1, c_one,
dB(k-1,0), lddb, queue);
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k-1,0), lddb, queue);
#endif
}
/* Interchange rows K and -IPIV(K).*/
kp = -ipiv[k]-1;
if ( kp != k )
magma_zswap(nrhs, dB(k,0), lddb, dB(kp,0), lddb, queue);
/* reduce k once more for the 2 x 2 block */
k--;
}
}
}
return *info;
}
| 6f987e0c9b00aeec01200a35d92f45936f0d7993.cu | /*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@precisions normal z -> s d c
@author Stan Tomov
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define COMPLEX
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
#define dB(i_, j_) (dB + (i_) + (j_)*lddb)
#ifdef COMPLEX
void magmablas_zlacgv( magma_int_t n, magmaDoubleComplex *x, magma_int_t incx, magma_queue_t queue );
#endif
__global__ void
magma_zgeru_1(
int n, int k, int kp, int nrhs,
magmaDoubleComplex *A, magmaDoubleComplex *B, int lddb)
{
int tx = threadIdx.x + 64 * blockIdx.x;
if (k+1+tx < n)
if (k!=kp && k+1+tx == kp)
// if k <-> kp pivoting, B[k] holds the result for B[kp]
B[k] -= A[kp]*B[kp];
else
B[k+1+tx] -= A[k+1+tx]*B[kp];
}
__global__ void
magma_zswap_scal(
int k, int kp, int nrhs,
magmaDoubleComplex *A, magmaDoubleComplex *B, int lddb)
{
magmaDoubleComplex tmp;
if (k != kp){
tmp = B[k];
B[k] = B[kp];
B[kp] = tmp;
}
B[k] *= MAGMA_Z_DIV(MAGMA_Z_ONE, A[k]);
}
__global__ void
magma_zgeru_2(
int n, int k, int kp, int nrhs,
magmaDoubleComplex *A, int ldda, magmaDoubleComplex *B, int lddb)
{
int tx = threadIdx.x + 64 * blockIdx.x;
if (k+2+tx < n)
if (k+1!=kp && k+2+tx == kp)
// if k+1 <-> kp pivoting, B[k+1] holds the result for B[kp]
B[k+1] -= A[kp]*B[k] + A[kp+ldda]*B[kp];
else
B[k+2+tx] -= A[k+2+tx]*B[k] + A[k+2+tx+ldda]*B[kp];
}
__global__ void
magma_zswap_scal_inverseblock_lower(
int k, int kp, int nrhs,
magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb)
{
int tx = threadIdx.x;
magmaDoubleComplex tmp;
if (k+1 != kp){
tmp = *dB(k+1,tx);
*dB(k+1,tx) = *dB(kp ,tx);
*dB( kp,tx) = tmp;
}
magmaDoubleComplex AKM1K = *dA(1,0);
magmaDoubleComplex AKM1 = MAGMA_Z_DIV(*dA(0,0), MAGMA_Z_CONJ( AKM1K ) );
magmaDoubleComplex AK = MAGMA_Z_DIV(*dA(1,1), AKM1K );
magmaDoubleComplex DENOM = AKM1*AK - MAGMA_Z_ONE;
magmaDoubleComplex BKM1 = MAGMA_Z_DIV( *dB(k,tx), MAGMA_Z_CONJ(AKM1K));
magmaDoubleComplex BK = MAGMA_Z_DIV( *dB(k+1,tx), AKM1K );
*dB(k,tx) = MAGMA_Z_DIV( AK*BKM1-BK , DENOM );
*dB(k+1,tx) = MAGMA_Z_DIV( AKM1*BK-BKM1, DENOM );
}
// This kernel scales the array B by 1/alpha.
// The kernel is called on one thread block with the number of threads equal to the
// length of B, so that each thread scales just one element of B.
__global__ void
magmablas_zdscal_inverse(
magmaDoubleComplex *alpha,
magmaDoubleComplex *B, int ldb)
{
int tx = threadIdx.x;
magmaDoubleComplex scale = MAGMA_Z_DIV(MAGMA_Z_ONE, *alpha);
B[tx*ldb] *= scale;
}
// Multiply array dB of size 2 by the inverse of the 2x2 diagonal block at dA.
// This is a batch operation where each thread handles one right-hand-side column of dB.
__global__ void
magmablas_zdscal_inverseblock_upper(
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb)
{
int tx = threadIdx.x;
magmaDoubleComplex AKM1K = *dA(0,1);
magmaDoubleComplex AKM1 = MAGMA_Z_DIV(*dA(0,0), AKM1K);
magmaDoubleComplex AK = MAGMA_Z_DIV(*dA(1,1), MAGMA_Z_CONJ( AKM1K ));
magmaDoubleComplex DENOM = AKM1*AK - MAGMA_Z_ONE;
magmaDoubleComplex BKM1 = MAGMA_Z_DIV( *dB(0,tx), AKM1K);
magmaDoubleComplex BK = MAGMA_Z_DIV( *dB(1,tx), MAGMA_Z_CONJ(AKM1K) );
*dB(0,tx) = MAGMA_Z_DIV( AK*BKM1-BK , DENOM );
*dB(1,tx) = MAGMA_Z_DIV( AKM1*BK-BKM1, DENOM );
}
__global__ void
magmablas_zdscal_inverseblock_lower(
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb)
{
int tx = threadIdx.x;
magmaDoubleComplex AKM1K = *dA(1,0);
magmaDoubleComplex AKM1 = MAGMA_Z_DIV(*dA(0,0), MAGMA_Z_CONJ( AKM1K ) );
magmaDoubleComplex AK = MAGMA_Z_DIV(*dA(1,1), AKM1K );
magmaDoubleComplex DENOM = AKM1*AK - MAGMA_Z_ONE;
magmaDoubleComplex BKM1 = MAGMA_Z_DIV( *dB(0,tx), MAGMA_Z_CONJ(AKM1K));
magmaDoubleComplex BK = MAGMA_Z_DIV( *dB(1,tx), AKM1K );
*dB(0,tx) = MAGMA_Z_DIV( AK*BKM1-BK , DENOM );
*dB(1,tx) = MAGMA_Z_DIV( AKM1*BK-BKM1, DENOM );
}
/***************************************************************************//**
Purpose
-------
ZHETRS solves a system of linear equations dA*dX = dB with a complex
Hermitian matrix dA using the factorization dA = dU*dD*dU**H or
dA = dL*dD*dL**H computed by ZHETRF_GPU.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies whether the details of the factorization are stored
as an upper or lower triangular matrix.
= MagmaUpper: Upper triangular, form is A = U*D*U**H;
= MagmaLower: Lower triangular, form is A = L*D*L**H.
@param[in]
n INTEGER
The order of the matrix dA. N >= 0.
@param[in]
nrhs INTEGER
The number of right hand sides, i.e., the number of columns
of the matrix dB. NRHS >= 0.
@param[in]
dA COMPLEX_16 array, dimension (LDA,N)
The block diagonal matrix D and the multipliers used to
obtain the factor U or L as computed by ZHETRF_GPU.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,N).
@param[in]
ipiv INTEGER array, dimension (N)
Details of the interchanges and the block structure of D
as determined by ZHETRF.
@param[in,out]
dB COMPLEX_16 array, dimension (LDDB, NRHS)
On entry, the right hand side matrix dB.
On exit, the solution matrix dX.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,N).
@param[out]
info INTEGER
= 0: successful exit
< 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_hetrs
*******************************************************************************/
extern "C" magma_int_t
magma_zhetrs_gpu(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs,
magmaDoubleComplex *dA, magma_int_t ldda,
magma_int_t *ipiv,
magmaDoubleComplex *dB, magma_int_t lddb,
magma_int_t *info,
magma_queue_t queue )
{
/* Constants */
const magmaDoubleComplex c_one = MAGMA_Z_ONE;
const magmaDoubleComplex c_neg_one = MAGMA_Z_NEG_ONE;
/* Local variables */
int k, kp;
bool upper = (uplo == MagmaUpper);
/* Test the input parameters. */
*info = 0;
if ( ! upper && uplo != MagmaLower ) {
*info = -1;
} else if ( n < 0 ) {
*info = -2;
} else if ( nrhs < 0 ) {
*info = -3;
} else if ( ldda < max( 1, n ) ) {
*info = -5;
} else if ( lddb < max( 1, n ) ) {
*info = -8;
}
if ( *info != 0 ) {
magma_xerbla( __func__, -(*info) );
return *info;
}
/* Quick return if possible */
if (n == 0 || nrhs == 0) {
return *info;
}
if (upper) {
/* Solve A*X = B, where A = U*D*U'.
First solve U*D*X = B, overwriting B with X.
K is the main loop index, decreasing from N to 1 in steps of
1 or 2, depending on the size of the diagonal blocks. */
for(k = n-1; k > -1; k--) {
if ( ipiv[k] > 0 ) {
/* 1 x 1 diagonal block.
Interchange rows k and ipiv(k). */
kp = ipiv[k]-1;
if ( kp != k )
magma_zswap(nrhs, dB(k, 0), lddb, dB(kp, 0), lddb, queue);
/* Multiply by inv(U(K)), where U(K) is the transformation
stored in column K of A. */
magma_zgeru(k, nrhs, c_neg_one, dA(0,k), 1, dB(k,0), lddb, dB(0,0), lddb, queue);
/* Multiply by the inverse of the diagonal block. */
magmablas_zdscal_inverse<<<1, nrhs, 0, queue->cuda_stream()>>>
(dA(k,k), dB(k,0), lddb);
}
else {
/* 2 x 2 diagonal block
Interchange rows K-1 and -IPIV(K). */
kp = -ipiv[k]-1;
if ( kp != k-1 )
magma_zswap(nrhs, dB(k-1,0), lddb, dB(kp,0), lddb, queue);
/* Multiply by inv(U(K)), where U(K) is the transformation
stored in columns K-1 and K of A. */
magma_zgeru(k-1, nrhs, c_neg_one, dA(0,k ), 1, dB(k ,0), lddb, dB, lddb, queue);
magma_zgeru(k-1, nrhs, c_neg_one, dA(0,k-1), 1, dB(k-1,0), lddb, dB, lddb, queue);
/* Multiply by the inverse of the diagonal block. */
magmablas_zdscal_inverseblock_upper<<<1, nrhs, 0, queue->cuda_stream()>>>
(dA(k-1,k-1), ldda, dB(k-1,0), lddb);
/* reduce k once more for the 2 x 2 block */
k--;
}
}
/* Next solve U'*X = B, overwriting B with X.
K is the main loop index, increasing from 1 to N in steps of
1 or 2, depending on the size of the diagonal blocks. */
for(k=0; k<n; k++) {
if ( ipiv[k] > 0) {
/* 1 x 1 diagonal block
Multiply by inv(U'(K)), where U(K) is the transformation
stored in column K of A. */
if (k > 0) {
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k,0), lddb, queue);
#endif
magma_zgemv(MagmaConjTrans, k, nrhs, c_neg_one, dB, lddb,
dA(0, k), 1, c_one, dB(k, 0), lddb, queue);
#ifdef COMPLEX
magmablas_zlacgv( nrhs, dB(k,0), lddb, queue);
#endif
}
/* Interchange rows K and IPIV(K). */
kp = ipiv[k]-1;
if( kp != k )
magma_zswap(nrhs, dB(k, 0), lddb, dB(kp, 0), lddb, queue);
}
else {
/* 2 x 2 diagonal block
Multiply by inv(U'(K+1)), where U(K+1) is the transformation
stored in columns K and K+1 of A. */
if (k > 0) {
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k,0), lddb, queue);
#endif
magma_zgemv(MagmaConjTrans, k, nrhs, c_neg_one, dB, lddb,
dA(0, k), 1, c_one, dB(k,0), lddb, queue);
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k,0), lddb, queue);
#endif
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k+1,0), lddb, queue);
#endif
magma_zgemv(MagmaConjTrans, k, nrhs, c_neg_one, dB, lddb,
dA(0, k+1), 1, c_one, dB(k+1, 0), lddb, queue);
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k+1,0), lddb, queue);
#endif
}
/* Interchange rows K and -IPIV(K). */
kp = -ipiv[k]-1;
if ( kp != k )
magma_zswap(nrhs, dB(k, 0), lddb, dB(kp, 0), lddb, queue);
/* increase k one more for the 2 x 2 block */
k++;
}
}
} else {
/* Solve A*X = B, where A = L*D*L'.
First solve L*D*X = B, overwriting B with X.
K is the main loop index, increasing from 1 to N in steps of
1 or 2, depending on the size of the diagonal blocks. */
for(k=0; k<n; k++) {
if ( ipiv[k] > 0) {
/* 1 x 1 diagonal block
Interchange rows K and IPIV(K). */
kp = ipiv[k]-1;
if (0){
if ( kp != k )
magma_zswap(nrhs, dB(k,0), lddb, dB(kp,0), lddb, queue);
/* Multiply by inv(L(K)), where L(K) is the transformation
stored in column K of A. */
if (k < n-1)
magma_zgeru(n-k-1, nrhs, c_neg_one, dA(k+1, k), 1, dB(k,0), lddb,
dB(k+1, 0), lddb, queue);
/* Multiply by the inverse of the diagonal block. */
magmablas_zdscal_inverse<<<1, nrhs, 0, queue->cuda_stream()>>>
(dA(k,k), dB(k,0), lddb);
}
else {
magma_zgeru_1<<<magma_ceildiv(n-k-1,64), 64, 0, queue->cuda_stream()>>>
(n, k, kp, nrhs, dA(0,k), dB, lddb);
magma_zswap_scal<<<1, nrhs, 0, queue->cuda_stream()>>>
(k, kp, nrhs, dA(0,k), dB, lddb);
}
}
else {
/* 2 x 2 diagonal block
Interchange rows K+1 and -IPIV(K). */
kp = -ipiv[k]-1;
if (0) {
if ( kp != k+1 )
magma_zswap(nrhs, dB(k+1,0), lddb, dB(kp,0), lddb, queue);
/* Multiply by inv(L(K)), where L(K) is the transformation
stored in columns K and K+1 of A. */
if ( k < n-2 ) {
magma_zgeru(n-k-2, nrhs, c_neg_one, dA(k+2,k), 1, dB(k,0), lddb,
dB(k+2,0), lddb, queue);
magma_zgeru(n-k-2, nrhs, c_neg_one, dA(k+2,k+1), 1, dB(k+1,0), lddb,
dB(k+2,0), lddb, queue);
}
/* Multiply by the inverse of the diagonal block. */
magmablas_zdscal_inverseblock_lower<<<1, nrhs, 0, queue->cuda_stream()>>>
(dA(k,k), ldda, dB(k,0), lddb);
}
else {
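                        /* Fused path for the 2 x 2 block: custom kernels replace the
                           separate row interchange, rank-1 updates and scaling by the
                           inverse of the diagonal block done in the branch above. */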
magma_zgeru_2<<<magma_ceildiv(n-k-2,64), 64, 0, queue->cuda_stream()>>>
(n, k, kp, nrhs, dA(0,k), ldda, dB, lddb);
magma_zswap_scal_inverseblock_lower<<<1, nrhs, 0, queue->cuda_stream()>>>
(k, kp, nrhs, dA(k,k), ldda, dB(0, 0), lddb);
}
/* increase k one more for the 2 x 2 block */
k++;
}
}
/* Next solve L'*X = B, overwriting B with X.
K is the main loop index, decreasing from N to 1 in steps of
1 or 2, depending on the size of the diagonal blocks. */
for(k = n-1; k > -1; k--) {
if ( ipiv[k] > 0 ) {
/* 1 x 1 diagonal block.
Multiply by inv(L'(K)), where L(K) is the transformation
stored in column K of A. */
if (1){
if ( k < n-1 ) {
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k,0), lddb, queue);
#endif
magma_zgemv(MagmaConjTrans, n-k-1, nrhs, c_neg_one,
dB(k+1,0), lddb, dA(k+1,k), 1, c_one,
dB(k,0), lddb, queue);
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k,0), lddb, queue);
#endif
}
/* Interchange rows K and IPIV(K). */
kp = ipiv[k]-1;
if ( kp!=k )
magma_zswap(nrhs, dB(k,0), lddb, dB(kp,0), lddb, queue);
}
else {
}
}
else {
/* 2 x 2 diagonal block
Multiply by inv(L'(K-1)), where L(K-1) is the transformation
stored in columns K-1 and K of A. */
if ( k < n-1 ) {
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k,0), lddb, queue);
#endif
magma_zgemv(MagmaConjTrans, n-k-1, nrhs, c_neg_one,
dB(k+1,0), lddb, dA(k+1,k), 1, c_one,
dB(k,0), lddb, queue);
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k,0), lddb, queue);
#endif
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k-1,0), lddb, queue);
#endif
magma_zgemv(MagmaConjTrans, n-k-1, nrhs, c_neg_one,
dB(k+1,0), lddb, dA(k+1,k-1), 1, c_one,
dB(k-1,0), lddb, queue);
#ifdef COMPLEX
magmablas_zlacgv(nrhs, dB(k-1,0), lddb, queue);
#endif
}
/* Interchange rows K and -IPIV(K).*/
kp = -ipiv[k]-1;
if ( kp != k )
magma_zswap(nrhs, dB(k,0), lddb, dB(kp,0), lddb, queue);
/* reduce k once more for the 2 x 2 block */
k--;
}
}
}
return *info;
}
|
4c7223030248a267b44ddf6692547c04d0032c68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/io/types.hpp>
#include <cudf/utilities/error.hpp>
#include <thrust/pair.h>
#include <rmm/device_buffer.hpp>
#include <algorithm>
namespace cudf {
namespace io {
namespace {
// When processing the input in chunks, this is the maximum size of each chunk.
// Only one chunk is loaded on the GPU at a time, so this value is chosen to
// be small enough to fit on the GPU in most cases.
constexpr size_t max_chunk_bytes = 256 * 1024 * 1024; // 256MB
constexpr int bytes_per_find_thread = 64;
using pos_key_pair = thrust::pair<uint64_t, char>;
template <typename T>
constexpr T divCeil(T dividend, T divisor) noexcept
{
return (dividend + divisor - 1) / divisor;
}
/**
* @brief Sets the specified element of the array to the passed value
*/
template <class T, class V>
__device__ __forceinline__ void setElement(T* array, cudf::size_type idx, const T& t, const V&)
{
array[idx] = t;
}
/**
* @brief Sets the specified element of the array of pairs using the two passed
* parameters.
*/
template <class T, class V>
__device__ __forceinline__ void setElement(thrust::pair<T, V>* array,
cudf::size_type idx,
const T& t,
const V& v)
{
array[idx] = {t, v};
}
/**
* @brief Overloads the setElement() functions for void* arrays.
* Does not do anything, indexing is not allowed with void* arrays.
*/
template <class T, class V>
__device__ __forceinline__ void setElement(void*, cudf::size_type, const T&, const V&)
{
}
/**
* @brief CUDA kernel that finds all occurrences of a character in the given
* character array. If the 'positions' parameter is not void*,
* positions of all occurrences are stored in the output array.
*
* @param[in] data Pointer to the input character array
* @param[in] size Number of bytes in the input array
* @param[in] offset Offset to add to the output positions
* @param[in] key Character to find in the array
* @param[in,out] count Pointer to the number of found occurrences
* @param[out] positions Array containing the output positions
*/
template <class T>
__global__ void count_and_set_positions(const char* data,
uint64_t size,
uint64_t offset,
const char key,
cudf::size_type* count,
T* positions)
{
  // thread IDs range per block, so we also need the block id
const uint64_t tid = threadIdx.x + (blockDim.x * blockIdx.x);
const uint64_t did = tid * bytes_per_find_thread;
const char* raw = (data + did);
const long byteToProcess =
((did + bytes_per_find_thread) < size) ? bytes_per_find_thread : (size - did);
// Process the data
for (long i = 0; i < byteToProcess; i++) {
if (raw[i] == key) {
const auto idx = atomicAdd(count, (cudf::size_type)1);
setElement(positions, idx, did + offset + i, key);
}
}
}
} // namespace
template <class T>
cudf::size_type find_all_from_set(device_span<char const> data,
std::vector<char> const& keys,
uint64_t result_offset,
T* positions,
rmm::cuda_stream_view stream)
{
int block_size = 0; // suggested thread count to use
int min_grid_size = 0; // minimum block count required
CUDF_CUDA_TRY(
hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, count_and_set_positions<T>));
const int grid_size = divCeil(data.size(), (size_t)block_size);
auto d_count = cudf::detail::make_zeroed_device_uvector_async<cudf::size_type>(1, stream);
for (char key : keys) {
hipLaunchKernelGGL(( count_and_set_positions<T>), dim3(grid_size), dim3(block_size), 0, stream.value(),
data.data(), data.size(), result_offset, key, d_count.data(), positions);
}
return cudf::detail::make_std_vector_sync(d_count, stream)[0];
}
template <class T>
cudf::size_type find_all_from_set(host_span<char const> data,
const std::vector<char>& keys,
uint64_t result_offset,
T* positions,
rmm::cuda_stream_view stream)
{
rmm::device_buffer d_chunk(::min(max_chunk_bytes, data.size()), stream);
auto d_count = cudf::detail::make_zeroed_device_uvector_async<cudf::size_type>(1, stream);
int block_size = 0; // suggested thread count to use
int min_grid_size = 0; // minimum block count required
CUDF_CUDA_TRY(
hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, count_and_set_positions<T>));
const size_t chunk_count = divCeil(data.size(), max_chunk_bytes);
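  // Process the input one chunk at a time so that at most max_chunk_bytes of
  // host data is resident on the device for this search.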
for (size_t ci = 0; ci < chunk_count; ++ci) {
const auto chunk_offset = ci * max_chunk_bytes;
const auto h_chunk = data.data() + chunk_offset;
const int chunk_bytes = ::min((size_t)(data.size() - ci * max_chunk_bytes), max_chunk_bytes);
const auto chunk_bits = divCeil(chunk_bytes, bytes_per_find_thread);
const int grid_size = divCeil(chunk_bits, block_size);
// Copy chunk to device
CUDF_CUDA_TRY(
hipMemcpyAsync(d_chunk.data(), h_chunk, chunk_bytes, hipMemcpyDefault, stream.value()));
for (char key : keys) {
hipLaunchKernelGGL(( count_and_set_positions<T>)
, dim3(grid_size), dim3(block_size), 0, stream.value(), static_cast<char*>(d_chunk.data()),
chunk_bytes,
chunk_offset + result_offset,
key,
d_count.data(),
positions);
}
}
return cudf::detail::make_std_vector_sync(d_count, stream)[0];
}
template cudf::size_type find_all_from_set<uint64_t>(device_span<char const> data,
std::vector<char> const& keys,
uint64_t result_offset,
uint64_t* positions,
rmm::cuda_stream_view stream);
template cudf::size_type find_all_from_set<pos_key_pair>(device_span<char const> data,
std::vector<char> const& keys,
uint64_t result_offset,
pos_key_pair* positions,
rmm::cuda_stream_view stream);
template cudf::size_type find_all_from_set<uint64_t>(host_span<char const> data,
std::vector<char> const& keys,
uint64_t result_offset,
uint64_t* positions,
rmm::cuda_stream_view stream);
template cudf::size_type find_all_from_set<pos_key_pair>(host_span<char const> data,
std::vector<char> const& keys,
uint64_t result_offset,
pos_key_pair* positions,
rmm::cuda_stream_view stream);
cudf::size_type count_all_from_set(device_span<char const> data,
std::vector<char> const& keys,
rmm::cuda_stream_view stream)
{
return find_all_from_set<void>(data, keys, 0, nullptr, stream);
}
cudf::size_type count_all_from_set(host_span<char const> data,
const std::vector<char>& keys,
rmm::cuda_stream_view stream)
{
return find_all_from_set<void>(data, keys, 0, nullptr, stream);
}
} // namespace io
} // namespace cudf
| 4c7223030248a267b44ddf6692547c04d0032c68.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/io/types.hpp>
#include <cudf/utilities/error.hpp>
#include <thrust/pair.h>
#include <rmm/device_buffer.hpp>
#include <algorithm>
namespace cudf {
namespace io {
namespace {
// When processing the input in chunks, this is the maximum size of each chunk.
// Only one chunk is loaded on the GPU at a time, so this value is chosen to
// be small enough to fit on the GPU in most cases.
constexpr size_t max_chunk_bytes = 256 * 1024 * 1024; // 256MB
constexpr int bytes_per_find_thread = 64;
using pos_key_pair = thrust::pair<uint64_t, char>;
template <typename T>
constexpr T divCeil(T dividend, T divisor) noexcept
{
return (dividend + divisor - 1) / divisor;
}
/**
* @brief Sets the specified element of the array to the passed value
*/
template <class T, class V>
__device__ __forceinline__ void setElement(T* array, cudf::size_type idx, const T& t, const V&)
{
array[idx] = t;
}
/**
* @brief Sets the specified element of the array of pairs using the two passed
* parameters.
*/
template <class T, class V>
__device__ __forceinline__ void setElement(thrust::pair<T, V>* array,
cudf::size_type idx,
const T& t,
const V& v)
{
array[idx] = {t, v};
}
/**
* @brief Overloads the setElement() functions for void* arrays.
* Does not do anything, indexing is not allowed with void* arrays.
*/
template <class T, class V>
__device__ __forceinline__ void setElement(void*, cudf::size_type, const T&, const V&)
{
}
/**
* @brief CUDA kernel that finds all occurrences of a character in the given
* character array. If the 'positions' parameter is not void*,
* positions of all occurrences are stored in the output array.
*
* @param[in] data Pointer to the input character array
* @param[in] size Number of bytes in the input array
* @param[in] offset Offset to add to the output positions
* @param[in] key Character to find in the array
* @param[in,out] count Pointer to the number of found occurrences
* @param[out] positions Array containing the output positions
*/
template <class T>
__global__ void count_and_set_positions(const char* data,
uint64_t size,
uint64_t offset,
const char key,
cudf::size_type* count,
T* positions)
{
  // thread IDs range per block, so we also need the block id
const uint64_t tid = threadIdx.x + (blockDim.x * blockIdx.x);
const uint64_t did = tid * bytes_per_find_thread;
const char* raw = (data + did);
const long byteToProcess =
((did + bytes_per_find_thread) < size) ? bytes_per_find_thread : (size - did);
// Process the data
for (long i = 0; i < byteToProcess; i++) {
if (raw[i] == key) {
const auto idx = atomicAdd(count, (cudf::size_type)1);
setElement(positions, idx, did + offset + i, key);
}
}
}
} // namespace
template <class T>
cudf::size_type find_all_from_set(device_span<char const> data,
std::vector<char> const& keys,
uint64_t result_offset,
T* positions,
rmm::cuda_stream_view stream)
{
int block_size = 0; // suggested thread count to use
int min_grid_size = 0; // minimum block count required
CUDF_CUDA_TRY(
cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, count_and_set_positions<T>));
const int grid_size = divCeil(data.size(), (size_t)block_size);
auto d_count = cudf::detail::make_zeroed_device_uvector_async<cudf::size_type>(1, stream);
for (char key : keys) {
count_and_set_positions<T><<<grid_size, block_size, 0, stream.value()>>>(
data.data(), data.size(), result_offset, key, d_count.data(), positions);
}
return cudf::detail::make_std_vector_sync(d_count, stream)[0];
}
template <class T>
cudf::size_type find_all_from_set(host_span<char const> data,
const std::vector<char>& keys,
uint64_t result_offset,
T* positions,
rmm::cuda_stream_view stream)
{
rmm::device_buffer d_chunk(std::min(max_chunk_bytes, data.size()), stream);
auto d_count = cudf::detail::make_zeroed_device_uvector_async<cudf::size_type>(1, stream);
int block_size = 0; // suggested thread count to use
int min_grid_size = 0; // minimum block count required
CUDF_CUDA_TRY(
cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, count_and_set_positions<T>));
const size_t chunk_count = divCeil(data.size(), max_chunk_bytes);
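  // Process the input one chunk at a time so that at most max_chunk_bytes of
  // host data is resident on the device for this search.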
for (size_t ci = 0; ci < chunk_count; ++ci) {
const auto chunk_offset = ci * max_chunk_bytes;
const auto h_chunk = data.data() + chunk_offset;
const int chunk_bytes = std::min((size_t)(data.size() - ci * max_chunk_bytes), max_chunk_bytes);
const auto chunk_bits = divCeil(chunk_bytes, bytes_per_find_thread);
const int grid_size = divCeil(chunk_bits, block_size);
// Copy chunk to device
CUDF_CUDA_TRY(
cudaMemcpyAsync(d_chunk.data(), h_chunk, chunk_bytes, cudaMemcpyDefault, stream.value()));
for (char key : keys) {
count_and_set_positions<T>
<<<grid_size, block_size, 0, stream.value()>>>(static_cast<char*>(d_chunk.data()),
chunk_bytes,
chunk_offset + result_offset,
key,
d_count.data(),
positions);
}
}
return cudf::detail::make_std_vector_sync(d_count, stream)[0];
}
template cudf::size_type find_all_from_set<uint64_t>(device_span<char const> data,
std::vector<char> const& keys,
uint64_t result_offset,
uint64_t* positions,
rmm::cuda_stream_view stream);
template cudf::size_type find_all_from_set<pos_key_pair>(device_span<char const> data,
std::vector<char> const& keys,
uint64_t result_offset,
pos_key_pair* positions,
rmm::cuda_stream_view stream);
template cudf::size_type find_all_from_set<uint64_t>(host_span<char const> data,
std::vector<char> const& keys,
uint64_t result_offset,
uint64_t* positions,
rmm::cuda_stream_view stream);
template cudf::size_type find_all_from_set<pos_key_pair>(host_span<char const> data,
std::vector<char> const& keys,
uint64_t result_offset,
pos_key_pair* positions,
rmm::cuda_stream_view stream);
cudf::size_type count_all_from_set(device_span<char const> data,
std::vector<char> const& keys,
rmm::cuda_stream_view stream)
{
return find_all_from_set<void>(data, keys, 0, nullptr, stream);
}
cudf::size_type count_all_from_set(host_span<char const> data,
const std::vector<char>& keys,
rmm::cuda_stream_view stream)
{
return find_all_from_set<void>(data, keys, 0, nullptr, stream);
}
} // namespace io
} // namespace cudf
|
3899e3914a12a7c817439cd7b9274c509e16e8cf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
// Change the code here:
// This should be changed to GPU kernel definition
void matAdd(int width, int height, const float* A, const float* B, float* C)
{
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
int index = i*width + j;
C[index] = A[index] + B[index];
}
}
}
int main()
{
int width = 1000;
int height = 100;
int numElements = width*height;
float* A = (float*)calloc(numElements, sizeof(float));
float* B = (float*)calloc(numElements, sizeof(float));
float* C = (float*)calloc(numElements, sizeof(float));
srand(1214134);
for (int i = 0; i < numElements; i++)
{
A[i] = float(rand())/float(RAND_MAX + 1.0);
B[i] = float(rand())/float(RAND_MAX + 1.0);
}
// Insert your code here:
// 1. Create GPU device buffers
    // 2. Copy input data from host to device (matrices A and B)
// 3. Change the CPU function call to the GPU kernel call
matAdd(width, height, A, B, C);
// 4. Copy the result back (matrix C)
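    // One possible sketch (hypothetical names, not part of the original exercise):
    //   float *dA, *dB, *dC;                                                  // step 1
    //   hipMalloc(&dA, numElements*sizeof(float));                            // ... same for dB, dC
    //   hipMemcpy(dA, A, numElements*sizeof(float), hipMemcpyHostToDevice);   // step 2, same for dB
    //   hipLaunchKernelGGL(matAddKernel, dim3((width+15)/16, (height+15)/16),
    //                      dim3(16,16), 0, 0, width, height, dA, dB, dC);     // step 3
    //   hipMemcpy(C, dC, numElements*sizeof(float), hipMemcpyDeviceToHost);   // step 4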
for (int i = 0; i < ::min(5, height); i++)
{
for (int j = 0; j < ::min(5, width); j++)
{
int index = i*width + j;
printf("%3.2f + %3.2f = %3.2f;\t", A[index], B[index], C[index]);
}
printf("...\n");
}
printf("...\n");
free(A);
free(B);
free(C);
// Free GPU memory here
return 0;
}
| 3899e3914a12a7c817439cd7b9274c509e16e8cf.cu | #include <stdio.h>
#include <stdlib.h>
#include <algorithm>
// Change the code here:
// This should be changed to GPU kernel definition
void matAdd(int width, int height, const float* A, const float* B, float* C)
{
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
int index = i*width + j;
C[index] = A[index] + B[index];
}
}
}
int main()
{
int width = 1000;
int height = 100;
int numElements = width*height;
float* A = (float*)calloc(numElements, sizeof(float));
float* B = (float*)calloc(numElements, sizeof(float));
float* C = (float*)calloc(numElements, sizeof(float));
srand(1214134);
for (int i = 0; i < numElements; i++)
{
A[i] = float(rand())/float(RAND_MAX + 1.0);
B[i] = float(rand())/float(RAND_MAX + 1.0);
}
// Insert your code here:
// 1. Create GPU device buffers
    // 2. Copy input data from host to device (matrices A and B)
// 3. Change the CPU function call to the GPU kernel call
matAdd(width, height, A, B, C);
// 4. Copy the result back (matrix C)
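    // One possible sketch (hypothetical names, not part of the original exercise):
    //   float *dA, *dB, *dC;                                                    // step 1
    //   cudaMalloc(&dA, numElements*sizeof(float));                             // ... same for dB, dC
    //   cudaMemcpy(dA, A, numElements*sizeof(float), cudaMemcpyHostToDevice);   // step 2, same for dB
    //   matAddKernel<<<dim3((width+15)/16, (height+15)/16), dim3(16,16)>>>
    //               (width, height, dA, dB, dC);                                // step 3
    //   cudaMemcpy(C, dC, numElements*sizeof(float), cudaMemcpyDeviceToHost);   // step 4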
for (int i = 0; i < std::min(5, height); i++)
{
for (int j = 0; j < std::min(5, width); j++)
{
int index = i*width + j;
printf("%3.2f + %3.2f = %3.2f;\t", A[index], B[index], C[index]);
}
printf("...\n");
}
printf("...\n");
free(A);
free(B);
free(C);
// Free GPU memory here
return 0;
}
|
86bc33c517d18eb8d207065fbefb90346e1cad44.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layers/euclidean_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int count = bottom[0]->count();
// DeepDriving Changes: Normalize the training data before calculating the loss
//
  // For some reason, the original DeepDriving training data is not normalized, so
  // we have to do it here before calculating the loss. This is very... ugly!
  // Maybe it would be better to implement a script that preprocesses the training
  // data by normalizing it before training starts. I will think about it in a future
// implementation.
int BatchSize = bottom[0]->num();
int OutputDimension = count/BatchSize;
const Dtype* CudaResult = bottom[0]->gpu_data();
const Dtype* CudaLabel = bottom[1]->gpu_data();
Dtype Label[BatchSize*14];
Dtype Result[count];
Dtype LabelArray[count];
hipMemcpy(Result, CudaResult, sizeof(Dtype) * count, hipMemcpyDeviceToHost);
hipMemcpy(Label, CudaLabel, sizeof(Dtype) * BatchSize * 14, hipMemcpyDeviceToHost);
for (int i = 0; i < BatchSize; ++i)
{
LabelArray[i * OutputDimension] = Label[i*14+0]/1.1+0.5; // angle range ~ [-0.5, 0.5]
if (LabelArray[i * OutputDimension]>1.0) LabelArray[i * OutputDimension]=1.0;
if (LabelArray[i * OutputDimension]<0.0) LabelArray[i * OutputDimension]=0.0;
LabelArray[i * OutputDimension + 1] = Label[i*14+1]*0.17778+1.34445; // toMarking_L range ~ [-7, -2.5]
LabelArray[i * OutputDimension + 2] = Label[i*14+2]*0.14545+0.39091; // toMarking_M range ~ [-2, 3.5]
LabelArray[i * OutputDimension + 3] = Label[i*14+3]*0.17778-0.34445; // toMarking_R range ~ [2.5, 7]
LabelArray[i * OutputDimension + 4] = Label[i*14+4]/95.0+0.12; // dist_L range ~ [0, 75]
LabelArray[i * OutputDimension + 5] = Label[i*14+5]/95.0+0.12; // dist_R range ~ [0, 75]
LabelArray[i * OutputDimension + 6] = Label[i*14+6]*0.14545+1.48181; // toMarking_LL range ~ [-9.5, -4]
LabelArray[i * OutputDimension + 7] = Label[i*14+7]*0.16+0.98; // toMarking_ML range ~ [-5.5, -0.5]
LabelArray[i * OutputDimension + 8] = Label[i*14+8]*0.16+0.02; // toMarking_MR range ~ [0.5, 5.5]
LabelArray[i * OutputDimension + 9] = Label[i*14+9]*0.14545-0.48181; // toMarking_RR range ~ [4, 9.5]
LabelArray[i * OutputDimension + 10] = Label[i*14+10]/95.0+0.12; // dist_LL range ~ [0, 75]
LabelArray[i * OutputDimension + 11] = Label[i*14+11]/95.0+0.12; // dist_MM range ~ [0, 75]
LabelArray[i * OutputDimension + 12] = Label[i*14+12]/95.0+0.12; // dist_RR range ~ [0, 75]
LabelArray[i * OutputDimension + 13] = Label[i*14+13]*0.6+0.2; // fast range ~ {0, 1}
}
Dtype* CudaLabelArray;
hipMalloc((void**)&CudaLabelArray, sizeof(Dtype) * count);
hipMemcpy(CudaLabelArray, LabelArray, sizeof(Dtype) * count, hipMemcpyHostToDevice);
caffe_gpu_sub(
count,
CudaResult,
CudaLabelArray,
diff_.mutable_gpu_data());
Dtype dot;
caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
Dtype loss = dot / bottom[0]->num() / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
hipFree(CudaLabelArray);
/*
Dtype Differences[count];
hipMemcpy(Differences, diff_.gpu_data(), sizeof(Dtype) * count, hipMemcpyDeviceToHost);
//for (int i = 0; i < BatchSize; ++i)
{
int i=0;
for (int j = 0; j < OutputDimension; ++j)
{
printf("BatchElement: %d, Value: %d, Result: %f, LabelArray: %f, diff: %f \n",
i,
j,
Result[i*OutputDimension+j],
LabelArray[i*OutputDimension+j],
Differences[i*OutputDimension+j]);
fflush(stdout);
}
}
printf("Current Loss: %f\n", loss);
*/
}
template <typename Dtype>
void EuclideanLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
caffe_gpu_axpby(
bottom[i]->count(), // count
alpha, // alpha
diff_.gpu_data(), // a
Dtype(0), // beta
bottom[i]->mutable_gpu_diff()); // b
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EuclideanLossLayer);
} // namespace caffe
| 86bc33c517d18eb8d207065fbefb90346e1cad44.cu | #include <vector>
#include "caffe/layers/euclidean_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int count = bottom[0]->count();
// DeepDriving Changes: Normalize the training data before calculating the loss
//
  // For some reason, the original DeepDriving training data is not normalized, so
  // we have to do it here before calculating the loss. This is very... ugly!
  // Maybe it would be better to implement a script that preprocesses the training
  // data by normalizing it before training starts. I will think about it in a future
// implementation.
int BatchSize = bottom[0]->num();
int OutputDimension = count/BatchSize;
const Dtype* CudaResult = bottom[0]->gpu_data();
const Dtype* CudaLabel = bottom[1]->gpu_data();
Dtype Label[BatchSize*14];
Dtype Result[count];
Dtype LabelArray[count];
cudaMemcpy(Result, CudaResult, sizeof(Dtype) * count, cudaMemcpyDeviceToHost);
cudaMemcpy(Label, CudaLabel, sizeof(Dtype) * BatchSize * 14, cudaMemcpyDeviceToHost);
for (int i = 0; i < BatchSize; ++i)
{
LabelArray[i * OutputDimension] = Label[i*14+0]/1.1+0.5; // angle range ~ [-0.5, 0.5]
if (LabelArray[i * OutputDimension]>1.0) LabelArray[i * OutputDimension]=1.0;
if (LabelArray[i * OutputDimension]<0.0) LabelArray[i * OutputDimension]=0.0;
LabelArray[i * OutputDimension + 1] = Label[i*14+1]*0.17778+1.34445; // toMarking_L range ~ [-7, -2.5]
LabelArray[i * OutputDimension + 2] = Label[i*14+2]*0.14545+0.39091; // toMarking_M range ~ [-2, 3.5]
LabelArray[i * OutputDimension + 3] = Label[i*14+3]*0.17778-0.34445; // toMarking_R range ~ [2.5, 7]
LabelArray[i * OutputDimension + 4] = Label[i*14+4]/95.0+0.12; // dist_L range ~ [0, 75]
LabelArray[i * OutputDimension + 5] = Label[i*14+5]/95.0+0.12; // dist_R range ~ [0, 75]
LabelArray[i * OutputDimension + 6] = Label[i*14+6]*0.14545+1.48181; // toMarking_LL range ~ [-9.5, -4]
LabelArray[i * OutputDimension + 7] = Label[i*14+7]*0.16+0.98; // toMarking_ML range ~ [-5.5, -0.5]
LabelArray[i * OutputDimension + 8] = Label[i*14+8]*0.16+0.02; // toMarking_MR range ~ [0.5, 5.5]
LabelArray[i * OutputDimension + 9] = Label[i*14+9]*0.14545-0.48181; // toMarking_RR range ~ [4, 9.5]
LabelArray[i * OutputDimension + 10] = Label[i*14+10]/95.0+0.12; // dist_LL range ~ [0, 75]
LabelArray[i * OutputDimension + 11] = Label[i*14+11]/95.0+0.12; // dist_MM range ~ [0, 75]
LabelArray[i * OutputDimension + 12] = Label[i*14+12]/95.0+0.12; // dist_RR range ~ [0, 75]
LabelArray[i * OutputDimension + 13] = Label[i*14+13]*0.6+0.2; // fast range ~ {0, 1}
}
Dtype* CudaLabelArray;
cudaMalloc((void**)&CudaLabelArray, sizeof(Dtype) * count);
cudaMemcpy(CudaLabelArray, LabelArray, sizeof(Dtype) * count, cudaMemcpyHostToDevice);
caffe_gpu_sub(
count,
CudaResult,
CudaLabelArray,
diff_.mutable_gpu_data());
Dtype dot;
caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
Dtype loss = dot / bottom[0]->num() / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
cudaFree(CudaLabelArray);
/*
Dtype Differences[count];
cudaMemcpy(Differences, diff_.gpu_data(), sizeof(Dtype) * count, cudaMemcpyDeviceToHost);
//for (int i = 0; i < BatchSize; ++i)
{
int i=0;
for (int j = 0; j < OutputDimension; ++j)
{
printf("BatchElement: %d, Value: %d, Result: %f, LabelArray: %f, diff: %f \n",
i,
j,
Result[i*OutputDimension+j],
LabelArray[i*OutputDimension+j],
Differences[i*OutputDimension+j]);
fflush(stdout);
}
}
printf("Current Loss: %f\n", loss);
*/
}
template <typename Dtype>
void EuclideanLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
caffe_gpu_axpby(
bottom[i]->count(), // count
alpha, // alpha
diff_.gpu_data(), // a
Dtype(0), // beta
bottom[i]->mutable_gpu_diff()); // b
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(EuclideanLossLayer);
} // namespace caffe
|
9c97b68ab52c62ed9aa17963e12d2b22b6d9c9fb.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "stdio.h"
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <hip/hip_runtime.h>
#include <cassert>
#include "book.h"
// #include "/private/herten/NVIDIA_GPU_Computing_SDK/C/src/simplePrintf/cuPrintf.cuh"
//#include "../cuPrintf.cuh"
struct ThreadInfo {
int deviceToRunOn;
thrust::host_vector<int> host_data_a;
thrust::host_vector<int> host_data_b;
int numData;
int returnValue;
};
void * threadFunction(void* tinfo) {
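    // Each host thread binds to its own GPU, copies its share of the input there,
    // and computes a partial dot product that the main thread later sums up.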
ThreadInfo * currData = (ThreadInfo*) tinfo;
int nDev = currData->deviceToRunOn;
hipSetDevice(nDev);
thrust::device_vector<int> localA = currData->host_data_a;
thrust::device_vector<int> localB = currData->host_data_b;
// std::cout << "[" << nDev << "]: localA[0] = " << localA[0] << std::endl;
// std::cout << "[" << nDev << "]: localB[0] = " << localB[0] << std::endl;
int dotProductResult = thrust::inner_product(localA.begin(), localA.end(), localB.begin(), 0);
// std::cout << "[" << nDev << "]: dot product = " << dotProductResult << std::endl;
currData->returnValue = dotProductResult;
return 0;
};
int main (int argc, char** argv) {
int sizeOfVector = 100;
int elementOne = 1;
int elementTwo = 2;
if (argc > 1) elementOne = atoi(argv[1]);
if (argc > 1) elementTwo = atoi(argv[2]);
thrust::host_vector<int> firstVectorA(sizeOfVector/2, elementOne);
thrust::host_vector<int> firstVectorB(sizeOfVector/2, elementOne);
thrust::host_vector<int> secondVectorA(sizeOfVector/2, elementTwo);
thrust::host_vector<int> secondVectorB(sizeOfVector/2, elementTwo);
ThreadInfo oneThread;
oneThread.deviceToRunOn = 0;
oneThread.host_data_a = firstVectorA;
oneThread.host_data_b = secondVectorA;
oneThread.numData = sizeOfVector / 2;
ThreadInfo twoThread;
twoThread.deviceToRunOn = 1;
twoThread.host_data_a = firstVectorB;
twoThread.host_data_b = secondVectorB;
twoThread.numData = (1+sizeOfVector) / 2;
CUTThread threadId = start_thread(&threadFunction, &oneThread);
threadFunction(&twoThread);
end_thread(threadId);
int finalDotProduct = oneThread.returnValue + twoThread.returnValue;
std::cout << "Dot product is: " << oneThread.returnValue << " + " << twoThread.returnValue << " = " << finalDotProduct << std::endl;
}
| 9c97b68ab52c62ed9aa17963e12d2b22b6d9c9fb.cu | #include <iostream>
#include "stdio.h"
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <cuda.h>
#include <cassert>
#include "book.h"
// #include "/private/herten/NVIDIA_GPU_Computing_SDK/C/src/simplePrintf/cuPrintf.cuh"
//#include "../cuPrintf.cuh"
struct ThreadInfo {
int deviceToRunOn;
thrust::host_vector<int> host_data_a;
thrust::host_vector<int> host_data_b;
int numData;
int returnValue;
};
void * threadFunction(void* tinfo) {
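    // Each host thread binds to its own GPU, copies its share of the input there,
    // and computes a partial dot product that the main thread later sums up.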
ThreadInfo * currData = (ThreadInfo*) tinfo;
int nDev = currData->deviceToRunOn;
cudaSetDevice(nDev);
thrust::device_vector<int> localA = currData->host_data_a;
thrust::device_vector<int> localB = currData->host_data_b;
// std::cout << "[" << nDev << "]: localA[0] = " << localA[0] << std::endl;
// std::cout << "[" << nDev << "]: localB[0] = " << localB[0] << std::endl;
int dotProductResult = thrust::inner_product(localA.begin(), localA.end(), localB.begin(), 0);
// std::cout << "[" << nDev << "]: dot product = " << dotProductResult << std::endl;
currData->returnValue = dotProductResult;
return 0;
};
int main (int argc, char** argv) {
int sizeOfVector = 100;
int elementOne = 1;
int elementTwo = 2;
if (argc > 1) elementOne = atoi(argv[1]);
if (argc > 1) elementTwo = atoi(argv[2]);
thrust::host_vector<int> firstVectorA(sizeOfVector/2, elementOne);
thrust::host_vector<int> firstVectorB(sizeOfVector/2, elementOne);
thrust::host_vector<int> secondVectorA(sizeOfVector/2, elementTwo);
thrust::host_vector<int> secondVectorB(sizeOfVector/2, elementTwo);
ThreadInfo oneThread;
oneThread.deviceToRunOn = 0;
oneThread.host_data_a = firstVectorA;
oneThread.host_data_b = secondVectorA;
oneThread.numData = sizeOfVector / 2;
ThreadInfo twoThread;
twoThread.deviceToRunOn = 1;
twoThread.host_data_a = firstVectorB;
twoThread.host_data_b = secondVectorB;
twoThread.numData = (1+sizeOfVector) / 2;
CUTThread threadId = start_thread(&threadFunction, &oneThread);
threadFunction(&twoThread);
end_thread(threadId);
int finalDotProduct = oneThread.returnValue + twoThread.returnValue;
std::cout << "Dot product is: " << oneThread.returnValue << " + " << twoThread.returnValue << " = " << finalDotProduct << std::endl;
}
|
a5e5f0c7efa8675773078a09c6621c7bfd52536c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/merge.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
template<typename ExecutionPolicy,
typename Iterator1,
typename Iterator2,
typename Iterator3,
typename Iterator4,
typename Iterator5,
typename Iterator6,
typename Iterator7>
__global__
void merge_by_key_kernel(ExecutionPolicy exec,
Iterator1 keys_first1, Iterator1 keys_last1,
Iterator2 keys_first2, Iterator2 keys_last2,
Iterator3 values_first1,
Iterator4 values_first2,
Iterator5 keys_result,
Iterator6 values_result,
Iterator7 result)
{
*result = thrust::merge_by_key(exec, keys_first1, keys_last1, keys_first2, keys_last2, values_first1, values_first2, keys_result, values_result);
}
template<typename ExecutionPolicy>
void TestMergeByKeyDevice(ExecutionPolicy exec)
{
thrust::device_vector<int> a_key(3), a_val(3), b_key(4), b_val(4);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
a_val[0] = 13; a_val[1] = 7; a_val[2] = 42;
b_key[0] = 0 ; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
b_val[0] = 42; b_val[1] = 42; b_val[2] = 7; b_val[3] = 13;
thrust::device_vector<int> ref_key(7), ref_val(7);
ref_key[0] = 0; ref_val[0] = 13;
ref_key[1] = 0; ref_val[1] = 42;
ref_key[2] = 2; ref_val[2] = 7;
ref_key[3] = 3; ref_val[3] = 42;
ref_key[4] = 3; ref_val[4] = 7;
ref_key[5] = 4; ref_val[5] = 42;
ref_key[6] = 4; ref_val[6] = 13;
thrust::device_vector<int> result_key(7), result_val(7);
typedef typename thrust::device_vector<int>::iterator Iterator;
thrust::device_vector<thrust::pair<Iterator,Iterator> > result_ends(1);
hipLaunchKernelGGL(( merge_by_key_kernel), dim3(1),dim3(1), 0, 0, exec,
a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(), b_val.begin(),
result_key.begin(),
result_val.begin(),
result_ends.begin());
thrust::pair<Iterator,Iterator> ends = result_ends[0];
ASSERT_EQUAL_QUIET(result_key.end(), ends.first);
ASSERT_EQUAL_QUIET(result_val.end(), ends.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
}
void TestMergeByKeyDeviceSeq()
{
TestMergeByKeyDevice(thrust::seq);
}
DECLARE_UNITTEST(TestMergeByKeyDeviceSeq);
void TestMergeByKeyDeviceDevice()
{
TestMergeByKeyDevice(thrust::device);
}
DECLARE_UNITTEST(TestMergeByKeyDeviceDevice);
void TestMergeByKeyCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a_key(3), a_val(3), b_key(4), b_val(4);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
a_val[0] = 13; a_val[1] = 7; a_val[2] = 42;
b_key[0] = 0 ; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
b_val[0] = 42; b_val[1] = 42; b_val[2] = 7; b_val[3] = 13;
Vector ref_key(7), ref_val(7);
ref_key[0] = 0; ref_val[0] = 13;
ref_key[1] = 0; ref_val[1] = 42;
ref_key[2] = 2; ref_val[2] = 7;
ref_key[3] = 3; ref_val[3] = 42;
ref_key[4] = 3; ref_val[4] = 7;
ref_key[5] = 4; ref_val[5] = 42;
ref_key[6] = 4; ref_val[6] = 13;
Vector result_key(7), result_val(7);
hipStream_t s;
hipStreamCreate(&s);
thrust::pair<Iterator,Iterator> ends =
thrust::merge_by_key(thrust::hip::par.on(s),
a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(), b_val.begin(),
result_key.begin(),
result_val.begin());
hipStreamSynchronize(s);
ASSERT_EQUAL_QUIET(result_key.end(), ends.first);
ASSERT_EQUAL_QUIET(result_val.end(), ends.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
hipStreamDestroy(s);
}
DECLARE_UNITTEST(TestMergeByKeyCudaStreams);
| a5e5f0c7efa8675773078a09c6621c7bfd52536c.cu | #include <unittest/unittest.h>
#include <thrust/merge.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
template<typename ExecutionPolicy,
typename Iterator1,
typename Iterator2,
typename Iterator3,
typename Iterator4,
typename Iterator5,
typename Iterator6,
typename Iterator7>
__global__
void merge_by_key_kernel(ExecutionPolicy exec,
Iterator1 keys_first1, Iterator1 keys_last1,
Iterator2 keys_first2, Iterator2 keys_last2,
Iterator3 values_first1,
Iterator4 values_first2,
Iterator5 keys_result,
Iterator6 values_result,
Iterator7 result)
{
*result = thrust::merge_by_key(exec, keys_first1, keys_last1, keys_first2, keys_last2, values_first1, values_first2, keys_result, values_result);
}
template<typename ExecutionPolicy>
void TestMergeByKeyDevice(ExecutionPolicy exec)
{
thrust::device_vector<int> a_key(3), a_val(3), b_key(4), b_val(4);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
a_val[0] = 13; a_val[1] = 7; a_val[2] = 42;
b_key[0] = 0 ; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
b_val[0] = 42; b_val[1] = 42; b_val[2] = 7; b_val[3] = 13;
thrust::device_vector<int> ref_key(7), ref_val(7);
ref_key[0] = 0; ref_val[0] = 13;
ref_key[1] = 0; ref_val[1] = 42;
ref_key[2] = 2; ref_val[2] = 7;
ref_key[3] = 3; ref_val[3] = 42;
ref_key[4] = 3; ref_val[4] = 7;
ref_key[5] = 4; ref_val[5] = 42;
ref_key[6] = 4; ref_val[6] = 13;
thrust::device_vector<int> result_key(7), result_val(7);
typedef typename thrust::device_vector<int>::iterator Iterator;
thrust::device_vector<thrust::pair<Iterator,Iterator> > result_ends(1);
merge_by_key_kernel<<<1,1>>>(exec,
a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(), b_val.begin(),
result_key.begin(),
result_val.begin(),
result_ends.begin());
thrust::pair<Iterator,Iterator> ends = result_ends[0];
ASSERT_EQUAL_QUIET(result_key.end(), ends.first);
ASSERT_EQUAL_QUIET(result_val.end(), ends.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
}
void TestMergeByKeyDeviceSeq()
{
TestMergeByKeyDevice(thrust::seq);
}
DECLARE_UNITTEST(TestMergeByKeyDeviceSeq);
void TestMergeByKeyDeviceDevice()
{
TestMergeByKeyDevice(thrust::device);
}
DECLARE_UNITTEST(TestMergeByKeyDeviceDevice);
void TestMergeByKeyCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::iterator Iterator;
Vector a_key(3), a_val(3), b_key(4), b_val(4);
a_key[0] = 0; a_key[1] = 2; a_key[2] = 4;
a_val[0] = 13; a_val[1] = 7; a_val[2] = 42;
b_key[0] = 0 ; b_key[1] = 3; b_key[2] = 3; b_key[3] = 4;
b_val[0] = 42; b_val[1] = 42; b_val[2] = 7; b_val[3] = 13;
Vector ref_key(7), ref_val(7);
ref_key[0] = 0; ref_val[0] = 13;
ref_key[1] = 0; ref_val[1] = 42;
ref_key[2] = 2; ref_val[2] = 7;
ref_key[3] = 3; ref_val[3] = 42;
ref_key[4] = 3; ref_val[4] = 7;
ref_key[5] = 4; ref_val[5] = 42;
ref_key[6] = 4; ref_val[6] = 13;
Vector result_key(7), result_val(7);
cudaStream_t s;
cudaStreamCreate(&s);
thrust::pair<Iterator,Iterator> ends =
thrust::merge_by_key(thrust::cuda::par.on(s),
a_key.begin(), a_key.end(),
b_key.begin(), b_key.end(),
a_val.begin(), b_val.begin(),
result_key.begin(),
result_val.begin());
cudaStreamSynchronize(s);
ASSERT_EQUAL_QUIET(result_key.end(), ends.first);
ASSERT_EQUAL_QUIET(result_val.end(), ends.second);
ASSERT_EQUAL(ref_key, result_key);
ASSERT_EQUAL(ref_val, result_val);
cudaStreamDestroy(s);
}
DECLARE_UNITTEST(TestMergeByKeyCudaStreams);
|
ebca281c149add04525f737fa781f1e381a5d2d9.hip | // !!! This is a file automatically generated by hipify!!!
#include <THHUNN/THHUNN.h>
#include <THHUNN/common.h>
#include <THH/THHDeviceTensor.cuh>
#include <THH/THHDeviceTensorUtils.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <cfloat>
#include <THHUNN/generic/VolumetricMaxPooling.hip>
#include <THH/THHGenerateFloatTypes.h>
| ebca281c149add04525f737fa781f1e381a5d2d9.cu | #include <THCUNN/THCUNN.h>
#include <THCUNN/common.h>
#include <THC/THCDeviceTensor.cuh>
#include <THC/THCDeviceTensorUtils.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cfloat>
#include <THCUNN/generic/VolumetricMaxPooling.cu>
#include <THC/THCGenerateFloatTypes.h>
|
c988022e989281187d99dca6df17645961ecd741.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cuda_help.h"
#include "fill.cuh"
#include "max.h"
#include "proj.h"
using namespace Legion;
namespace legate {
namespace numpy {
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_2d(const AccessorRW<T, 2> inout, const AccessorRO<T, 2> in, const Rect<2> bounds, const T identity, const int axis) {
coord_t y = bounds.lo[1] + blockIdx.x * blockDim.x + threadIdx.x;
coord_t x = bounds.lo[0] + (blockIdx.z * gridDim.y + blockIdx.y) * blockDim.y + threadIdx.y;
const Point<2> p(x, y);
if (!bounds.contains(p)) return;
T value = identity;
if (axis == 0) {
while (x <= bounds.hi[0]) {
MaxReduction<T>::template fold<true /*exclusive*/>(value, in[x][y]);
x += gridDim.z * gridDim.y * blockDim.y;
}
} else {
while (y <= bounds.hi[1]) {
MaxReduction<T>::template fold<true /*exclusive*/>(value, in[x][y]);
y += gridDim.x * blockDim.x;
}
#if __CUDA_ARCH__ >= 700
__shared__ T trampoline[THREADS_PER_BLOCK];
// Check for the case where all the threads in the same warp have
// the same x value in which case they're all going to conflict
// so instead we do a warp-level reduction so just one thread ends
// up doing the full atomic
const int same_mask = __match_any_sync(0xffffffff, threadIdx.y);
int laneid;
asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid));
const int active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid));
if (active_mask) {
// Store our data into shared
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
trampoline[tid] = value;
// Make sure all the threads in the warp are done writing
__syncwarp(active_mask);
// Have the lowest thread in each mask pull in the values
int lowest_index = -1;
for (int i = 0; i < warpSize; i++)
if (same_mask & (1 << i)) {
if (lowest_index == -1) {
if (i != laneid) {
// We're not the lowest thread in the warp for
// this value so we're done, set the value back
// to identity to ensure that we don't try to
// perform the reduction out to memory
value = identity;
break;
} else // Make sure we don't do this test again
lowest_index = i;
// It was already our value, so just keep going
} else {
// Pull in the value from shared memory
const int index = tid + i - laneid;
MaxReduction<T>::template fold<true /*exclusive*/>(value, trampoline[index]);
}
}
}
#endif
}
if (value != identity) MaxReduction<T>::template fold<false /*exclusive*/>(inout[p], value);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_3d(const AccessorRW<T, 3> inout, const AccessorRO<T, 3> in, const Rect<3> bounds, const T identity, const int axis) {
coord_t z = bounds.lo[2] + blockIdx.x * blockDim.x + threadIdx.x;
coord_t y = bounds.lo[1] + blockIdx.y * blockDim.y + threadIdx.y;
coord_t x = bounds.lo[0] + blockIdx.z * blockDim.z + threadIdx.z;
const Point<3> p(x, y, z);
if (!bounds.contains(p)) return;
T value = identity;
if (axis == 0) {
while (x <= bounds.hi[0]) {
MaxReduction<T>::template fold<true /*exclusive*/>(value, in[x][y][z]);
x += gridDim.z * blockDim.z;
}
} else if (axis == 1) {
while (y <= bounds.hi[1]) {
MaxReduction<T>::template fold<true /*exclusive*/>(value, in[x][y][z]);
y += gridDim.y * blockDim.y;
}
} else {
while (z <= bounds.hi[2]) {
MaxReduction<T>::template fold<true /*exclusive*/>(value, in[x][y][z]);
z += gridDim.x * blockDim.x;
}
#if __CUDA_ARCH__ >= 700
__shared__ T trampoline[THREADS_PER_BLOCK];
// Check for the case where all the threads in the same warp have
// the same x value in which case they're all going to conflict
// so instead we do a warp-level reduction so just one thread ends
// up doing the full atomic
const int same_mask = __match_any_sync(0xffffffff, threadIdx.z * blockDim.y + threadIdx.y);
int laneid;
asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid));
const int active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid));
if (active_mask) {
// Store our data into shared
const int tid = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
trampoline[tid] = value;
// Make sure all the threads in the warp are done writing
__syncwarp(active_mask);
// Have the lowest thread in each mask pull in the values
int lowest_index = -1;
for (int i = 0; i < warpSize; i++)
if (same_mask & (1 << i)) {
if (lowest_index == -1) {
if (i != laneid) {
// We're not the lowest thread in the warp for
// this value so we're done, set the value back
// to identity to ensure that we don't try to
// perform the reduction out to memory
value = identity;
break;
} else // Make sure we don't do this test again
lowest_index = i;
// It was already our value, so just keep going
} else {
// Pull in the value from shared memory
const int index = tid + i - laneid;
MaxReduction<T>::template fold<true /*exclusive*/>(value, trampoline[index]);
}
}
}
#endif
}
if (value != identity) MaxReduction<T>::template fold<false /*exclusive*/>(inout[p], value);
}
template<typename T>
/*static*/ void MaxTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
const int axis = derez.unpack_dimension();
const int collapse_dim = derez.unpack_dimension();
const int init_dim = derez.unpack_dimension();
const T initial_value = (task->futures.size() == 1) ? task->futures[0].get_result<T>() : MaxReduction<T>::identity;
switch (init_dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) return;
const AccessorWO<T, 1> out =
(collapse_dim >= 0) ? derez.unpack_accessor_WO<T, 1>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_WO<T, 1>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( legate_fill_1d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, initial_value, rect.lo, volume);
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) return;
const AccessorWO<T, 2> out =
(collapse_dim >= 0) ? derez.unpack_accessor_WO<T, 2>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_WO<T, 2>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
hipLaunchKernelGGL(( legate_fill_2d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, initial_value, rect.lo, Point<1>(pitch), volume);
break;
}
default:
assert(false); // shouldn't see any other cases
}
const int dim = derez.unpack_dimension();
switch (dim) {
// Should never get the case of 1 as this would just be a copy since
// reducing our only dimension should have called MaxReducTask
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) return;
const AccessorRW<T, 2> inout =
(collapse_dim >= 0) ? derez.unpack_accessor_RW<T, 2, 1>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_RW<T, 2>(regions[0], rect);
const AccessorRO<T, 2> in = derez.unpack_accessor_RO<T, 2>(regions[1], rect);
// Figure out how many blocks and threads we need
dim3 threads(1, 1, 1);
dim3 blocks(1, 1, 1);
raster_2d_reduction(blocks, threads, rect, axis, (const void*)legate_max_2d<T>);
hipLaunchKernelGGL(( legate_max_2d<T>), dim3(blocks), dim3(threads), 0, 0, inout, in, rect, MaxReduction<T>::identity, axis);
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) return;
const AccessorRW<T, 3> inout =
(collapse_dim >= 0) ? derez.unpack_accessor_RW<T, 3, 2>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_RW<T, 3>(regions[0], rect);
const AccessorRO<T, 3> in = derez.unpack_accessor_RO<T, 3>(regions[1], rect);
// Figure out how many blocks and threads we need
dim3 threads(1, 1, 1);
dim3 blocks(1, 1, 1);
raster_3d_reduction(blocks, threads, rect, axis, (const void*)legate_max_3d<T>);
hipLaunchKernelGGL(( legate_max_3d<T>), dim3(blocks), dim3(threads), 0, 0, inout, in, rect, MaxReduction<T>::identity, axis);
break;
}
default:
assert(false);
}
}
INSTANTIATE_TASK_VARIANT(MaxTask, gpu_variant)
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_reduce_1d(DeferredReduction<MaxReduction<T>> result, const AccessorRO<T, 1> in, const size_t iters,
const Point<1> origin, const size_t max, const T identity) {
T value = identity;
for (unsigned idx = 0; idx < iters; idx++) {
const size_t offset = (idx * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
if (offset < max) {
const coord_t x = origin[0] + offset;
MaxReduction<T>::template fold<true>(value, in[x]);
}
}
reduce_output(result, value);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_reduce_2d(DeferredReduction<MaxReduction<T>> result, const AccessorRO<T, 2> in, const size_t iters,
const Point<2> origin, const Point<1> pitch, const size_t max, const T identity) {
T value = identity;
for (unsigned idx = 0; idx < iters; idx++) {
const size_t offset = (idx * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
if (offset < max) {
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
MaxReduction<T>::template fold<true>(value, in[x][y]);
}
}
reduce_output(result, value);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_reduce_3d(DeferredReduction<MaxReduction<T>> result, const AccessorRO<T, 3> in, const size_t iters,
const Point<3> origin, const Point<2> pitch, const size_t max, const T identity) {
T value = identity;
for (unsigned idx = 0; idx < iters; idx++) {
const size_t offset = (idx * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
if (offset < max) {
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
MaxReduction<T>::template fold<true>(value, in[x][y][z]);
}
}
reduce_output(result, value);
}
template<typename T>
/*static*/ DeferredReduction<MaxReduction<T>>
MaxReducTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) {
DeferredReduction<MaxReduction<T>> result;
LegateDeserializer derez(task->args, task->arglen);
const int dim = derez.unpack_dimension();
switch (dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 1> in = derez.unpack_accessor_RO<T, 1>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocks >= MAX_REDUCTION_CTAS) {
const size_t iters = (blocks + MAX_REDUCTION_CTAS - 1) / MAX_REDUCTION_CTAS;
hipLaunchKernelGGL(( legate_max_reduce_1d<T>)
, dim3(MAX_REDUCTION_CTAS), dim3(THREADS_PER_BLOCK), 0, 0, result, in, iters, rect.lo, volume, MaxReduction<T>::identity);
} else {
hipLaunchKernelGGL(( legate_max_reduce_1d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, result, in, 1 /*iter*/, rect.lo, volume, MaxReduction<T>::identity);
}
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 2> in = derez.unpack_accessor_RO<T, 2>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
if (blocks >= MAX_REDUCTION_CTAS) {
const size_t iters = (blocks + MAX_REDUCTION_CTAS - 1) / MAX_REDUCTION_CTAS;
hipLaunchKernelGGL(( legate_max_reduce_2d<T>), dim3(MAX_REDUCTION_CTAS), dim3(THREADS_PER_BLOCK), 0, 0, result, in, iters, rect.lo, Point<1>(pitch), volume,
MaxReduction<T>::identity);
} else {
hipLaunchKernelGGL(( legate_max_reduce_2d<T>)
, dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, result, in, 1 /*iter*/, rect.lo, Point<1>(pitch), volume, MaxReduction<T>::identity);
}
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 3> in = derez.unpack_accessor_RO<T, 3>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
if (blocks >= MAX_REDUCTION_CTAS) {
const size_t iters = (blocks + MAX_REDUCTION_CTAS - 1) / MAX_REDUCTION_CTAS;
hipLaunchKernelGGL(( legate_max_reduce_3d<T>), dim3(MAX_REDUCTION_CTAS), dim3(THREADS_PER_BLOCK), 0, 0, result, in, iters, rect.lo, Point<2>(pitch), volume,
MaxReduction<T>::identity);
} else {
hipLaunchKernelGGL(( legate_max_reduce_3d<T>)
, dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, result, in, 1 /*iter*/, rect.lo, Point<2>(pitch), volume, MaxReduction<T>::identity);
}
break;
}
default:
assert(false);
}
return result;
}
INSTANTIATE_DEFERRED_REDUCTION_TASK_VARIANT(MaxReducTask, MaxReduction, gpu_variant)
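// Element-wise binary maximum kernels: out[i] = max(in1[i], in2[i]); the *_inplace
// variants fold the input into the existing output using MaxReduction.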
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_1d(const AccessorWO<T, 1> out, const AccessorRO<T, 1> in1, const AccessorRO<T, 1> in2, const Point<1> origin,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset;
out[x] = MAX(in1[x], in2[x]);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_1d_inplace(const AccessorRW<T, 1> out, const AccessorRO<T, 1> in, const Point<1> origin, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset;
MaxReduction<T>::template fold<true /*exclusive*/>(out[x], in[x]);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_2d(const AccessorWO<T, 2> out, const AccessorRO<T, 2> in1, const AccessorRO<T, 2> in2, const Point<2> origin,
const Point<1> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
out[x][y] = MAX(in1[x][y], in2[x][y]);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_2d_inplace(const AccessorRW<T, 2> out, const AccessorRO<T, 2> in, const Point<2> origin, const Point<1> pitch,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
MaxReduction<T>::template fold<true /*exclusive*/>(out[x][y], in[x][y]);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_3d(const AccessorWO<T, 3> out, const AccessorRO<T, 3> in1, const AccessorRO<T, 3> in2, const Point<3> origin,
const Point<2> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
out[x][y][z] = MAX(in1[x][y][z], in2[x][y][z]);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_3d_inplace(const AccessorRW<T, 3> out, const AccessorRO<T, 3> in, const Point<3> origin, const Point<2> pitch,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
MaxReduction<T>::template fold<true /*exclusive*/>(out[x][y][z], in[x][y][z]);
}
template<typename T>
/*static*/ void BinMaxTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
const int dim = derez.unpack_dimension();
switch (dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) break;
if (task->regions.size() == 2) {
const AccessorRW<T, 1> out = derez.unpack_accessor_RW<T, 1>(regions[0], rect);
const AccessorRO<T, 1> in = derez.unpack_accessor_RO<T, 1>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( legate_binmax_1d_inplace<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in, rect.lo, volume);
} else {
const AccessorWO<T, 1> out = derez.unpack_accessor_WO<T, 1>(regions[0], rect);
const AccessorRO<T, 1> in1 = derez.unpack_accessor_RO<T, 1>(regions[1], rect);
const AccessorRO<T, 1> in2 = derez.unpack_accessor_RO<T, 1>(regions[2], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( legate_binmax_1d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in1, in2, rect.lo, volume);
}
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) break;
if (task->regions.size() == 2) {
const AccessorRW<T, 2> out = derez.unpack_accessor_RW<T, 2>(regions[0], rect);
const AccessorRO<T, 2> in = derez.unpack_accessor_RO<T, 2>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
hipLaunchKernelGGL(( legate_binmax_2d_inplace<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in, rect.lo, Point<1>(pitch), volume);
} else {
const AccessorWO<T, 2> out = derez.unpack_accessor_WO<T, 2>(regions[0], rect);
const AccessorRO<T, 2> in1 = derez.unpack_accessor_RO<T, 2>(regions[1], rect);
const AccessorRO<T, 2> in2 = derez.unpack_accessor_RO<T, 2>(regions[2], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
hipLaunchKernelGGL(( legate_binmax_2d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in1, in2, rect.lo, Point<1>(pitch), volume);
}
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) break;
if (task->regions.size() == 2) {
const AccessorRW<T, 3> out = derez.unpack_accessor_RW<T, 3>(regions[0], rect);
const AccessorRO<T, 3> in = derez.unpack_accessor_RO<T, 3>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
hipLaunchKernelGGL(( legate_binmax_3d_inplace<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in, rect.lo, Point<2>(pitch), volume);
} else {
const AccessorWO<T, 3> out = derez.unpack_accessor_WO<T, 3>(regions[0], rect);
const AccessorRO<T, 3> in1 = derez.unpack_accessor_RO<T, 3>(regions[1], rect);
const AccessorRO<T, 3> in2 = derez.unpack_accessor_RO<T, 3>(regions[2], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
hipLaunchKernelGGL(( legate_binmax_3d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in1, in2, rect.lo, Point<2>(pitch), volume);
}
break;
}
default:
assert(false);
}
}
INSTANTIATE_TASK_VARIANT(BinMaxTask, gpu_variant)
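// Broadcast variants: the second operand is a scalar passed by value (obtained on the host
// from the task's single future) instead of a second region accessor.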
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_broadcast_1d(const AccessorWO<T, 1> out, const AccessorRO<T, 1> in1, const T in2, const Point<1> origin,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset;
out[x] = MAX(in1[x], in2);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_broadcast_1d_inplace(const AccessorRW<T, 1> out, const T in, const Point<1> origin, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset;
MaxReduction<T>::template fold<true /*exclusive*/>(out[x], in);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_broadcast_2d(const AccessorWO<T, 2> out, const AccessorRO<T, 2> in1, const T in2, const Point<2> origin,
const Point<1> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
out[x][y] = MAX(in1[x][y], in2);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_broadcast_2d_inplace(const AccessorRW<T, 2> out, const T in, const Point<2> origin, const Point<1> pitch,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
MaxReduction<T>::template fold<true /*exclusive*/>(out[x][y], in);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_broadcast_3d(const AccessorWO<T, 3> out, const AccessorRO<T, 3> in1, const T in2, const Point<3> origin,
const Point<2> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
out[x][y][z] = MAX(in1[x][y][z], in2);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_broadcast_3d_inplace(const AccessorRW<T, 3> out, const T in, const Point<3> origin, const Point<2> pitch,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
MaxReduction<T>::template fold<true /*exclusive*/>(out[x][y][z], in);
}
template<typename T>
/*static*/ void BinMaxBroadcast<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
const int dim = derez.unpack_dimension();
assert(task->futures.size() == 1);
const T in2 = task->futures[0].get_result<T>();
switch (dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) break;
if (task->regions.size() == 1) {
const AccessorRW<T, 1> out = derez.unpack_accessor_RW<T, 1>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( legate_binmax_broadcast_1d_inplace<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in2, rect.lo, volume);
} else {
const AccessorWO<T, 1> out = derez.unpack_accessor_WO<T, 1>(regions[0], rect);
const AccessorRO<T, 1> in1 = derez.unpack_accessor_RO<T, 1>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( legate_binmax_broadcast_1d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in1, in2, rect.lo, volume);
}
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) break;
if (task->regions.size() == 1) {
const AccessorRW<T, 2> out = derez.unpack_accessor_RW<T, 2>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
hipLaunchKernelGGL(( legate_binmax_broadcast_2d_inplace<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in2, rect.lo, Point<1>(pitch), volume);
} else {
const AccessorWO<T, 2> out = derez.unpack_accessor_WO<T, 2>(regions[0], rect);
const AccessorRO<T, 2> in1 = derez.unpack_accessor_RO<T, 2>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
hipLaunchKernelGGL(( legate_binmax_broadcast_2d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in1, in2, rect.lo, Point<1>(pitch), volume);
}
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) break;
if (task->regions.size() == 1) {
const AccessorRW<T, 3> out = derez.unpack_accessor_RW<T, 3>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
hipLaunchKernelGGL(( legate_binmax_broadcast_3d_inplace<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in2, rect.lo, Point<2>(pitch), volume);
} else {
const AccessorWO<T, 3> out = derez.unpack_accessor_WO<T, 3>(regions[0], rect);
const AccessorRO<T, 3> in1 = derez.unpack_accessor_RO<T, 3>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
hipLaunchKernelGGL(( legate_binmax_broadcast_3d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, in1, in2, rect.lo, Point<2>(pitch), volume);
}
break;
}
default:
assert(false);
}
}
INSTANTIATE_TASK_VARIANT(BinMaxBroadcast, gpu_variant)
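// Radix folding: args.in holds up to MAX_REDUCTION_RADIX read-only accessors; each kernel
// starts from the first input and folds the remaining argmax-1 inputs point-wise before
// writing the combined maximum to the output.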
template<typename T, int DIM>
struct MaxRadixArgs {
AccessorRO<T, DIM> in[MAX_REDUCTION_RADIX];
};
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_radix_1d(const AccessorWO<T, 1> out, const MaxRadixArgs<T, 1> args, const size_t argmax, const Point<1> origin,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset;
T val = args.in[0][x];
for (unsigned idx = 1; idx < argmax; idx++)
MaxReduction<T>::template fold<true /*exclusive*/>(val, args.in[idx][x]);
out[x] = val;
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_radix_2d(const AccessorWO<T, 2> out, const MaxRadixArgs<T, 2> args, const size_t argmax, const Point<2> origin,
const Point<1> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
T val = args.in[0][x][y];
for (unsigned idx = 1; idx < argmax; idx++)
MaxReduction<T>::template fold<true /*exclusive*/>(val, args.in[idx][x][y]);
out[x][y] = val;
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_radix_3d(const AccessorWO<T, 3> out, const MaxRadixArgs<T, 3> args, const size_t argmax, const Point<3> origin,
const Point<2> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
T val = args.in[0][x][y][z];
for (unsigned idx = 1; idx < argmax; idx++)
MaxReduction<T>::template fold<true /*exclusive*/>(val, args.in[idx][x][y][z]);
out[x][y][z] = val;
}
template<typename T>
/*static*/ void MaxRadixTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
assert(task->regions.size() <= MAX_REDUCTION_RADIX);
const int radix = derez.unpack_dimension();
const int extra_dim_out = derez.unpack_dimension();
const int extra_dim_in = derez.unpack_dimension();
const int dim = derez.unpack_dimension();
const coord_t offset = (extra_dim_in >= 0) ? task->index_point[extra_dim_in] * radix : 0;
switch (dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) break;
const AccessorWO<T, 1> out =
(extra_dim_out >= 0) ? derez.unpack_accessor_WO<T, 1>(regions[0], rect, extra_dim_out, task->index_point[extra_dim_out])
: derez.unpack_accessor_WO<T, 1>(regions[0], rect);
MaxRadixArgs<T, 1> args;
unsigned num_inputs = 0;
for (unsigned idx = 1; idx < task->regions.size(); idx++)
if (task->regions[idx].region.exists())
args.in[num_inputs++] = derez.unpack_accessor_RO<T, 1>(regions[idx], rect, extra_dim_in, offset + idx - 1);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( legate_max_radix_1d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, args, num_inputs, rect.lo, volume);
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) break;
const AccessorWO<T, 2> out =
(extra_dim_out >= 0) ? derez.unpack_accessor_WO<T, 2>(regions[0], rect, extra_dim_out, task->index_point[extra_dim_out])
: derez.unpack_accessor_WO<T, 2>(regions[0], rect);
MaxRadixArgs<T, 2> args;
unsigned num_inputs = 0;
for (unsigned idx = 1; idx < task->regions.size(); idx++)
if (task->regions[idx].region.exists())
args.in[num_inputs++] = derez.unpack_accessor_RO<T, 2>(regions[idx], rect, extra_dim_in, offset + idx - 1);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
hipLaunchKernelGGL(( legate_max_radix_2d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, args, num_inputs, rect.lo, Point<1>(pitch), volume);
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) break;
const AccessorWO<T, 3> out =
(extra_dim_out >= 0) ? derez.unpack_accessor_WO<T, 3>(regions[0], rect, extra_dim_out, task->index_point[extra_dim_out])
: derez.unpack_accessor_WO<T, 3>(regions[0], rect);
MaxRadixArgs<T, 3> args;
unsigned num_inputs = 0;
for (unsigned idx = 1; idx < task->regions.size(); idx++)
args.in[num_inputs++] = derez.unpack_accessor_RO<T, 3>(regions[idx], rect, extra_dim_in, offset + idx - 1);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
hipLaunchKernelGGL(( legate_max_radix_3d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, args, num_inputs, rect.lo, Point<2>(pitch), volume);
break;
}
default:
assert(false);
}
}
INSTANTIATE_TASK_VARIANT(MaxRadixTask, gpu_variant)
} // namespace numpy
} // namespace legate
| c988022e989281187d99dca6df17645961ecd741.cu | /* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cuda_help.h"
#include "fill.cuh"
#include "max.h"
#include "proj.h"
using namespace Legion;
namespace legate {
namespace numpy {
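// Axis reductions: each thread strides along the reduced axis of the rectangle, folding values
// into a private accumulator, and finally folds that accumulator into the collapsed inout
// accessor; on sm_70+ a warp-level combine merges lanes that target the same output element
// before the atomic fold to cut down on conflicting atomics.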
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_2d(const AccessorRW<T, 2> inout, const AccessorRO<T, 2> in, const Rect<2> bounds, const T identity, const int axis) {
coord_t y = bounds.lo[1] + blockIdx.x * blockDim.x + threadIdx.x;
coord_t x = bounds.lo[0] + (blockIdx.z * gridDim.y + blockIdx.y) * blockDim.y + threadIdx.y;
const Point<2> p(x, y);
if (!bounds.contains(p)) return;
T value = identity;
if (axis == 0) {
while (x <= bounds.hi[0]) {
MaxReduction<T>::template fold<true /*exclusive*/>(value, in[x][y]);
x += gridDim.z * gridDim.y * blockDim.y;
}
} else {
while (y <= bounds.hi[1]) {
MaxReduction<T>::template fold<true /*exclusive*/>(value, in[x][y]);
y += gridDim.x * blockDim.x;
}
#if __CUDA_ARCH__ >= 700
__shared__ T trampoline[THREADS_PER_BLOCK];
// Check for the case where all the threads in the same warp have
// the same x value in which case they're all going to conflict
// so instead we do a warp-level reduction so just one thread ends
// up doing the full atomic
const int same_mask = __match_any_sync(0xffffffff, threadIdx.y);
int laneid;
asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid));
const int active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid));
if (active_mask) {
// Store our data into shared
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
trampoline[tid] = value;
// Make sure all the threads in the warp are done writing
__syncwarp(active_mask);
// Have the lowest thread in each mask pull in the values
int lowest_index = -1;
for (int i = 0; i < warpSize; i++)
if (same_mask & (1 << i)) {
if (lowest_index == -1) {
if (i != laneid) {
// We're not the lowest thread in the warp for
// this value so we're done, set the value back
// to identity to ensure that we don't try to
// perform the reduction out to memory
value = identity;
break;
} else // Make sure we don't do this test again
lowest_index = i;
// It was already our value, so just keep going
} else {
// Pull in the value from shared memory
const int index = tid + i - laneid;
MaxReduction<T>::template fold<true /*exclusive*/>(value, trampoline[index]);
}
}
}
#endif
}
if (value != identity) MaxReduction<T>::template fold<false /*exclusive*/>(inout[p], value);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_3d(const AccessorRW<T, 3> inout, const AccessorRO<T, 3> in, const Rect<3> bounds, const T identity, const int axis) {
coord_t z = bounds.lo[2] + blockIdx.x * blockDim.x + threadIdx.x;
coord_t y = bounds.lo[1] + blockIdx.y * blockDim.y + threadIdx.y;
coord_t x = bounds.lo[0] + blockIdx.z * blockDim.z + threadIdx.z;
const Point<3> p(x, y, z);
if (!bounds.contains(p)) return;
T value = identity;
if (axis == 0) {
while (x <= bounds.hi[0]) {
MaxReduction<T>::template fold<true /*exclusive*/>(value, in[x][y][z]);
x += gridDim.z * blockDim.z;
}
} else if (axis == 1) {
while (y <= bounds.hi[1]) {
MaxReduction<T>::template fold<true /*exclusive*/>(value, in[x][y][z]);
y += gridDim.y * blockDim.y;
}
} else {
while (z <= bounds.hi[2]) {
MaxReduction<T>::template fold<true /*exclusive*/>(value, in[x][y][z]);
z += gridDim.x * blockDim.x;
}
#if __CUDA_ARCH__ >= 700
__shared__ T trampoline[THREADS_PER_BLOCK];
// Check for the case where all the threads in the same warp have
// the same x value in which case they're all going to conflict
// so instead we do a warp-level reduction so just one thread ends
// up doing the full atomic
const int same_mask = __match_any_sync(0xffffffff, threadIdx.z * blockDim.y + threadIdx.y);
int laneid;
asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid));
const int active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid));
if (active_mask) {
// Store our data into shared
const int tid = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
trampoline[tid] = value;
// Make sure all the threads in the warp are done writing
__syncwarp(active_mask);
// Have the lowest thread in each mask pull in the values
int lowest_index = -1;
for (int i = 0; i < warpSize; i++)
if (same_mask & (1 << i)) {
if (lowest_index == -1) {
if (i != laneid) {
// We're not the lowest thread in the warp for
// this value so we're done, set the value back
// to identity to ensure that we don't try to
// perform the reduction out to memory
value = identity;
break;
} else // Make sure we don't do this test again
lowest_index = i;
// It was already our value, so just keep going
} else {
// Pull in the value from shared memory
const int index = tid + i - laneid;
MaxReduction<T>::template fold<true /*exclusive*/>(value, trampoline[index]);
}
}
}
#endif
}
if (value != identity) MaxReduction<T>::template fold<false /*exclusive*/>(inout[p], value);
}
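// Host-side MaxTask variant: the first switch fills the collapsed output with the initial value
// (taken from a future if present, otherwise the reduction identity); the second switch launches
// the 2-D or 3-D axis-reduction kernel with a grid/block shape chosen by raster_*_reduction.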
template<typename T>
/*static*/ void MaxTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
const int axis = derez.unpack_dimension();
const int collapse_dim = derez.unpack_dimension();
const int init_dim = derez.unpack_dimension();
const T initial_value = (task->futures.size() == 1) ? task->futures[0].get_result<T>() : MaxReduction<T>::identity;
switch (init_dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) return;
const AccessorWO<T, 1> out =
(collapse_dim >= 0) ? derez.unpack_accessor_WO<T, 1>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_WO<T, 1>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
legate_fill_1d<T><<<blocks, THREADS_PER_BLOCK>>>(out, initial_value, rect.lo, volume);
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) return;
const AccessorWO<T, 2> out =
(collapse_dim >= 0) ? derez.unpack_accessor_WO<T, 2>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_WO<T, 2>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
legate_fill_2d<T><<<blocks, THREADS_PER_BLOCK>>>(out, initial_value, rect.lo, Point<1>(pitch), volume);
break;
}
default:
assert(false); // shouldn't see any other cases
}
const int dim = derez.unpack_dimension();
switch (dim) {
// Should never get the case of 1 as this would just be a copy since
// reducing our only dimension should have called MaxReducTask
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) return;
const AccessorRW<T, 2> inout =
(collapse_dim >= 0) ? derez.unpack_accessor_RW<T, 2, 1>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_RW<T, 2>(regions[0], rect);
const AccessorRO<T, 2> in = derez.unpack_accessor_RO<T, 2>(regions[1], rect);
// Figure out how many blocks and threads we need
dim3 threads(1, 1, 1);
dim3 blocks(1, 1, 1);
raster_2d_reduction(blocks, threads, rect, axis, (const void*)legate_max_2d<T>);
legate_max_2d<T><<<blocks, threads>>>(inout, in, rect, MaxReduction<T>::identity, axis);
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) return;
const AccessorRW<T, 3> inout =
(collapse_dim >= 0) ? derez.unpack_accessor_RW<T, 3, 2>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_RW<T, 3>(regions[0], rect);
const AccessorRO<T, 3> in = derez.unpack_accessor_RO<T, 3>(regions[1], rect);
// Figure out how many blocks and threads we need
dim3 threads(1, 1, 1);
dim3 blocks(1, 1, 1);
raster_3d_reduction(blocks, threads, rect, axis, (const void*)legate_max_3d<T>);
legate_max_3d<T><<<blocks, threads>>>(inout, in, rect, MaxReduction<T>::identity, axis);
break;
}
default:
assert(false);
}
}
INSTANTIATE_TASK_VARIANT(MaxTask, gpu_variant)
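// Scalar reductions: each thread folds a strided subset of the flattened rectangle into a
// private value, which reduce_output then combines into the DeferredReduction result returned
// by the task.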
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_reduce_1d(DeferredReduction<MaxReduction<T>> result, const AccessorRO<T, 1> in, const size_t iters,
const Point<1> origin, const size_t max, const T identity) {
T value = identity;
for (unsigned idx = 0; idx < iters; idx++) {
const size_t offset = (idx * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
if (offset < max) {
const coord_t x = origin[0] + offset;
MaxReduction<T>::template fold<true>(value, in[x]);
}
}
reduce_output(result, value);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_reduce_2d(DeferredReduction<MaxReduction<T>> result, const AccessorRO<T, 2> in, const size_t iters,
const Point<2> origin, const Point<1> pitch, const size_t max, const T identity) {
T value = identity;
for (unsigned idx = 0; idx < iters; idx++) {
const size_t offset = (idx * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
if (offset < max) {
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
MaxReduction<T>::template fold<true>(value, in[x][y]);
}
}
reduce_output(result, value);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_reduce_3d(DeferredReduction<MaxReduction<T>> result, const AccessorRO<T, 3> in, const size_t iters,
const Point<3> origin, const Point<2> pitch, const size_t max, const T identity) {
T value = identity;
for (unsigned idx = 0; idx < iters; idx++) {
const size_t offset = (idx * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
if (offset < max) {
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
MaxReduction<T>::template fold<true>(value, in[x][y][z]);
}
}
reduce_output(result, value);
}
template<typename T>
/*static*/ DeferredReduction<MaxReduction<T>>
MaxReducTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) {
DeferredReduction<MaxReduction<T>> result;
LegateDeserializer derez(task->args, task->arglen);
const int dim = derez.unpack_dimension();
switch (dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 1> in = derez.unpack_accessor_RO<T, 1>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocks >= MAX_REDUCTION_CTAS) {
const size_t iters = (blocks + MAX_REDUCTION_CTAS - 1) / MAX_REDUCTION_CTAS;
legate_max_reduce_1d<T>
<<<MAX_REDUCTION_CTAS, THREADS_PER_BLOCK>>>(result, in, iters, rect.lo, volume, MaxReduction<T>::identity);
} else {
legate_max_reduce_1d<T><<<blocks, THREADS_PER_BLOCK>>>(result, in, 1 /*iter*/, rect.lo, volume, MaxReduction<T>::identity);
}
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 2> in = derez.unpack_accessor_RO<T, 2>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
if (blocks >= MAX_REDUCTION_CTAS) {
const size_t iters = (blocks + MAX_REDUCTION_CTAS - 1) / MAX_REDUCTION_CTAS;
legate_max_reduce_2d<T><<<MAX_REDUCTION_CTAS, THREADS_PER_BLOCK>>>(result, in, iters, rect.lo, Point<1>(pitch), volume,
MaxReduction<T>::identity);
} else {
legate_max_reduce_2d<T>
<<<blocks, THREADS_PER_BLOCK>>>(result, in, 1 /*iter*/, rect.lo, Point<1>(pitch), volume, MaxReduction<T>::identity);
}
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 3> in = derez.unpack_accessor_RO<T, 3>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
if (blocks >= MAX_REDUCTION_CTAS) {
const size_t iters = (blocks + MAX_REDUCTION_CTAS - 1) / MAX_REDUCTION_CTAS;
legate_max_reduce_3d<T><<<MAX_REDUCTION_CTAS, THREADS_PER_BLOCK>>>(result, in, iters, rect.lo, Point<2>(pitch), volume,
MaxReduction<T>::identity);
} else {
legate_max_reduce_3d<T>
<<<blocks, THREADS_PER_BLOCK>>>(result, in, 1 /*iter*/, rect.lo, Point<2>(pitch), volume, MaxReduction<T>::identity);
}
break;
}
default:
assert(false);
}
return result;
}
INSTANTIATE_DEFERRED_REDUCTION_TASK_VARIANT(MaxReducTask, MaxReduction, gpu_variant)
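// Element-wise binary max kernels: one thread per point of the flattened rectangle; the offset
// is mapped back to multi-dimensional coordinates through the precomputed pitches.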
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_1d(const AccessorWO<T, 1> out, const AccessorRO<T, 1> in1, const AccessorRO<T, 1> in2, const Point<1> origin,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset;
out[x] = MAX(in1[x], in2[x]);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_1d_inplace(const AccessorRW<T, 1> out, const AccessorRO<T, 1> in, const Point<1> origin, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset;
MaxReduction<T>::template fold<true /*exclusive*/>(out[x], in[x]);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_2d(const AccessorWO<T, 2> out, const AccessorRO<T, 2> in1, const AccessorRO<T, 2> in2, const Point<2> origin,
const Point<1> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
out[x][y] = MAX(in1[x][y], in2[x][y]);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_2d_inplace(const AccessorRW<T, 2> out, const AccessorRO<T, 2> in, const Point<2> origin, const Point<1> pitch,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
MaxReduction<T>::template fold<true /*exclusive*/>(out[x][y], in[x][y]);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_3d(const AccessorWO<T, 3> out, const AccessorRO<T, 3> in1, const AccessorRO<T, 3> in2, const Point<3> origin,
const Point<2> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
out[x][y][z] = MAX(in1[x][y][z], in2[x][y][z]);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_3d_inplace(const AccessorRW<T, 3> out, const AccessorRO<T, 3> in, const Point<3> origin, const Point<2> pitch,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
MaxReduction<T>::template fold<true /*exclusive*/>(out[x][y][z], in[x][y][z]);
}
template<typename T>
/*static*/ void BinMaxTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
const int dim = derez.unpack_dimension();
switch (dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) break;
if (task->regions.size() == 2) {
const AccessorRW<T, 1> out = derez.unpack_accessor_RW<T, 1>(regions[0], rect);
const AccessorRO<T, 1> in = derez.unpack_accessor_RO<T, 1>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
legate_binmax_1d_inplace<T><<<blocks, THREADS_PER_BLOCK>>>(out, in, rect.lo, volume);
} else {
const AccessorWO<T, 1> out = derez.unpack_accessor_WO<T, 1>(regions[0], rect);
const AccessorRO<T, 1> in1 = derez.unpack_accessor_RO<T, 1>(regions[1], rect);
const AccessorRO<T, 1> in2 = derez.unpack_accessor_RO<T, 1>(regions[2], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
legate_binmax_1d<T><<<blocks, THREADS_PER_BLOCK>>>(out, in1, in2, rect.lo, volume);
}
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) break;
if (task->regions.size() == 2) {
const AccessorRW<T, 2> out = derez.unpack_accessor_RW<T, 2>(regions[0], rect);
const AccessorRO<T, 2> in = derez.unpack_accessor_RO<T, 2>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
legate_binmax_2d_inplace<T><<<blocks, THREADS_PER_BLOCK>>>(out, in, rect.lo, Point<1>(pitch), volume);
} else {
const AccessorWO<T, 2> out = derez.unpack_accessor_WO<T, 2>(regions[0], rect);
const AccessorRO<T, 2> in1 = derez.unpack_accessor_RO<T, 2>(regions[1], rect);
const AccessorRO<T, 2> in2 = derez.unpack_accessor_RO<T, 2>(regions[2], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
legate_binmax_2d<T><<<blocks, THREADS_PER_BLOCK>>>(out, in1, in2, rect.lo, Point<1>(pitch), volume);
}
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) break;
if (task->regions.size() == 2) {
const AccessorRW<T, 3> out = derez.unpack_accessor_RW<T, 3>(regions[0], rect);
const AccessorRO<T, 3> in = derez.unpack_accessor_RO<T, 3>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
legate_binmax_3d_inplace<T><<<blocks, THREADS_PER_BLOCK>>>(out, in, rect.lo, Point<2>(pitch), volume);
} else {
const AccessorWO<T, 3> out = derez.unpack_accessor_WO<T, 3>(regions[0], rect);
const AccessorRO<T, 3> in1 = derez.unpack_accessor_RO<T, 3>(regions[1], rect);
const AccessorRO<T, 3> in2 = derez.unpack_accessor_RO<T, 3>(regions[2], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
legate_binmax_3d<T><<<blocks, THREADS_PER_BLOCK>>>(out, in1, in2, rect.lo, Point<2>(pitch), volume);
}
break;
}
default:
assert(false);
}
}
INSTANTIATE_TASK_VARIANT(BinMaxTask, gpu_variant)
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_broadcast_1d(const AccessorWO<T, 1> out, const AccessorRO<T, 1> in1, const T in2, const Point<1> origin,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset;
out[x] = MAX(in1[x], in2);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_broadcast_1d_inplace(const AccessorRW<T, 1> out, const T in, const Point<1> origin, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset;
MaxReduction<T>::template fold<true /*exclusive*/>(out[x], in);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_broadcast_2d(const AccessorWO<T, 2> out, const AccessorRO<T, 2> in1, const T in2, const Point<2> origin,
const Point<1> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
out[x][y] = MAX(in1[x][y], in2);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_broadcast_2d_inplace(const AccessorRW<T, 2> out, const T in, const Point<2> origin, const Point<1> pitch,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
MaxReduction<T>::template fold<true /*exclusive*/>(out[x][y], in);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_broadcast_3d(const AccessorWO<T, 3> out, const AccessorRO<T, 3> in1, const T in2, const Point<3> origin,
const Point<2> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
out[x][y][z] = MAX(in1[x][y][z], in2);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_binmax_broadcast_3d_inplace(const AccessorRW<T, 3> out, const T in, const Point<3> origin, const Point<2> pitch,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
MaxReduction<T>::template fold<true /*exclusive*/>(out[x][y][z], in);
}
template<typename T>
/*static*/ void BinMaxBroadcast<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
const int dim = derez.unpack_dimension();
assert(task->futures.size() == 1);
const T in2 = task->futures[0].get_result<T>();
switch (dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) break;
if (task->regions.size() == 1) {
const AccessorRW<T, 1> out = derez.unpack_accessor_RW<T, 1>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
legate_binmax_broadcast_1d_inplace<T><<<blocks, THREADS_PER_BLOCK>>>(out, in2, rect.lo, volume);
} else {
const AccessorWO<T, 1> out = derez.unpack_accessor_WO<T, 1>(regions[0], rect);
const AccessorRO<T, 1> in1 = derez.unpack_accessor_RO<T, 1>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
legate_binmax_broadcast_1d<T><<<blocks, THREADS_PER_BLOCK>>>(out, in1, in2, rect.lo, volume);
}
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) break;
if (task->regions.size() == 1) {
const AccessorRW<T, 2> out = derez.unpack_accessor_RW<T, 2>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
legate_binmax_broadcast_2d_inplace<T><<<blocks, THREADS_PER_BLOCK>>>(out, in2, rect.lo, Point<1>(pitch), volume);
} else {
const AccessorWO<T, 2> out = derez.unpack_accessor_WO<T, 2>(regions[0], rect);
const AccessorRO<T, 2> in1 = derez.unpack_accessor_RO<T, 2>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
legate_binmax_broadcast_2d<T><<<blocks, THREADS_PER_BLOCK>>>(out, in1, in2, rect.lo, Point<1>(pitch), volume);
}
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) break;
if (task->regions.size() == 1) {
const AccessorRW<T, 3> out = derez.unpack_accessor_RW<T, 3>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
legate_binmax_broadcast_3d_inplace<T><<<blocks, THREADS_PER_BLOCK>>>(out, in2, rect.lo, Point<2>(pitch), volume);
} else {
const AccessorWO<T, 3> out = derez.unpack_accessor_WO<T, 3>(regions[0], rect);
const AccessorRO<T, 3> in1 = derez.unpack_accessor_RO<T, 3>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
legate_binmax_broadcast_3d<T><<<blocks, THREADS_PER_BLOCK>>>(out, in1, in2, rect.lo, Point<2>(pitch), volume);
}
break;
}
default:
assert(false);
}
}
INSTANTIATE_TASK_VARIANT(BinMaxBroadcast, gpu_variant)
template<typename T, int DIM>
struct MaxRadixArgs {
AccessorRO<T, DIM> in[MAX_REDUCTION_RADIX];
};
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_radix_1d(const AccessorWO<T, 1> out, const MaxRadixArgs<T, 1> args, const size_t argmax, const Point<1> origin,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset;
T val = args.in[0][x];
for (unsigned idx = 1; idx < argmax; idx++)
MaxReduction<T>::template fold<true /*exclusive*/>(val, args.in[idx][x]);
out[x] = val;
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_radix_2d(const AccessorWO<T, 2> out, const MaxRadixArgs<T, 2> args, const size_t argmax, const Point<2> origin,
const Point<1> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
T val = args.in[0][x][y];
for (unsigned idx = 1; idx < argmax; idx++)
MaxReduction<T>::template fold<true /*exclusive*/>(val, args.in[idx][x][y]);
out[x][y] = val;
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_max_radix_3d(const AccessorWO<T, 3> out, const MaxRadixArgs<T, 3> args, const size_t argmax, const Point<3> origin,
const Point<2> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
T val = args.in[0][x][y][z];
for (unsigned idx = 1; idx < argmax; idx++)
MaxReduction<T>::template fold<true /*exclusive*/>(val, args.in[idx][x][y][z]);
out[x][y][z] = val;
}
template<typename T>
/*static*/ void MaxRadixTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
assert(task->regions.size() <= MAX_REDUCTION_RADIX);
const int radix = derez.unpack_dimension();
const int extra_dim_out = derez.unpack_dimension();
const int extra_dim_in = derez.unpack_dimension();
const int dim = derez.unpack_dimension();
const coord_t offset = (extra_dim_in >= 0) ? task->index_point[extra_dim_in] * radix : 0;
switch (dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) break;
const AccessorWO<T, 1> out =
(extra_dim_out >= 0) ? derez.unpack_accessor_WO<T, 1>(regions[0], rect, extra_dim_out, task->index_point[extra_dim_out])
: derez.unpack_accessor_WO<T, 1>(regions[0], rect);
MaxRadixArgs<T, 1> args;
unsigned num_inputs = 0;
for (unsigned idx = 1; idx < task->regions.size(); idx++)
if (task->regions[idx].region.exists())
args.in[num_inputs++] = derez.unpack_accessor_RO<T, 1>(regions[idx], rect, extra_dim_in, offset + idx - 1);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
legate_max_radix_1d<T><<<blocks, THREADS_PER_BLOCK>>>(out, args, num_inputs, rect.lo, volume);
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) break;
const AccessorWO<T, 2> out =
(extra_dim_out >= 0) ? derez.unpack_accessor_WO<T, 2>(regions[0], rect, extra_dim_out, task->index_point[extra_dim_out])
: derez.unpack_accessor_WO<T, 2>(regions[0], rect);
MaxRadixArgs<T, 2> args;
unsigned num_inputs = 0;
for (unsigned idx = 1; idx < task->regions.size(); idx++)
if (task->regions[idx].region.exists())
args.in[num_inputs++] = derez.unpack_accessor_RO<T, 2>(regions[idx], rect, extra_dim_in, offset + idx - 1);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
legate_max_radix_2d<T><<<blocks, THREADS_PER_BLOCK>>>(out, args, num_inputs, rect.lo, Point<1>(pitch), volume);
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) break;
const AccessorWO<T, 3> out =
(extra_dim_out >= 0) ? derez.unpack_accessor_WO<T, 3>(regions[0], rect, extra_dim_out, task->index_point[extra_dim_out])
: derez.unpack_accessor_WO<T, 3>(regions[0], rect);
MaxRadixArgs<T, 3> args;
unsigned num_inputs = 0;
for (unsigned idx = 1; idx < task->regions.size(); idx++)
args.in[num_inputs++] = derez.unpack_accessor_RO<T, 3>(regions[idx], rect, extra_dim_in, offset + idx - 1);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
legate_max_radix_3d<T><<<blocks, THREADS_PER_BLOCK>>>(out, args, num_inputs, rect.lo, Point<2>(pitch), volume);
break;
}
default:
assert(false);
}
}
INSTANTIATE_TASK_VARIANT(MaxRadixTask, gpu_variant)
} // namespace numpy
} // namespace legate
|
f87fde945230c1d7151618fd8155fe1692f0f65d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* calcSurfactantsGradientNormWHA.cu
*
* Created on: 17-10-2014
* Author: Kamil Szewc ([email protected])
*/
#include <math.h>
#include "../../sph.h"
#include "../../hlp.h"
#include "../../methods/kernels.cuh"
#include "../../methods/interactions.cuh"
#include <stdio.h>
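// Per-pair contribution to a 2x2 matrix built from kernel-gradient components and relative
// positions, weighted by the neighbour's o*m/d; the kernel below inverts the accumulated
// matrix to renormalise the surfactant concentration gradient.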
__device__ static real4 interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par)
{
real r = hypot(dpos.x, dpos.y);
real q = r * par->I_H;
if ((p[j].a > 0.000001f) && (q < 2.0) && ((i != j) || ((i == j) && (q > 0.001f*par->I_H)))) {
real gkx = grad_of_kern(dpos.x, q, par->I_H);
real gky = grad_of_kern(dpos.y, q, par->I_H);
real val = p[j].o * p[j].m / p[j].d;
return MAKE_REAL4(val*gkx*dpos.x, val*gkx*dpos.y, val*gky*dpos.x, val*gky*dpos.y);
}
else
{
return MAKE_REAL4(0.0, 0.0, 0.0, 0.0);
}
}
__global__ void calcSurfactantsGradientNormWHA(Particle *p,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
Parameters *par)
{
uint index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < par->N) {
register real4 result = MAKE_REAL4(0.0, 0.0, 0.0, 0.0);
#include "../../methods/interactions/interactionsNegativeOnWallNoSlip.cuh"
real matrixDeterminant = result.w * result.x - result.y * result.z;
real4 matrix;
if (matrixDeterminant > 0.0)
{
matrix.x = result.w / matrixDeterminant;
matrix.y = -result.y / matrixDeterminant;
matrix.z = -result.z / matrixDeterminant;
matrix.w = result.x / matrixDeterminant;
}
else
{
matrix = MAKE_REAL4(0.0, 0.0, 0.0, 0.0);
}
real surfGradX = (matrix.x * p[index].cSurfGrad.x + matrix.y * p[index].cSurfGrad.y) * p[index].n.z * p[index].dSurf;
real surfGradY = (matrix.z * p[index].cSurfGrad.x + matrix.w * p[index].cSurfGrad.y) * p[index].n.z * p[index].dSurf;
p[index].cSurfGrad.x = surfGradX;
p[index].cSurfGrad.y = surfGradY;
}
}
| f87fde945230c1d7151618fd8155fe1692f0f65d.cu | /*
* calcSurfactantsGradientNormWHA.cu
*
* Created on: 17-10-2014
* Author: Kamil Szewc ([email protected])
*/
#include <math.h>
#include "../../sph.h"
#include "../../hlp.h"
#include "../../methods/kernels.cuh"
#include "../../methods/interactions.cuh"
#include <stdio.h>
__device__ static real4 interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par)
{
real r = hypot(dpos.x, dpos.y);
real q = r * par->I_H;
if ((p[j].a > 0.000001f) && (q < 2.0) && ((i != j) || ((i == j) && (q > 0.001f*par->I_H)))) {
real gkx = grad_of_kern(dpos.x, q, par->I_H);
real gky = grad_of_kern(dpos.y, q, par->I_H);
real val = p[j].o * p[j].m / p[j].d;
return MAKE_REAL4(val*gkx*dpos.x, val*gkx*dpos.y, val*gky*dpos.x, val*gky*dpos.y);
}
else
{
return MAKE_REAL4(0.0, 0.0, 0.0, 0.0);
}
}
__global__ void calcSurfactantsGradientNormWHA(Particle *p,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
Parameters *par)
{
uint index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < par->N) {
register real4 result = MAKE_REAL4(0.0, 0.0, 0.0, 0.0);
#include "../../methods/interactions/interactionsNegativeOnWallNoSlip.cuh"
real matrixDeterminant = result.w * result.x - result.y * result.z;
real4 matrix;
if (matrixDeterminant > 0.0)
{
matrix.x = result.w / matrixDeterminant;
matrix.y = -result.y / matrixDeterminant;
matrix.z = -result.z / matrixDeterminant;
matrix.w = result.x / matrixDeterminant;
}
else
{
matrix = MAKE_REAL4(0.0, 0.0, 0.0, 0.0);
}
real surfGradX = (matrix.x * p[index].cSurfGrad.x + matrix.y * p[index].cSurfGrad.y) * p[index].n.z * p[index].dSurf;
real surfGradY = (matrix.z * p[index].cSurfGrad.x + matrix.w * p[index].cSurfGrad.y) * p[index].n.z * p[index].dSurf;
p[index].cSurfGrad.x = surfGradX;
p[index].cSurfGrad.y = surfGradY;
}
}
|
2651bdaa99c040f6327b0341c07017796d70e0df.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <cusolverDn.h>
#include "fileRead.h"
#include<iostream>
#include<math.h>
#include<float.h>
#include <ctime>
#include <ratio>
#include <chrono>
#define BLOCK_SIZE 1
using namespace std;
using namespace std::chrono;
int total_time =0;
void debug(int n){
// cout<<"asd "<<n<<endl;
}
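// Element-wise difference c = a - b; launched below with a single block of n threads.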
__global__ void VectorSub(double *a, double *b, double *c, int n)
{
int i = threadIdx.x;
if(i < n)
c[i] = a[i] - b[i];
}
void printMatrix(int m, int n, double*A, int lda, const char* name)
{
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
double Areg = A[row + col*lda];
printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg);
}
}
}
// Matrix Matrix Multiplication
__global__ void gpu_matrix_matrix(double *a,double *b, double *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0;
if( col < k && row < m)
{
for(int i = 0; i < n; i++)
{
sum += a[i * m + row] * b[n * col + i];
}
c[col * k + row] = sum;
}
}
// Matrix transpose
__global__ void gpu_matrix_transpose(double* mat_in, double* mat_out, unsigned int rows, unsigned int cols)
{
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < cols && row < rows)
{
unsigned int pos = col * rows + row;
unsigned int trans_pos = row * cols + col;
mat_out[trans_pos] = mat_in[pos];
}
}
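// SVD-based digit classifier: for each class, take the SVD of the 784 x N training matrix,
// keep the first 20 left singular vectors and form a 784 x 784 matrix from them (intended as
// the projector U20*U20^T); a test image is assigned to the class whose matrix, applied to
// the image, leaves the smallest residual.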
int main(int argc, char*argv[])
{
hipsolverDnHandle_t cusolverH = NULL;
hipblasHandle_t cublasH = NULL;
hipblasStatus_t cublas_status = HIPBLAS_STATUS_SUCCESS;
cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS;
testAndTrainData tt = calcData();
vector<vector<double >> svd(10);
cout<<"Training Size of each Set"<<endl;
for(int i=0;i<10;i++){
cout<<tt.train[i].size()<<" "<<tt.train[i][0].size()<<endl;
}
for(int set=0;set<10;set++){
//------------------------
int m = tt.train[set][0].size();
int n = tt.train[set].size();
int lda = m;
double A[lda*n];
for(int j=0;j<m;j++){
for(int k=0;k<n;k++){
A[k*m + j] = tt.train[set][k][j];
}
}
double U[lda*m]; // m-by-m unitary matrix
double VT[lda*n]; // n-by-n unitary matrix
double S[n]; // singular value
double *d_A = NULL;
double *d_S = NULL;
double *d_U = NULL;
double *d_VT = NULL;
int *devInfo = NULL;
double *d_work = NULL;
double *d_rwork = NULL;
double *d_W = NULL; // W = S*VT
int lwork = 0;
int info_gpu = 0;
// printMatrix(m, n, A, lda, "A");
// printf("=====\n");
cusolver_status = hipsolverDnCreate(&cusolverH);
cublas_status = hipblasCreate(&cublasH);
hipMalloc ((void**)&d_A , sizeof(double)*lda*n);
hipMalloc ((void**)&d_S , sizeof(double)*n);
hipMalloc ((void**)&d_U , sizeof(double)*lda*m);
hipMalloc ((void**)&d_VT , sizeof(double)*lda*n);
hipMalloc ((void**)&devInfo, sizeof(int));
hipMalloc ((void**)&d_W , sizeof(double)*lda*n);
hipMemcpy(d_A, A, sizeof(double)*lda*n, hipMemcpyHostToDevice);
cusolver_status = hipsolverDnDgesvd_bufferSize(cusolverH, m, n, &lwork );
hipMalloc((void**)&d_work , sizeof(double)*lwork);
signed char jobu = 'A'; // all m columns of U
signed char jobvt = 'A'; // all n columns of VT
cusolver_status = hipsolverDnDgesvd (
cusolverH,
jobu,
jobvt,
m,
n,
d_A,
lda,
d_S,
d_U,
lda, // ldu
d_VT,
lda, // ldvt,
d_work,
lwork,
d_rwork,
devInfo);
hipDeviceSynchronize();
hipMemcpy(U , d_U , sizeof(double)*lda*m, hipMemcpyDeviceToHost);
hipMemcpy(VT, d_VT, sizeof(double)*lda*n, hipMemcpyDeviceToHost);
hipMemcpy(S , d_S , sizeof(double)*n , hipMemcpyDeviceToHost);
hipMemcpy(&info_gpu, devInfo, sizeof(int), hipMemcpyDeviceToHost);
// printf("after gesvd: info_gpu = %d\n", info_gpu);
// printf("=====\n");
/*
printf("S = (matlab base-1)\n");
printMatrix(n, 1, S, lda, "S");
printf("=====\n");
printf("U = (matlab base-1)\n");
printMatrix(m, m, U, lda, "U");
printf("=====\n");
printf("VT = (matlab base-1)\n");
printMatrix(n, n, VT, lda, "VT");
printf("=====\n");
*/
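// Keep only the first 20 columns of U (the leading left singular vectors), stored column-major.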
double Ui[20*lda];
for(int i=0;i<20*lda;i++){
Ui[i] = U[i];
}
/* printf("U = (matlab base-1)\n");
printMatrix(20, m, Ui, lda, "Ui");
printf("=====\n");
*/
int d1 = 20;
int d2 = lda;
double *tUi;
hipHostMalloc((void **) &tUi, sizeof(double)*d2*d1);
double *d_tUi,*d_Ui;
hipMalloc((void **) &d_tUi, sizeof(double)*d2*d1);
hipMalloc((void **) &d_Ui, sizeof(double)*d1*d2);
hipMemcpy(d_Ui, Ui, sizeof(double)*d1*d2, hipMemcpyHostToDevice);
dim3 dimGrid((d2 + BLOCK_SIZE - 1) / BLOCK_SIZE, (d1 + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
auto start = high_resolution_clock::now();
hipLaunchKernelGGL(( gpu_matrix_transpose), dim3(dimGrid),dim3(dimBlock), 0, 0, d_Ui,d_tUi,d1,d2);
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop-start);
//cout<<duration.count()<<endl;
total_time += duration.count();
hipMemcpy(tUi, d_tUi, sizeof(double)*d2*d1, hipMemcpyDeviceToHost);
/*
printf("U = (matlab base-1)\n");
printMatrix(m, 20, tUi, lda, "Ut");
printf("=====\n");
*/
double *utu;
hipHostMalloc((void **) &utu, sizeof(double)*d2*d2);
double *d_utu;
// d_tUi was already allocated above and still holds the transpose result; only d_utu is new.
hipMalloc((void **) &d_utu, sizeof(double)*d2*d2);
// copy matrix A and B from host to device memory
hipMemcpy(d_Ui, Ui, sizeof(double)*d1*d2, hipMemcpyHostToDevice);
hipMemcpy(d_tUi, tUi, sizeof(double)*d2*d1, hipMemcpyHostToDevice);
dim3 dimGrid1((d2 + BLOCK_SIZE - 1) / BLOCK_SIZE, (d2 + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock1(BLOCK_SIZE, BLOCK_SIZE);
start = high_resolution_clock::now();
hipLaunchKernelGGL(( gpu_matrix_matrix), dim3(dimGrid1), dim3(dimBlock1), 0, 0, d_tUi, d_Ui, d_utu, d2, d1, d2);
stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(stop-start);
//cout<<duration.count()<<endl;
total_time += duration.count();
hipMemcpy(utu, d_utu, sizeof(double)*d2*d2, hipMemcpyDeviceToHost);
//printf("U = (matlab base-1)\n");
//printMatrix(m, m, utu, lda, "U");
//printf("=====\n");
//------------------------
//adding utu to svd[set]
// cout<<"SVD"<<endl;
for(int i=0;i<d2*d2;i++){
svd[set].push_back(utu[i]);
// cout<<set<<"----"<<utu[i]<<endl;
}
hipDeviceReset();
}
cout<<"Training Completed"<<endl;
//prediction
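// For each test image: apply every class's stored 784x784 matrix, subtract the original image,
// and choose the class giving the smallest Euclidean residual.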
vector<double> accuracy(10);
int total_img=0, total_acc=0;
//host;
double *result , *svdU;
hipHostMalloc((void **) &result , 784 * sizeof(double));
hipHostMalloc((void **) &svdU , 784*784 * sizeof(double));
double *img;
hipHostMalloc((void **) &img , 784 * sizeof(double));
double *sub;
hipHostMalloc((void **) &sub , 784 * sizeof(double));
//device;
double *d_result , *d_svdU;
hipMalloc((void **) &d_result, sizeof(double)* 784);
hipMalloc((void **) &d_svdU , 784*784 * sizeof(double));
double *d_img;
hipMalloc((void **) &d_img , 784 * sizeof(double));
double *d_sub;
hipMalloc((void **) &d_sub , 784 * sizeof(double));
for(int set=0;set<10;set++){
int acc = 0;
dim3 dimGrid2((784 + BLOCK_SIZE - 1) / BLOCK_SIZE, (784 + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock2(BLOCK_SIZE, BLOCK_SIZE);
for(int row=0;row<tt.test[set].size();row++){
for(int i=0;i<784;i++){
img[i] = tt.test[set][row][i];
}
hipMemcpy(d_img, img, 784 * sizeof(double), hipMemcpyHostToDevice);
double min = DBL_MAX;
//long double min = 999999999999999999999;
int index = 0;
//check for every svd;
for(int k=0;k<10;k++){
for(int i=0;i<784*784;i++){
svdU[i] = svd[k][i];
}
hipMemcpy(d_svdU, svdU , 784 * 784 * sizeof(double) , hipMemcpyHostToDevice);
//UUT * img
auto start = high_resolution_clock::now();
hipLaunchKernelGGL(( gpu_matrix_matrix), dim3(dimGrid2), dim3(dimBlock2), 0, 0, d_svdU, d_img, d_result, 784, 784, 1);
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop-start);
//cout<<duration.count()<<endl;
total_time += duration.count();
// result -img
start = high_resolution_clock::now();
hipLaunchKernelGGL(( VectorSub), dim3(1),dim3(784), 0, 0, d_result,d_img,d_sub,784);
stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(stop-start);
//cout<<duration.count()<<endl;
total_time += duration.count();
hipMemcpy(sub, d_sub, 784 * sizeof(double), hipMemcpyDeviceToHost);
//sub * sub
double inner_product = 0.0;
for(int i=0;i<784;i++){
inner_product += sub[i]*sub[i];
}
/* printf("Result = (matlab base-1)\n");
printMatrix(784, 1, result, m, "Result");
printf("=====\n");
cout<<"InnerProduct SQRT - "<<sqrt(inner_product); */
if(min > sqrt(inner_product)){
min = sqrt(inner_product);
index = k;
}
}
if(index == set){
total_acc++;
acc++;
cout<<set<<"----"<<index<<endl;
}
}
accuracy[set] = (double)acc/(double)tt.test[set].size();
total_img += tt.test[set].size();
}
hipDeviceReset();
for(int set=0;set<10;set++){
cout<<"Accuracy of Set "<<set<<" is "<<accuracy[set]<<endl;
}
cout<<total_img<<"---"<<total_acc<<endl;
cout<<"Total Accurayc is "<<(double)total_acc/(double)total_img<<endl;
cout<<"Total Time - "<<total_time<<"---- No of threads - "<<BLOCK_SIZE<<endl;
return 0;
}
| 2651bdaa99c040f6327b0341c07017796d70e0df.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "fileRead.h"
#include<iostream>
#include<math.h>
#include<float.h>
#include <ctime>
#include <ratio>
#include <chrono>
#define BLOCK_SIZE 1
using namespace std;
using namespace std::chrono;
int total_time =0;
void debug(int n){
// cout<<"asd "<<n<<endl;
}
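// Element-wise vector subtraction: c[i] = a[i] - b[i], one thread per element.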
__global__ void VectorSub(double *a, double *b, double *c, int n)
{
int i = threadIdx.x;
if(i < n)
c[i] = a[i] - b[i];
}
void printMatrix(int m, int n, double*A, int lda, const char* name)
{
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
double Areg = A[row + col*lda];
printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg);
}
}
}
// Matrix Matrix Multiplication
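// Computes C = A*B with A (m x n) and B (n x k) accessed column-major; each thread produces one element of C.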
__global__ void gpu_matrix_matrix(double *a,double *b, double *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0;
if( col < k && row < m)
{
for(int i = 0; i < n; i++)
{
sum += a[i * m + row] * b[n * col + i];
}
c[col * k + row] = sum;
}
}
// Matrix transpose
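// Writes the transpose of a rows x cols matrix into mat_out, one thread per element.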
__global__ void gpu_matrix_transpose(double* mat_in, double* mat_out, unsigned int rows, unsigned int cols)
{
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < cols && row < rows)
{
unsigned int pos = col * rows + row;
unsigned int trans_pos = row * cols + col;
mat_out[trans_pos] = mat_in[pos];
}
}
int main(int argc, char*argv[])
{
cusolverDnHandle_t cusolverH = NULL;
cublasHandle_t cublasH = NULL;
cublasStatus_t cublas_status = CUBLAS_STATUS_SUCCESS;
cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS;
testAndTrainData tt = calcData();
vector<vector<double >> svd(10);
cout<<"Training Size of each Set"<<endl;
for(int i=0;i<10;i++){
cout<<tt.train[i].size()<<" "<<tt.train[i][0].size()<<endl;
}
for(int set=0;set<10;set++){
//------------------------
int m = tt.train[set][0].size();
int n = tt.train[set].size();
int lda = m;
double A[lda*n];
for(int j=0;j<m;j++){
for(int k=0;k<n;k++){
A[k*m + j] = tt.train[set][k][j];
}
}
double U[lda*m]; // m-by-m unitary matrix
double VT[lda*n]; // n-by-n unitary matrix
double S[n]; // singular value
double *d_A = NULL;
double *d_S = NULL;
double *d_U = NULL;
double *d_VT = NULL;
int *devInfo = NULL;
double *d_work = NULL;
double *d_rwork = NULL;
double *d_W = NULL; // W = S*VT
int lwork = 0;
int info_gpu = 0;
// printMatrix(m, n, A, lda, "A");
// printf("=====\n");
cusolver_status = cusolverDnCreate(&cusolverH);
cublas_status = cublasCreate(&cublasH);
cudaMalloc ((void**)&d_A , sizeof(double)*lda*n);
cudaMalloc ((void**)&d_S , sizeof(double)*n);
cudaMalloc ((void**)&d_U , sizeof(double)*lda*m);
cudaMalloc ((void**)&d_VT , sizeof(double)*lda*n);
cudaMalloc ((void**)&devInfo, sizeof(int));
cudaMalloc ((void**)&d_W , sizeof(double)*lda*n);
cudaMemcpy(d_A, A, sizeof(double)*lda*n, cudaMemcpyHostToDevice);
cusolver_status = cusolverDnDgesvd_bufferSize(cusolverH, m, n, &lwork );
cudaMalloc((void**)&d_work , sizeof(double)*lwork);
signed char jobu = 'A'; // all m columns of U
signed char jobvt = 'A'; // all n columns of VT
cusolver_status = cusolverDnDgesvd (
cusolverH,
jobu,
jobvt,
m,
n,
d_A,
lda,
d_S,
d_U,
lda, // ldu
d_VT,
lda, // ldvt,
d_work,
lwork,
d_rwork,
devInfo);
cudaDeviceSynchronize();
cudaMemcpy(U , d_U , sizeof(double)*lda*m, cudaMemcpyDeviceToHost);
cudaMemcpy(VT, d_VT, sizeof(double)*lda*n, cudaMemcpyDeviceToHost);
cudaMemcpy(S , d_S , sizeof(double)*n , cudaMemcpyDeviceToHost);
cudaMemcpy(&info_gpu, devInfo, sizeof(int), cudaMemcpyDeviceToHost);
// printf("after gesvd: info_gpu = %d\n", info_gpu);
// printf("=====\n");
/*
printf("S = (matlab base-1)\n");
printMatrix(n, 1, S, lda, "S");
printf("=====\n");
printf("U = (matlab base-1)\n");
printMatrix(m, m, U, lda, "U");
printf("=====\n");
printf("VT = (matlab base-1)\n");
printMatrix(n, n, VT, lda, "VT");
printf("=====\n");
*/
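// Keep only the first 20 left singular vectors: U is column-major with leading dimension lda, so its first 20*lda entries are columns 1..20.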
double Ui[20*lda];
for(int i=0;i<20*lda;i++){
Ui[i] = U[i];
}
/* printf("U = (matlab base-1)\n");
printMatrix(20, m, Ui, lda, "Ui");
printf("=====\n");
*/
int d1 = 20;
int d2 = lda;
double *tUi;
cudaMallocHost((void **) &tUi, sizeof(double)*d2*d1);
double *d_tUi,*d_Ui;
cudaMalloc((void **) &d_tUi, sizeof(double)*d2*d1);
cudaMalloc((void **) &d_Ui, sizeof(double)*d1*d2);
cudaMemcpy(d_Ui, Ui, sizeof(double)*d1*d2, cudaMemcpyHostToDevice);
dim3 dimGrid((d2 + BLOCK_SIZE - 1) / BLOCK_SIZE, (d1 + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
auto start = high_resolution_clock::now();
gpu_matrix_transpose<<<dimGrid,dimBlock>>>(d_Ui,d_tUi,d1,d2);
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop-start);
//cout<<duration.count()<<endl;
total_time += duration.count();
cudaMemcpy(tUi, d_tUi, sizeof(double)*d2*d1, cudaMemcpyDeviceToHost);
/*
printf("U = (matlab base-1)\n");
printMatrix(m, 20, tUi, lda, "Ut");
printf("=====\n");
*/
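// Form the d2 x d2 (784 x 784) matrix built from the 20 retained singular vectors; it is intended as the projector U*U^T onto this digit's subspace (the "UUT" used in the prediction stage).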
double *utu;
cudaMallocHost((void **) &utu, sizeof(double)*d2*d2);
double *d_utu;
cudaMalloc((void **) &d_utu, sizeof(double)*d2*d2);
// copy matrix A and B from host to device memory
cudaMemcpy(d_Ui, Ui, sizeof(double)*d1*d2, cudaMemcpyHostToDevice);
cudaMemcpy(d_tUi, tUi, sizeof(double)*d2*d1, cudaMemcpyHostToDevice);
dim3 dimGrid1((d2 + BLOCK_SIZE - 1) / BLOCK_SIZE, (d2 + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock1(BLOCK_SIZE, BLOCK_SIZE);
start = high_resolution_clock::now();
gpu_matrix_matrix<<<dimGrid1, dimBlock1>>>(d_tUi, d_Ui, d_utu, d2, d1, d2);
stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(stop-start);
//cout<<duration.count()<<endl;
total_time += duration.count();
cudaMemcpy(utu, d_utu, sizeof(double)*d2*d2, cudaMemcpyDeviceToHost);
//printf("U = (matlab base-1)\n");
//printMatrix(m, m, utu, lda, "U");
//printf("=====\n");
//------------------------
//adding utu to svd[set]
// cout<<"SVD"<<endl;
for(int i=0;i<d2*d2;i++){
svd[set].push_back(utu[i]);
// cout<<set<<"----"<<utu[i]<<endl;
}
cudaDeviceReset();
}
cout<<"Training Completed"<<endl;
//prediction
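// For each test image x and each digit class, compute U*U^T*x with the stored 784x784 matrix and pick the class with the smallest residual norm ||U*U^T*x - x||.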
vector<double> accuracy(10);
int total_img=0, total_acc=0;
//host;
double *result , *svdU;
cudaMallocHost((void **) &result , 784 * sizeof(double));
cudaMallocHost((void **) &svdU , 784*784 * sizeof(double));
double *img;
cudaMallocHost((void **) &img , 784 * sizeof(double));
double *sub;
cudaMallocHost((void **) &sub , 784 * sizeof(double));
//device;
double *d_result , *d_svdU;
cudaMalloc((void **) &d_result, sizeof(double)* 784);
cudaMalloc((void **) &d_svdU , 784*784 * sizeof(double));
double *d_img;
cudaMalloc((void **) &d_img , 784 * sizeof(double));
double *d_sub;
cudaMalloc((void **) &d_sub , 784 * sizeof(double));
for(int set=0;set<10;set++){
int acc = 0;
dim3 dimGrid2((784 + BLOCK_SIZE - 1) / BLOCK_SIZE, (784 + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock2(BLOCK_SIZE, BLOCK_SIZE);
for(int row=0;row<tt.test[set].size();row++){
for(int i=0;i<784;i++){
img[i] = tt.test[set][row][i];
}
cudaMemcpy(d_img, img, 784 * sizeof(double), cudaMemcpyHostToDevice);
double min = DBL_MAX;
//long double min = 999999999999999999999;
int index = 0;
//check for every svd;
for(int k=0;k<10;k++){
for(int i=0;i<784*784;i++){
svdU[i] = svd[k][i];
}
cudaMemcpy(d_svdU, svdU , 784 * 784 * sizeof(double) , cudaMemcpyHostToDevice);
//UUT * img
auto start = high_resolution_clock::now();
gpu_matrix_matrix<<<dimGrid2, dimBlock2>>>(d_svdU, d_img, d_result, 784, 784, 1);
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop-start);
//cout<<duration.count()<<endl;
total_time += duration.count();
// result -img
start = high_resolution_clock::now();
VectorSub<<<1,784>>>(d_result,d_img,d_sub,784);
stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(stop-start);
//cout<<duration.count()<<endl;
total_time += duration.count();
cudaMemcpy(sub, d_sub, 784 * sizeof(double), cudaMemcpyDeviceToHost);
//sub * sub
double inner_product = 0.0;
for(int i=0;i<784;i++){
inner_product += sub[i]*sub[i];
}
/* printf("Result = (matlab base-1)\n");
printMatrix(784, 1, result, m, "Result");
printf("=====\n");
cout<<"InnerProduct SQRT - "<<sqrt(inner_product); */
if(min > sqrt(inner_product)){
min = sqrt(inner_product);
index = k;
}
}
if(index == set){
total_acc++;
acc++;
cout<<set<<"----"<<index<<endl;
}
}
accuracy[set] = (double)acc/(double)tt.test[set].size();
total_img += tt.test[set].size();
}
cudaDeviceReset();
for(int set=0;set<10;set++){
cout<<"Accuracy of Set "<<set<<" is "<<accuracy[set]<<endl;
}
cout<<total_img<<"---"<<total_acc<<endl;
cout<<"Total Accurayc is "<<(double)total_acc/(double)total_img<<endl;
cout<<"Total Time - "<<total_time<<"---- No of threads - "<<BLOCK_SIZE<<endl;
return 0;
}
|
9a8ed219887416b687ffd3a670d9158e03d3787b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Inverse Discrete Sine Transform in Column wise (DST one)
* DST_I_Column_Inverse
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_I_Column_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DST_I_Column_Inverse.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "ERRORCHK.h"
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const float PI_d = 3.141592653589793238462643383279502884f; //pi
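// Tiled product C = S * A: the DST-I basis S(i,j) = sqrt(2/(N+1)) * sin((i+1)*(j+1)*PI/(N+1)), N = numARows, is generated on the fly into shared memory instead of being read from global memory, while A is staged through TILE_DIM x TILE_DIM shared-memory tiles.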
template <unsigned int TILE_DIM > __global__ void DSTI_Column_Inverse_Kernelx(float *A, float *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
float CValue = 0.0f;
const float PI_d = 3.141592653589793238462643383279502884f; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
//As[threadIdx.y][threadIdx.x] = __cosf((Row*PI_d*(threadIdx.x + (k*TILE_DIM)) / (numARows - 1)))*sqrtf(1.0 / (1 + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, 1) + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, numARows)))*sqrtf(1.0 / (1 + DELTA(1, Row + 1) + DELTA(numARows, Row + 1)))*sqrtf(2.0 / numARows);
if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = __sinf(((((threadIdx.x + k*TILE_DIM) + 1)*PI_d*(Row + 1)) / (numARows + 1)))*sqrtf(2.0 / (numARows + 1)); }
//As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDSTInverseColumnOneS(float * A, float * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
float * hostA = A; // The A matrix
//float * hostB = B; // The B matrix
float * hostC = C; // The output C matrix
//float * hostComputedC;
float * deviceA;
//float * deviceB;
float * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
hipError_t error;
int devID = 0;
// get number of SMs on this GPU
error = hipGetDevice(&devID);
hipDeviceProp_t deviceProp;
error = hipGetDeviceProperties(&deviceProp, devID);
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
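// Use 16x16 shared-memory tiles on devices of compute capability < 2, 32x32 otherwise.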
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(hipMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
//hipMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns);
gpuErrchk(hipMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
//thrust::device_ptr< float >dev_ptr_A(deviceA);
//thrust::device_ptr< float >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice));
//hipMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice);
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTI_Column_Inverse_Kernelx <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTI_Column_Inverse_Kernelx <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
}
}
| 9a8ed219887416b687ffd3a670d9158e03d3787b.cu | /*
* Inverse Discrete Sine Transform in Column wise (DST one)
* DST_I_Column_Inverse
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_I_Column_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DST_I_Column_Inverse.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "ERRORCHK.h"
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const float PI_d = 3.141592653589793238462643383279502884f; //pi
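// Tiled product C = S * A: the DST-I basis S(i,j) = sqrt(2/(N+1)) * sin((i+1)*(j+1)*PI/(N+1)), N = numARows, is generated on the fly into shared memory instead of being read from global memory, while A is staged through TILE_DIM x TILE_DIM shared-memory tiles.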
template <unsigned int TILE_DIM > __global__ void DSTI_Column_Inverse_Kernelx(float *A, float *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
float CValue = 0.0f;
const float PI_d = 3.141592653589793238462643383279502884f; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
//As[threadIdx.y][threadIdx.x] = __cosf((Row*PI_d*(threadIdx.x + (k*TILE_DIM)) / (numARows - 1)))*sqrtf(1.0 / (1 + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, 1) + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, numARows)))*sqrtf(1.0 / (1 + DELTA(1, Row + 1) + DELTA(numARows, Row + 1)))*sqrtf(2.0 / numARows);
if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = __sinf(((((threadIdx.x + k*TILE_DIM) + 1)*PI_d*(Row + 1)) / (numARows + 1)))*sqrtf(2.0 / (numARows + 1)); }
//As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDSTInverseColumnOneS(float * A, float * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
float * hostA = A; // The A matrix
//float * hostB = B; // The B matrix
float * hostC = C; // The output C matrix
//float * hostComputedC;
float * deviceA;
//float * deviceB;
float * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
cudaError_t error;
int devID = 0;
// get number of SMs on this GPU
error = cudaGetDevice(&devID);
cudaDeviceProp deviceProp;
error = cudaGetDeviceProperties(&deviceProp, devID);
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
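// Use 16x16 shared-memory tiles on devices of compute capability < 2, 32x32 otherwise.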
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
//cudaMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns);
gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
//thrust::device_ptr< float >dev_ptr_A(deviceA);
//thrust::device_ptr< float >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice));
//cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice);
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTI_Column_Inverse_Kernelx <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTI_Column_Inverse_Kernelx <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
}
}
|
int8x8x32.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file dnn/src/cuda/batched_matrix_mul/int8x8x32.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include <hip/hip_runtime.h>
#include "./int8x8x32.cuh"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
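// gmem2reg_cpy loads one int8 tile from global memory into registers, packing four
// consecutive int8 values along the reduction axis into each int32. The tr path also
// transposes 4x4 int8 blocks with shifts and masks, the aligned path uses vectorized
// 32-bit loads, and out-of-range elements are zero-filled.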
template <typename SmemConfig>
__device__ __forceinline__ void Global2SharedMem<SmemConfig>::gmem2reg_cpy() {
if (tr) {
int32_t cpy_reg[SmemConfig::smem_row][SmemConfig::smem_col / 4];
if (aligned) {
if (SmemConfig::smem_row <= check_bound_row &&
SmemConfig::smem_col <= check_bound_col) {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row; ++row) {
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col / 4; ++col) {
cpy_reg[row][col] = *(reinterpret_cast<const int32_t*>(
&g_ptr[row * ld_src + col * 4]));
}
}
} else {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row; ++row) {
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col / 4; ++col) {
int32_t val = 0;
if (row < check_bound_row && col * 4 < check_bound_col)
val = *(reinterpret_cast<const int32_t*>(
&g_ptr[row * ld_src + col * 4]));
cpy_reg[row][col] = val;
}
}
}
} else {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row; ++row) {
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col / 4; ++col) {
int32_t val = 0;
if (row < check_bound_row && col * 4 < check_bound_col)
val = (int32_t)0xff & g_ptr[row * ld_src + col * 4];
if (row < check_bound_row && (col * 4 + 1) < check_bound_col)
val |= (((int32_t)0xff & g_ptr[row * ld_src + col * 4 + 1])
<< 8);
if (row < check_bound_row && (col * 4 + 2) < check_bound_col)
val |= (((int32_t)0xff & g_ptr[row * ld_src + col * 4 + 2])
<< 16);
if (row < check_bound_row && (col * 4 + 3) < check_bound_col)
val |= (((int32_t)0xff & g_ptr[row * ld_src + col * 4 + 3])
<< 24);
cpy_reg[row][col] = val;
}
}
}
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col / 4; ++col) {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row / 4; ++row) {
int32_t src0 = cpy_reg[row * 4][col], src1 = cpy_reg[row * 4 + 1][col],
src2 = cpy_reg[row * 4 + 2][col],
src3 = cpy_reg[row * 4 + 3][col];
reg[col * 4 + 3][row] =
((src3 >> 24 & 0xff) << 24) | ((src2 >> 24 & 0xff) << 16) |
((src1 >> 24 & 0xff) << 8) | (src0 >> 24 & 0xff);
reg[col * 4 + 2][row] =
((src3 >> 16 & 0xff) << 24) | ((src2 >> 16 & 0xff) << 16) |
((src1 >> 16 & 0xff) << 8) | (src0 >> 16 & 0xff);
reg[col * 4 + 1][row] = ((src3 >> 8 & 0xff) << 24) |
((src2 >> 8 & 0xff) << 16) |
((src1 >> 8 & 0xff) << 8) | (src0 >> 8 & 0xff);
reg[col * 4][row] = ((src3 & 0xff) << 24) | ((src2 & 0xff) << 16) |
((src1 & 0xff) << 8) | (src0 & 0xff);
}
}
} else {
if (aligned) {
if (SmemConfig::smem_row <= check_bound_row &&
SmemConfig::smem_col <= check_bound_col) {
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col; ++col) {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row / 4; ++row) {
reg[col][row] = *(reinterpret_cast<const int32_t*>(
&g_ptr[col * ld_src + row * 4]));
}
}
} else {
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col; ++col) {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row / 4; ++row) {
int32_t val = 0;
if (row * 4 < check_bound_row && col < check_bound_col)
val = *(reinterpret_cast<const int32_t*>(
&g_ptr[col * ld_src + row * 4]));
reg[col][row] = val;
}
}
}
} else {
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col; ++col) {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row / 4; ++row) {
int32_t val = 0;
if (col < check_bound_col && row * 4 < check_bound_row)
val = (int32_t)0xff & g_ptr[col * ld_src + row * 4];
if (col < check_bound_col && (row * 4 + 1) < check_bound_row)
val |= (((int32_t)0xff & g_ptr[col * ld_src + row * 4 + 1])
<< 8);
if (col < check_bound_col && (row * 4 + 2) < check_bound_row)
val |= (((int32_t)0xff & g_ptr[col * ld_src + row * 4 + 2])
<< 16);
if (col < check_bound_col && (row * 4 + 3) < check_bound_row)
val |= (((int32_t)0xff & g_ptr[col * ld_src + row * 4 + 3])
<< 24);
reg[col][row] = val;
}
}
}
}
}
template <typename SmemConfig>
__device__ __forceinline__ void Global2SharedMem<SmemConfig>::reg2smem_cpy() {
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col; ++col) {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row / 4; ++row) {
if (smem_off + row < smem_bound)
smem[smem_off + col * ld_dst + row] = reg[col][row];
}
}
}
template <typename SmemConfig>
__device__ __forceinline__ void Global2SharedMem<SmemConfig>::iter_forward() {
g_ptr += step;
}
template <typename UnrollConfig_, typename ThreadConfig_>
__global__ void batched_8x8x32_kern(
const int8_t* a, int lda, int sta, bool tra, const int8_t* b, int ldb, int stb,
bool trb, int32_t* c, int ldc, int stc, int m, int n, int k) {
typedef UnrollConfig_ UnrollConfig;
typedef ThreadConfig_ ThreadConfig;
int off_batch = blockIdx.z, off_m = blockIdx.x, off_n = blockIdx.y,
off_w = threadIdx.x, off_h = threadIdx.y,
tid_x = off_m * ThreadConfig::thread_x + off_w,
tid_y = off_n * ThreadConfig::thread_y + off_h;
static int const unroll = UnrollConfig::unroll, thread_k = UnrollConfig::thread_k,
load_m = UnrollConfig::load_m, load_n = UnrollConfig::load_n;
typedef SmemConfig<unroll, load_m> SmemA;
typedef SmemConfig<unroll, load_n> SmemB;
typedef Global2SharedMem<SmemA> gl2sh_type_a;
typedef Global2SharedMem<SmemB> gl2sh_type_b;
extern __shared__ int32_t smem[];
int idx_m = off_h / thread_k * load_m + tid_x * UnrollConfig::unroll_m,
idx_n = off_w / thread_k * load_n + tid_y * UnrollConfig::unroll_n,
idx_k_a = off_h % thread_k, idx_k_b = off_w % thread_k;
int off_a = tra ? (off_batch * lda + idx_m + idx_k_a * unroll * sta)
: (off_batch * lda + idx_m * sta + idx_k_a * unroll);
int off_b = trb ? (off_batch * ldb + idx_n * stb + idx_k_b * unroll)
: (off_batch * ldb + idx_n + idx_k_b * unroll * stb);
int off_c = off_batch * ldc + tid_x * UnrollConfig::unroll_m * stc +
tid_y * UnrollConfig::unroll_n;
int32_t* ptr_c = nullptr;
int32_t* smem_a = reinterpret_cast<int32_t*>(smem);
int32_t* smem_b = reinterpret_cast<int32_t*>(
&smem_a[(UnrollConfig::unroll_k / 4) * UnrollConfig::block_m]);
int off_smem_a = (off_w * UnrollConfig::unroll_m + (off_h / thread_k) * load_m) *
UnrollConfig::unroll_k / 4,
off_smem_b = (off_h * UnrollConfig::unroll_n + (off_w / thread_k) * load_n) *
UnrollConfig::unroll_k / 4;
int a_col = load_m;
if (a_col > m - idx_m)
a_col = m - idx_m;
if (a_col < 0) {
off_a = off_batch * lda;
off_c = -1;
a_col = 0;
}
int a_row = unroll;
if (a_row > k - idx_k_a * unroll)
a_row = k - idx_k_a * unroll;
if (a_row < 0) {
off_smem_a = 0;
a_row = 0;
}
int b_col = load_n;
if (b_col > n - idx_n) {
b_col = n - idx_n;
}
if (b_col < 0) {
off_b = off_batch * ldb;
off_c = -1;
b_col = 0;
}
int b_row = unroll;
if (b_row > k - idx_k_b * unroll)
b_row = k - idx_k_b * unroll;
if (b_row < 0) {
off_smem_b = 0;
b_row = 0;
}
if (off_c != -1)
ptr_c = &c[off_c];
int step_a = tra ? UnrollConfig::unroll_k * sta : UnrollConfig::unroll_k,
step_b = trb ? UnrollConfig::unroll_k : UnrollConfig::unroll_k * stb;
bool al_a = tra ? (m % 4 == 0) : (k % 4 == 0),
al_b = trb ? (k % 4 == 0) : (n % 4 == 0);
gl2sh_type_a gl2sh_a(
&smem_a[off_smem_a], idx_k_a * unroll / 4, UnrollConfig::unroll_k / 4, sta,
UnrollConfig::unroll_k / 4, a_row, a_col, step_a, tra, al_a);
gl2sh_type_b gl2sh_b(
&smem_b[off_smem_b], idx_k_b * unroll / 4, UnrollConfig::unroll_k / 4, stb,
UnrollConfig::unroll_k / 4, b_row, b_col, step_b, !trb, al_b);
gl2sh_a.g_ptr = &a[off_a];
gl2sh_b.g_ptr = &b[off_b];
gl2sh_a.gmem2reg_cpy();
gl2sh_b.gmem2reg_cpy();
int32_t sum[UnrollConfig::unroll_m * UnrollConfig::unroll_n];
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_m; ++i)
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_n; ++j)
sum[i * UnrollConfig::unroll_n + j] = 0;
for (int k_out = k; k_out > 0; k_out -= UnrollConfig::unroll_k) {
gl2sh_a.reg2smem_cpy();
gl2sh_b.reg2smem_cpy();
if (k_out > UnrollConfig::unroll_k) {
gl2sh_a.iter_forward();
gl2sh_b.iter_forward();
if (gl2sh_a.check_bound_row >
k_out - UnrollConfig::unroll_k - idx_k_a * unroll) {
gl2sh_a.check_bound_row =
k_out - UnrollConfig::unroll_k - idx_k_a * unroll;
if (gl2sh_a.check_bound_row < 0)
gl2sh_a.check_bound_row = 0;
}
if (gl2sh_b.check_bound_row >
k_out - UnrollConfig::unroll_k - idx_k_b * unroll) {
gl2sh_b.check_bound_row =
k_out - UnrollConfig::unroll_k - idx_k_b * unroll;
if (gl2sh_b.check_bound_row < 0)
gl2sh_b.check_bound_row = 0;
}
gl2sh_a.gmem2reg_cpy();
gl2sh_b.gmem2reg_cpy();
}
__syncthreads();
if (off_c != -1) {
int32_t reg_a[UnrollConfig::unroll_m], reg_b[UnrollConfig::unroll_n];
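// Each packed int32 in reg_a/reg_b holds four int8 values along K; dot_prod does a 4-way int8 multiply with int32 accumulation into sum (dp4a-style).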
#pragma unroll
for (int k_in = 0; k_in < UnrollConfig::unroll_k / 4 && k_in * 4 < k_out;
++k_in) {
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_m; ++i)
reg_a[i] =
smem_a[(off_w * UnrollConfig::unroll_m + i) *
UnrollConfig::unroll_k / 4 +
k_in];
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_n; ++j)
reg_b[j] =
smem_b[(off_h * UnrollConfig::unroll_n + j) *
UnrollConfig::unroll_k / 4 +
k_in];
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_m; ++i)
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_n; ++j) {
dot_prod(
reg_a[i], reg_b[j], sum[i * UnrollConfig::unroll_n + j],
sum[i * UnrollConfig::unroll_n + j]);
}
}
}
__syncthreads();
}
if (off_c != -1) {
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_m; ++i)
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_n; ++j)
if (tid_x * UnrollConfig::unroll_m + i < m &&
tid_y * UnrollConfig::unroll_n + j < n)
*(ptr_c + i * stc + j) = sum[i * UnrollConfig::unroll_n + j];
}
}
void exec_igemm_8x8x32(
const int8_t* A, const int8_t* B, int32_t* C, const int batch_count,
const int m, const int n, const int k, int ldA, int ldB, int ldC, int stA,
int stB, int stC, bool transA, bool transB, hipStream_t stream) {
static int const unroll_m = 8, unroll_n = 8, unroll_k = 32, unroll = 4;
typedef ThreadConfig<8, 8> Thread;
typedef UnrollConfig<Thread, unroll_m, unroll_n, unroll_k, unroll> Unroll;
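// Each thread block computes a block_m x block_n tile of C for one batch (grid.z indexes the batch), staging A and B tiles through shared memory.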
dim3 block(Thread::thread_x, Thread::thread_y);
dim3 grid;
grid.x = (m + Unroll::block_m - 1) / Unroll::block_m;
grid.y = (n + Unroll::block_n - 1) / Unroll::block_n;
grid.z = batch_count;
static uint32_t shared_storage =
(Unroll::block_m + Unroll::block_n) * Unroll::unroll_k * sizeof(int8_t);
void (*kern)(
const int8_t* a, int lda, int sta, bool tra, const int8_t* b, int ldb,
int stb, bool trb, int32_t* c, int ldc, int stc, int m, int n, int k) =
batched_8x8x32_kern<Unroll, Thread>;
hipLaunchKernelGGL(( kern), dim3(grid), dim3(block), shared_storage, stream,
A, ldA, stA, transA, B, ldB, stB, transB, C, ldC, stC, m, n, k);
after_kernel_launch();
}
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
| int8x8x32.cu | /**
* \file dnn/src/cuda/batched_matrix_mul/int8x8x32.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include <cuda.h>
#include "./int8x8x32.cuh"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
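// gmem2reg_cpy loads one int8 tile from global memory into registers, packing four
// consecutive int8 values along the reduction axis into each int32. The tr path also
// transposes 4x4 int8 blocks with shifts and masks, the aligned path uses vectorized
// 32-bit loads, and out-of-range elements are zero-filled.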
template <typename SmemConfig>
__device__ __forceinline__ void Global2SharedMem<SmemConfig>::gmem2reg_cpy() {
if (tr) {
int32_t cpy_reg[SmemConfig::smem_row][SmemConfig::smem_col / 4];
if (aligned) {
if (SmemConfig::smem_row <= check_bound_row &&
SmemConfig::smem_col <= check_bound_col) {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row; ++row) {
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col / 4; ++col) {
cpy_reg[row][col] = *(reinterpret_cast<const int32_t*>(
&g_ptr[row * ld_src + col * 4]));
}
}
} else {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row; ++row) {
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col / 4; ++col) {
int32_t val = 0;
if (row < check_bound_row && col * 4 < check_bound_col)
val = *(reinterpret_cast<const int32_t*>(
&g_ptr[row * ld_src + col * 4]));
cpy_reg[row][col] = val;
}
}
}
} else {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row; ++row) {
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col / 4; ++col) {
int32_t val = 0;
if (row < check_bound_row && col * 4 < check_bound_col)
val = (int32_t)0xff & g_ptr[row * ld_src + col * 4];
if (row < check_bound_row && (col * 4 + 1) < check_bound_col)
val |= (((int32_t)0xff & g_ptr[row * ld_src + col * 4 + 1])
<< 8);
if (row < check_bound_row && (col * 4 + 2) < check_bound_col)
val |= (((int32_t)0xff & g_ptr[row * ld_src + col * 4 + 2])
<< 16);
if (row < check_bound_row && (col * 4 + 3) < check_bound_col)
val |= (((int32_t)0xff & g_ptr[row * ld_src + col * 4 + 3])
<< 24);
cpy_reg[row][col] = val;
}
}
}
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col / 4; ++col) {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row / 4; ++row) {
int32_t src0 = cpy_reg[row * 4][col], src1 = cpy_reg[row * 4 + 1][col],
src2 = cpy_reg[row * 4 + 2][col],
src3 = cpy_reg[row * 4 + 3][col];
reg[col * 4 + 3][row] =
((src3 >> 24 & 0xff) << 24) | ((src2 >> 24 & 0xff) << 16) |
((src1 >> 24 & 0xff) << 8) | (src0 >> 24 & 0xff);
reg[col * 4 + 2][row] =
((src3 >> 16 & 0xff) << 24) | ((src2 >> 16 & 0xff) << 16) |
((src1 >> 16 & 0xff) << 8) | (src0 >> 16 & 0xff);
reg[col * 4 + 1][row] = ((src3 >> 8 & 0xff) << 24) |
((src2 >> 8 & 0xff) << 16) |
((src1 >> 8 & 0xff) << 8) | (src0 >> 8 & 0xff);
reg[col * 4][row] = ((src3 & 0xff) << 24) | ((src2 & 0xff) << 16) |
((src1 & 0xff) << 8) | (src0 & 0xff);
}
}
} else {
if (aligned) {
if (SmemConfig::smem_row <= check_bound_row &&
SmemConfig::smem_col <= check_bound_col) {
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col; ++col) {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row / 4; ++row) {
reg[col][row] = *(reinterpret_cast<const int32_t*>(
&g_ptr[col * ld_src + row * 4]));
}
}
} else {
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col; ++col) {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row / 4; ++row) {
int32_t val = 0;
if (row * 4 < check_bound_row && col < check_bound_col)
val = *(reinterpret_cast<const int32_t*>(
&g_ptr[col * ld_src + row * 4]));
reg[col][row] = val;
}
}
}
} else {
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col; ++col) {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row / 4; ++row) {
int32_t val = 0;
if (col < check_bound_col && row * 4 < check_bound_row)
val = (int32_t)0xff & g_ptr[col * ld_src + row * 4];
if (col < check_bound_col && (row * 4 + 1) < check_bound_row)
val |= (((int32_t)0xff & g_ptr[col * ld_src + row * 4 + 1])
<< 8);
if (col < check_bound_col && (row * 4 + 2) < check_bound_row)
val |= (((int32_t)0xff & g_ptr[col * ld_src + row * 4 + 2])
<< 16);
if (col < check_bound_col && (row * 4 + 3) < check_bound_row)
val |= (((int32_t)0xff & g_ptr[col * ld_src + row * 4 + 3])
<< 24);
reg[col][row] = val;
}
}
}
}
}
template <typename SmemConfig>
__device__ __forceinline__ void Global2SharedMem<SmemConfig>::reg2smem_cpy() {
#pragma unroll
for (int col = 0; col < SmemConfig::smem_col; ++col) {
#pragma unroll
for (int row = 0; row < SmemConfig::smem_row / 4; ++row) {
if (smem_off + row < smem_bound)
smem[smem_off + col * ld_dst + row] = reg[col][row];
}
}
}
template <typename SmemConfig>
__device__ __forceinline__ void Global2SharedMem<SmemConfig>::iter_forward() {
g_ptr += step;
}
template <typename UnrollConfig_, typename ThreadConfig_>
__global__ void batched_8x8x32_kern(
const int8_t* a, int lda, int sta, bool tra, const int8_t* b, int ldb, int stb,
bool trb, int32_t* c, int ldc, int stc, int m, int n, int k) {
typedef UnrollConfig_ UnrollConfig;
typedef ThreadConfig_ ThreadConfig;
int off_batch = blockIdx.z, off_m = blockIdx.x, off_n = blockIdx.y,
off_w = threadIdx.x, off_h = threadIdx.y,
tid_x = off_m * ThreadConfig::thread_x + off_w,
tid_y = off_n * ThreadConfig::thread_y + off_h;
static int const unroll = UnrollConfig::unroll, thread_k = UnrollConfig::thread_k,
load_m = UnrollConfig::load_m, load_n = UnrollConfig::load_n;
typedef SmemConfig<unroll, load_m> SmemA;
typedef SmemConfig<unroll, load_n> SmemB;
typedef Global2SharedMem<SmemA> gl2sh_type_a;
typedef Global2SharedMem<SmemB> gl2sh_type_b;
extern __shared__ int32_t smem[];
int idx_m = off_h / thread_k * load_m + tid_x * UnrollConfig::unroll_m,
idx_n = off_w / thread_k * load_n + tid_y * UnrollConfig::unroll_n,
idx_k_a = off_h % thread_k, idx_k_b = off_w % thread_k;
int off_a = tra ? (off_batch * lda + idx_m + idx_k_a * unroll * sta)
: (off_batch * lda + idx_m * sta + idx_k_a * unroll);
int off_b = trb ? (off_batch * ldb + idx_n * stb + idx_k_b * unroll)
: (off_batch * ldb + idx_n + idx_k_b * unroll * stb);
int off_c = off_batch * ldc + tid_x * UnrollConfig::unroll_m * stc +
tid_y * UnrollConfig::unroll_n;
int32_t* ptr_c = nullptr;
int32_t* smem_a = reinterpret_cast<int32_t*>(smem);
int32_t* smem_b = reinterpret_cast<int32_t*>(
&smem_a[(UnrollConfig::unroll_k / 4) * UnrollConfig::block_m]);
int off_smem_a = (off_w * UnrollConfig::unroll_m + (off_h / thread_k) * load_m) *
UnrollConfig::unroll_k / 4,
off_smem_b = (off_h * UnrollConfig::unroll_n + (off_w / thread_k) * load_n) *
UnrollConfig::unroll_k / 4;
int a_col = load_m;
if (a_col > m - idx_m)
a_col = m - idx_m;
if (a_col < 0) {
off_a = off_batch * lda;
off_c = -1;
a_col = 0;
}
int a_row = unroll;
if (a_row > k - idx_k_a * unroll)
a_row = k - idx_k_a * unroll;
if (a_row < 0) {
off_smem_a = 0;
a_row = 0;
}
int b_col = load_n;
if (b_col > n - idx_n) {
b_col = n - idx_n;
}
if (b_col < 0) {
off_b = off_batch * ldb;
off_c = -1;
b_col = 0;
}
int b_row = unroll;
if (b_row > k - idx_k_b * unroll)
b_row = k - idx_k_b * unroll;
if (b_row < 0) {
off_smem_b = 0;
b_row = 0;
}
if (off_c != -1)
ptr_c = &c[off_c];
int step_a = tra ? UnrollConfig::unroll_k * sta : UnrollConfig::unroll_k,
step_b = trb ? UnrollConfig::unroll_k : UnrollConfig::unroll_k * stb;
bool al_a = tra ? (m % 4 == 0) : (k % 4 == 0),
al_b = trb ? (k % 4 == 0) : (n % 4 == 0);
gl2sh_type_a gl2sh_a(
&smem_a[off_smem_a], idx_k_a * unroll / 4, UnrollConfig::unroll_k / 4, sta,
UnrollConfig::unroll_k / 4, a_row, a_col, step_a, tra, al_a);
gl2sh_type_b gl2sh_b(
&smem_b[off_smem_b], idx_k_b * unroll / 4, UnrollConfig::unroll_k / 4, stb,
UnrollConfig::unroll_k / 4, b_row, b_col, step_b, !trb, al_b);
gl2sh_a.g_ptr = &a[off_a];
gl2sh_b.g_ptr = &b[off_b];
gl2sh_a.gmem2reg_cpy();
gl2sh_b.gmem2reg_cpy();
int32_t sum[UnrollConfig::unroll_m * UnrollConfig::unroll_n];
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_m; ++i)
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_n; ++j)
sum[i * UnrollConfig::unroll_n + j] = 0;
for (int k_out = k; k_out > 0; k_out -= UnrollConfig::unroll_k) {
gl2sh_a.reg2smem_cpy();
gl2sh_b.reg2smem_cpy();
if (k_out > UnrollConfig::unroll_k) {
gl2sh_a.iter_forward();
gl2sh_b.iter_forward();
if (gl2sh_a.check_bound_row >
k_out - UnrollConfig::unroll_k - idx_k_a * unroll) {
gl2sh_a.check_bound_row =
k_out - UnrollConfig::unroll_k - idx_k_a * unroll;
if (gl2sh_a.check_bound_row < 0)
gl2sh_a.check_bound_row = 0;
}
if (gl2sh_b.check_bound_row >
k_out - UnrollConfig::unroll_k - idx_k_b * unroll) {
gl2sh_b.check_bound_row =
k_out - UnrollConfig::unroll_k - idx_k_b * unroll;
if (gl2sh_b.check_bound_row < 0)
gl2sh_b.check_bound_row = 0;
}
gl2sh_a.gmem2reg_cpy();
gl2sh_b.gmem2reg_cpy();
}
__syncthreads();
if (off_c != -1) {
int32_t reg_a[UnrollConfig::unroll_m], reg_b[UnrollConfig::unroll_n];
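// Each packed int32 in reg_a/reg_b holds four int8 values along K; dot_prod does a 4-way int8 multiply with int32 accumulation into sum (dp4a-style).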
#pragma unroll
for (int k_in = 0; k_in < UnrollConfig::unroll_k / 4 && k_in * 4 < k_out;
++k_in) {
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_m; ++i)
reg_a[i] =
smem_a[(off_w * UnrollConfig::unroll_m + i) *
UnrollConfig::unroll_k / 4 +
k_in];
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_n; ++j)
reg_b[j] =
smem_b[(off_h * UnrollConfig::unroll_n + j) *
UnrollConfig::unroll_k / 4 +
k_in];
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_m; ++i)
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_n; ++j) {
dot_prod(
reg_a[i], reg_b[j], sum[i * UnrollConfig::unroll_n + j],
sum[i * UnrollConfig::unroll_n + j]);
}
}
}
__syncthreads();
}
if (off_c != -1) {
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_m; ++i)
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_n; ++j)
if (tid_x * UnrollConfig::unroll_m + i < m &&
tid_y * UnrollConfig::unroll_n + j < n)
*(ptr_c + i * stc + j) = sum[i * UnrollConfig::unroll_n + j];
}
}
void exec_igemm_8x8x32(
const int8_t* A, const int8_t* B, int32_t* C, const int batch_count,
const int m, const int n, const int k, int ldA, int ldB, int ldC, int stA,
int stB, int stC, bool transA, bool transB, cudaStream_t stream) {
static int const unroll_m = 8, unroll_n = 8, unroll_k = 32, unroll = 4;
typedef ThreadConfig<8, 8> Thread;
typedef UnrollConfig<Thread, unroll_m, unroll_n, unroll_k, unroll> Unroll;
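// Each thread block computes a block_m x block_n tile of C for one batch (grid.z indexes the batch), staging A and B tiles through shared memory.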
dim3 block(Thread::thread_x, Thread::thread_y);
dim3 grid;
grid.x = (m + Unroll::block_m - 1) / Unroll::block_m;
grid.y = (n + Unroll::block_n - 1) / Unroll::block_n;
grid.z = batch_count;
static uint32_t shared_storage =
(Unroll::block_m + Unroll::block_n) * Unroll::unroll_k * sizeof(int8_t);
void (*kern)(
const int8_t* a, int lda, int sta, bool tra, const int8_t* b, int ldb,
int stb, bool trb, int32_t* c, int ldc, int stc, int m, int n, int k) =
batched_8x8x32_kern<Unroll, Thread>;
kern<<<grid, block, shared_storage, stream>>>(
A, ldA, stA, transA, B, ldB, stB, transB, C, ldC, stC, m, n, k);
after_kernel_launch();
}
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
|
417ac04d441cbc3bae5b1a495af2c7ce252cbbe2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_poisson_kernel_stencil;
int xdim0_poisson_kernel_stencil_h = -1;
int ydim0_poisson_kernel_stencil_h = -1;
__constant__ int xdim1_poisson_kernel_stencil;
int xdim1_poisson_kernel_stencil_h = -1;
int ydim1_poisson_kernel_stencil_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y) (x + xdim0_poisson_kernel_stencil * (y))
#define OPS_ACC1(x, y) (x + xdim1_poisson_kernel_stencil * (y))
// user function
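// 2D 5-point stencil: u2 = u + 0.125*(u[-1,0] - 2*u + u[+1,0]) + 0.125*(u[0,-1] - 2*u + u[0,+1])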
__device__
void
poisson_kernel_stencil_gpu(const double *u, double *u2) {
u2[OPS_ACC1(0, 0)] =
((u[OPS_ACC0(-1, 0)] - 2.0f * u[OPS_ACC0(0, 0)] + u[OPS_ACC0(1, 0)]) *
0.125f +
(u[OPS_ACC0(0, -1)] - 2.0f * u[OPS_ACC0(0, 0)] + u[OPS_ACC0(0, 1)]) *
0.125f +
u[OPS_ACC0(0, 0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_poisson_kernel_stencil(const double *__restrict arg0,
double *__restrict arg1, int size0,
int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_poisson_kernel_stencil;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_poisson_kernel_stencil;
if (idx_x < size0 && idx_y < size1) {
poisson_kernel_stencil_gpu(arg0, arg1);
}
}
// host stub function
void ops_par_loop_poisson_kernel_stencil(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1) {
// Timing
double t1, t2, c1, c2;
ops_arg args[2] = {arg0, arg1};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 2, range, 3))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(3, "poisson_kernel_stencil");
OPS_kernels[3].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
if (xdim0 != xdim0_poisson_kernel_stencil_h ||
xdim1 != xdim1_poisson_kernel_stencil_h) {
hipMemcpyToSymbol(xdim0_poisson_kernel_stencil, &xdim0, sizeof(int));
xdim0_poisson_kernel_stencil_h = xdim0;
hipMemcpyToSymbol(xdim1_poisson_kernel_stencil, &xdim1, sizeof(int));
xdim1_poisson_kernel_stencil_h = xdim1;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[2];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args, 2, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[3].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_poisson_kernel_stencil), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], x_size, y_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[3].time += t1 - t2;
}
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[3].mpi_time += t2 - t1;
OPS_kernels[3].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[3].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| 417ac04d441cbc3bae5b1a495af2c7ce252cbbe2.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_poisson_kernel_stencil;
int xdim0_poisson_kernel_stencil_h = -1;
int ydim0_poisson_kernel_stencil_h = -1;
__constant__ int xdim1_poisson_kernel_stencil;
int xdim1_poisson_kernel_stencil_h = -1;
int ydim1_poisson_kernel_stencil_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y) (x + xdim0_poisson_kernel_stencil * (y))
#define OPS_ACC1(x, y) (x + xdim1_poisson_kernel_stencil * (y))
// user function
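// 2D 5-point stencil: u2 = u + 0.125*(u[-1,0] - 2*u + u[+1,0]) + 0.125*(u[0,-1] - 2*u + u[0,+1])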
__device__
void
poisson_kernel_stencil_gpu(const double *u, double *u2) {
u2[OPS_ACC1(0, 0)] =
((u[OPS_ACC0(-1, 0)] - 2.0f * u[OPS_ACC0(0, 0)] + u[OPS_ACC0(1, 0)]) *
0.125f +
(u[OPS_ACC0(0, -1)] - 2.0f * u[OPS_ACC0(0, 0)] + u[OPS_ACC0(0, 1)]) *
0.125f +
u[OPS_ACC0(0, 0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_poisson_kernel_stencil(const double *__restrict arg0,
double *__restrict arg1, int size0,
int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_poisson_kernel_stencil;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_poisson_kernel_stencil;
if (idx_x < size0 && idx_y < size1) {
poisson_kernel_stencil_gpu(arg0, arg1);
}
}
// host stub function
void ops_par_loop_poisson_kernel_stencil(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1) {
// Timing
double t1, t2, c1, c2;
ops_arg args[2] = {arg0, arg1};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 2, range, 3))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(3, "poisson_kernel_stencil");
OPS_kernels[3].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
if (xdim0 != xdim0_poisson_kernel_stencil_h ||
xdim1 != xdim1_poisson_kernel_stencil_h) {
cudaMemcpyToSymbol(xdim0_poisson_kernel_stencil, &xdim0, sizeof(int));
xdim0_poisson_kernel_stencil_h = xdim0;
cudaMemcpyToSymbol(xdim1_poisson_kernel_stencil, &xdim1, sizeof(int));
xdim1_poisson_kernel_stencil_h = xdim1;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[2];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args, 2, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[3].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_poisson_kernel_stencil<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], x_size, y_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[3].time += t1 - t2;
}
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[3].mpi_time += t2 - t1;
OPS_kernels[3].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[3].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
752280587103ce9e9b570d8f1b91d76c5cd019c2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Author: Lee Namgoo
* E-Mail: [email protected]
*/
#include <hip/hip_runtime.h>
#include <opencv2/opencv.hpp>
#include <time.h>
#include <math.h>
#include <iostream>
static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number)
{
if (err != hipSuccess) {
fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, hipGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
#define CLIP(a) ((a) < 0 ? 0 : (a) > 255 ? 255 : (a)) /* signed comparison so negative values clamp to 0, not 255 */
/**
* The __global__ qualifier declares a function as being a kernel.
* 1. Executed on the device
* 2. Callable from the host
* 3. Must have void return type
* 4. Must specify its execution configuration
* 5. Call to this function is asynchronous, it returns before the device has
* completed its execution.
*/
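// SobelX: applies the 3x3 kernel [1 2 1; 0 0 0; -1 -2 -1] (rows above minus rows below), clamps to [0,255], and passes border pixels through unchanged.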
__global__ void
SobelX(unsigned char* const input, unsigned char* const output, const int width,
const int height, const int input_step, const int output_step)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int input_center = input_step * yIndex + xIndex;
const int output_index = output_step * yIndex + xIndex;
if ((xIndex == 0) || (xIndex == width - 1)
|| (yIndex == 0) || (yIndex == height - 1)) {
output[output_index] = input[input_center];
return;
}
if ((xIndex < width) && (yIndex < height)) {
int tmp = input_center - input_step;
int tmp2 = input_center + input_step;
int sobel_x = input[tmp - 1] + input[tmp] * 2 + input[tmp + 1]
- input[tmp2 - 1] - input[tmp2] * 2 - input[tmp2 + 1];
output[output_index] = CLIP(sobel_x);
}
}
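// SobelY: applies the 3x3 kernel [1 0 -1; 2 0 -2; 1 0 -1] (columns left minus right), clamps to [0,255], and passes border pixels through unchanged.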
__global__ void
SobelY(unsigned char* const input, unsigned char* const output, const int width,
const int height, const int input_step, const int output_step)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int input_center = input_step * yIndex + xIndex;
const int output_index = output_step * yIndex + xIndex;
if ((xIndex == 0) || (xIndex == width - 1)
|| (yIndex == 0) || (yIndex == height - 1)) {
output[output_index] = input[input_center];
return;
}
if ((xIndex < width) && (yIndex < height)) {
int tmp = input_center - 1;
int tmp2 = input_center + 1;
int sobel_y = input[tmp - input_step] + input[tmp] * 2 + input[tmp + input_step]
- input[tmp2 - input_step] - input[tmp2] * 2 - input[tmp2 + input_step];
output[output_index] = CLIP(sobel_y);
}
}
__global__ void
Sobel(unsigned char* const input, unsigned char* const output, const int width,
const int height, const int input_step, const int output_step)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int input_center = input_step * yIndex + xIndex;
const int output_index = output_step * yIndex + xIndex;
if ((xIndex == 0) || (xIndex == width - 1)
|| (yIndex == 0) || (yIndex == height - 1)) {
output[output_index] = input[input_center];
return;
}
const int SobelX[9] = {1, 2, 1, 0, 0, 0, -1, -2, -1};
const int SobelY[9] = {1, 0, -1, 2, 0, -2, 1, 0, -1};
if ((xIndex < width) && (yIndex < height)) {
/* Assembly Level Optimization Possible */
int sumX = 0, sumY = 0;
for (int i = -1; i <= 1; i++) {
for (int j = -1; j <= 1; j++) {
int data = input[input_center + i * input_step + j];
sumX += data * SobelX[(i + 1) * 3 + j + 1];
sumY += data * SobelY[(i + 1) * 3 + j + 1];
}
}
output[output_index] = sqrt((double)(sumX * sumX + sumY * sumY) / 32);
}
}
/**
* The __host__ qualifier declares a function that is
* 1. Executed on the host
* 2. Callable from the host only
*/
__host__ bool
sobel_cuda(const cv::Mat& h_input, cv::Mat& h_output, int type)
{
unsigned char* d_input;
unsigned char* d_output;
/* Note that input and output Mats' step may differ */
const int input_size = h_input.step * h_input.rows;
const int output_size = h_output.step * h_output.rows;
SAFE_CALL(hipMalloc<unsigned char>(&d_input, input_size), "CUDA Malloc Failed");
SAFE_CALL(hipMalloc<unsigned char>(&d_output, output_size), "CUDA Malloc Failed");
SAFE_CALL(hipMemcpy(d_input, h_input.ptr(), input_size, hipMemcpyHostToDevice),
"CUDA Memcpy Host To Device Failed");
const dim3 threadsPerBlock(32, 32); /* upper limit : 1024 */
const dim3 blocksPerGrid(
(h_output.cols + threadsPerBlock.x - 1) / threadsPerBlock.x,
(h_output.rows + threadsPerBlock.y - 1) / threadsPerBlock.y);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
clock_t begin = clock();
switch (type) {
case 0:
hipEventRecord(start);
hipLaunchKernelGGL(( Sobel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_input, d_output, h_output.cols,
h_output.rows, h_input.step, h_output.step);
hipEventRecord(stop);
break;
case 1:
hipLaunchKernelGGL(( SobelX), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_input, d_output, h_output.cols,
h_output.rows, h_input.step, h_output.step);
break;
case 2:
hipLaunchKernelGGL(( SobelY), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_input, d_output, h_output.cols,
h_output.rows, h_input.step, h_output.step);
break;
}
/* Wait until CUDA is done */
SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed");
SAFE_CALL(hipMemcpy(h_output.ptr(), d_output, output_size, hipMemcpyDeviceToHost),
"CUDA Memcpy Device To Host Failed");
std::cout << "Kernel execution time : " << (float)(clock() - begin) /
(CLOCKS_PER_SEC / 1000) << "ms\n";
//hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << "Kernel execution time (CUDA event) : " << milliseconds <<
"ms\n";
SAFE_CALL(hipFree(d_output), "CUDA Free Failed");
SAFE_CALL(hipFree(d_input), "CUDA Free Failed");
return true;
}
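/*
 * Illustrative usage sketch only (not part of the original file): one way the
 * sobel_cuda() host function above might be driven. The helper name, the image
 * path argument and the grayscale read are assumptions for the example.
 */
static bool exampleRunSobel(const char* path)
{
    cv::Mat input = cv::imread(path, cv::IMREAD_GRAYSCALE);
    if (input.empty())
        return false;
    cv::Mat output(input.rows, input.cols, CV_8UC1);
    /* type 0 = combined Sobel, 1 = SobelX, 2 = SobelY (see the switch above) */
    return sobel_cuda(input, output, 0);
}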
| 752280587103ce9e9b570d8f1b91d76c5cd019c2.cu | /*
* Author: Lee Namgoo
* E-Mail: [email protected]
*/
#include <cuda_runtime.h>
#include <opencv2/opencv.hpp>
#include <time.h>
#include <math.h>
#include <iostream>
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number)
{
if (err != cudaSuccess) {
fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, cudaGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
#define CLIP(a) ((a) < 0 ? 0 : (a) > 255 ? 255 : (a)) /* signed comparison so negative gradients clamp to 0 rather than wrapping to 255 */
/**
* The __global__ qualifier declares a function as being a kernel.
* 1. Executed on the device
* 2. Callable from the host
* 3. Must have void return type
* 4. Must specify its execution configuration
* 5. Call to this function is asynchronous, it returns before the device has
* completed its execution.
*/
__global__ void
SobelX(unsigned char* const input, unsigned char* const output, const int width,
const int height, const int input_step, const int output_step)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int input_center = input_step * yIndex + xIndex;
const int output_index = output_step * yIndex + xIndex;
if ((xIndex == 0) || (xIndex == width - 1)
|| (yIndex == 0) || (yIndex == height - 1)) {
output[output_index] = input[input_center];
return;
}
if ((xIndex < width) && (yIndex < height)) {
int tmp = input_center - input_step;
int tmp2 = input_center + input_step;
int sobel_x = input[tmp - 1] + input[tmp] * 2 + input[tmp + 1]
- input[tmp2 - 1] - input[tmp2] * 2 - input[tmp2 + 1];
output[output_index] = CLIP(sobel_x);
}
}
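/*
 * Illustrative sketch only (not part of the original file): a minimal host-side
 * launch of the SobelX kernel above, showing the execution configuration and the
 * asynchronous launch described in the comment block before the kernels. The
 * 16x16 block size, the helper name and the explicit synchronization are
 * assumptions for the example, not values taken from this project.
 */
static void exampleLaunchSobelX(unsigned char* d_in, unsigned char* d_out,
                                int width, int height, int in_step, int out_step)
{
    const dim3 block(16, 16);
    const dim3 grid((width + block.x - 1) / block.x,
                    (height + block.y - 1) / block.y);
    /* The launch returns immediately; the kernel runs asynchronously. */
    SobelX<<<grid, block>>>(d_in, d_out, width, height, in_step, out_step);
    /* Block the host until the kernel has finished. */
    SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed");
}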
__global__ void
SobelY(unsigned char* const input, unsigned char* const output, const int width,
const int height, const int input_step, const int output_step)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int input_center = input_step * yIndex + xIndex;
const int output_index = output_step * yIndex + xIndex;
if ((xIndex == 0) || (xIndex == width - 1)
|| (yIndex == 0) || (yIndex == height - 1)) {
output[output_index] = input[input_center];
return;
}
if ((xIndex < width) && (yIndex < height)) {
int tmp = input_center - 1;
int tmp2 = input_center + 1;
int sobel_y = input[tmp - input_step] + input[tmp] * 2 + input[tmp + input_step]
- input[tmp2 - input_step] - input[tmp2] * 2 - input[tmp2 + input_step];
output[output_index] = CLIP(sobel_y);
}
}
__global__ void
Sobel(unsigned char* const input, unsigned char* const output, const int width,
const int height, const int input_step, const int output_step)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int input_center = input_step * yIndex + xIndex;
const int output_index = output_step * yIndex + xIndex;
if ((xIndex == 0) || (xIndex == width - 1)
|| (yIndex == 0) || (yIndex == height - 1)) {
output[output_index] = input[input_center];
return;
}
const int SobelX[9] = {1, 2, 1, 0, 0, 0, -1, -2, -1};
const int SobelY[9] = {1, 0, -1, 2, 0, -2, 1, 0, -1};
if ((xIndex < width) && (yIndex < height)) {
/* Assembly Level Optimization Possible */
int sumX = 0, sumY = 0;
for (int i = -1; i <= 1; i++) {
for (int j = -1; j <= 1; j++) {
int data = input[input_center + i * input_step + j];
sumX += data * SobelX[(i + 1) * 3 + j + 1];
sumY += data * SobelY[(i + 1) * 3 + j + 1];
}
}
output[output_index] = sqrt((double)(sumX * sumX + sumY * sumY) / 32);
}
}
/**
* The __host__ qualifier declares a function that is
* 1. Executed on the host
* 2. Callable from the host only
*/
__host__ bool
sobel_cuda(const cv::Mat& h_input, cv::Mat& h_output, int type)
{
unsigned char* d_input;
unsigned char* d_output;
/* Note that input and output Mats' step may differ */
const int input_size = h_input.step * h_input.rows;
const int output_size = h_output.step * h_output.rows;
SAFE_CALL(cudaMalloc<unsigned char>(&d_input, input_size), "CUDA Malloc Failed");
SAFE_CALL(cudaMalloc<unsigned char>(&d_output, output_size), "CUDA Malloc Failed");
SAFE_CALL(cudaMemcpy(d_input, h_input.ptr(), input_size, cudaMemcpyHostToDevice),
"CUDA Memcpy Host To Device Failed");
const dim3 threadsPerBlock(32, 32); /* upper limit : 1024 */
const dim3 blocksPerGrid(
(h_output.cols + threadsPerBlock.x - 1) / threadsPerBlock.x,
(h_output.rows + threadsPerBlock.y - 1) / threadsPerBlock.y);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
clock_t begin = clock();
switch (type) {
case 0:
cudaEventRecord(start);
Sobel<<<blocksPerGrid, threadsPerBlock>>>(d_input, d_output, h_output.cols,
h_output.rows, h_input.step, h_output.step);
cudaEventRecord(stop);
break;
case 1:
SobelX<<<blocksPerGrid, threadsPerBlock>>>(d_input, d_output, h_output.cols,
h_output.rows, h_input.step, h_output.step);
break;
case 2:
SobelY<<<blocksPerGrid, threadsPerBlock>>>(d_input, d_output, h_output.cols,
h_output.rows, h_input.step, h_output.step);
break;
}
/* Wait until CUDA is done */
SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed");
SAFE_CALL(cudaMemcpy(h_output.ptr(), d_output, output_size, cudaMemcpyDeviceToHost),
"CUDA Memcpy Device To Host Failed");
std::cout << "Kernel execution time : " << (float)(clock() - begin) /
(CLOCKS_PER_SEC / 1000) << "ms\n";
//cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << "Kernel execution time (CUDA event) : " << milliseconds <<
"ms\n";
SAFE_CALL(cudaFree(d_output), "CUDA Free Failed");
SAFE_CALL(cudaFree(d_input), "CUDA Free Failed");
return true;
}
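/*
 * Illustrative usage sketch only (not part of the original file): one way the
 * sobel_cuda() host function above might be driven. The helper name, the image
 * path argument and the grayscale read are assumptions for the example.
 */
static bool exampleRunSobel(const char* path)
{
    cv::Mat input = cv::imread(path, cv::IMREAD_GRAYSCALE);
    if (input.empty())
        return false;
    cv::Mat output(input.rows, input.cols, CV_8UC1);
    /* type 0 = combined Sobel, 1 = SobelX, 2 = SobelY (see the switch above) */
    return sobel_cuda(input, output, 0);
}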
|
e83218ffc1ca21654d4b157bcb8196114b89548a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2020 ETH Zurich. All Rights Reserved.
#include "wall_with_velocity.h"
#include "common_kernels.h"
#include "stationary_walls/box.h"
#include "stationary_walls/cylinder.h"
#include "stationary_walls/plane.h"
#include "stationary_walls/sdf.h"
#include "stationary_walls/sphere.h"
#include "velocity_field/oscillate.h"
#include "velocity_field/rotate.h"
#include "velocity_field/translate.h"
#include <mirheo/core/celllist.h>
#include <mirheo/core/logger.h>
#include <mirheo/core/pvs/object_vector.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/utils/root_finder.h>
#include <cassert>
#include <cmath>
#include <fstream>
#include <texture_types.h>
namespace mirheo
{
template<typename VelocityField>
__global__ void imposeVelField(PVview view, const VelocityField velField)
{
const int pid = blockIdx.x * blockDim.x + threadIdx.x;
if (pid >= view.size) return;
Particle p(view.readParticle(pid));
p.u = velField(p.r);
view.writeParticle(pid, p);
}
//===============================================================================================
// Member functions
//===============================================================================================
template<class InsideWallChecker, class VelocityField>
WallWithVelocity<InsideWallChecker, VelocityField>::WallWithVelocity
(const MirState *state, const std::string& name, InsideWallChecker&& insideWallChecker, VelocityField&& velField) :
SimpleStationaryWall<InsideWallChecker>(state, name, std::move(insideWallChecker)),
velField_(std::move(velField))
{}
template<class InsideWallChecker, class VelocityField>
void WallWithVelocity<InsideWallChecker, VelocityField>::setup(MPI_Comm& comm)
{
info("Setting up wall %s", this->getCName());
CUDA_Check( hipDeviceSynchronize() );
this->insideWallChecker_.setup(comm, this->getState()->domain);
velField_.setup(this->getState()->currentTime, this->getState()->domain);
CUDA_Check( hipDeviceSynchronize() );
}
template<class InsideWallChecker, class VelocityField>
void WallWithVelocity<InsideWallChecker, VelocityField>::attachFrozen(ParticleVector *pv)
{
SimpleStationaryWall<InsideWallChecker>::attachFrozen(pv);
const int nthreads = 128;
PVview view(pv, pv->local());
SAFE_KERNEL_LAUNCH(
imposeVelField,
getNblocks(view.size, nthreads), nthreads, 0, 0,
view, velField_.handler() );
CUDA_Check( hipDeviceSynchronize() );
}
template<class InsideWallChecker, class VelocityField>
void WallWithVelocity<InsideWallChecker, VelocityField>::bounce(hipStream_t stream)
{
real t = this->getState()->currentTime;
real dt = this->getState()->dt;
velField_.setup(t, this->getState()->domain);
this->bounceForce_.clear(stream);
for (size_t i = 0; i < this->particleVectors_.size(); ++i)
{
auto pv = this->particleVectors_[i];
auto cl = this->cellLists_[i];
auto& bc = this->boundaryCells_[i];
auto view = cl->CellList::getView<PVviewWithOldParticles>();
debug2("Bouncing %d %s particles with wall velocity, %zu boundary cells",
pv->local()->size(), pv->getCName(), bc.size());
const int nthreads = 64;
SAFE_KERNEL_LAUNCH(
bounce_kernels::sdfBounce,
getNblocks(bc.size(), nthreads), nthreads, 0, stream,
view, cl->cellInfo(), bc.devPtr(), bc.size(), dt,
this->insideWallChecker_.handler(),
velField_.handler(),
this->bounceForce_.devPtr());
CUDA_Check( hipPeekAtLastError() );
}
}
template class WallWithVelocity<StationaryWallSphere, VelocityFieldRotate>;
template class WallWithVelocity<StationaryWallCylinder, VelocityFieldRotate>;
template class WallWithVelocity<StationaryWallPlane, VelocityFieldTranslate>;
template class WallWithVelocity<StationaryWallPlane, VelocityFieldOscillate>;
} // namespace mirheo
| e83218ffc1ca21654d4b157bcb8196114b89548a.cu | // Copyright 2020 ETH Zurich. All Rights Reserved.
#include "wall_with_velocity.h"
#include "common_kernels.h"
#include "stationary_walls/box.h"
#include "stationary_walls/cylinder.h"
#include "stationary_walls/plane.h"
#include "stationary_walls/sdf.h"
#include "stationary_walls/sphere.h"
#include "velocity_field/oscillate.h"
#include "velocity_field/rotate.h"
#include "velocity_field/translate.h"
#include <mirheo/core/celllist.h>
#include <mirheo/core/logger.h>
#include <mirheo/core/pvs/object_vector.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/utils/root_finder.h>
#include <cassert>
#include <cmath>
#include <fstream>
#include <texture_types.h>
namespace mirheo
{
template<typename VelocityField>
__global__ void imposeVelField(PVview view, const VelocityField velField)
{
const int pid = blockIdx.x * blockDim.x + threadIdx.x;
if (pid >= view.size) return;
Particle p(view.readParticle(pid));
p.u = velField(p.r);
view.writeParticle(pid, p);
}
//===============================================================================================
// Member functions
//===============================================================================================
template<class InsideWallChecker, class VelocityField>
WallWithVelocity<InsideWallChecker, VelocityField>::WallWithVelocity
(const MirState *state, const std::string& name, InsideWallChecker&& insideWallChecker, VelocityField&& velField) :
SimpleStationaryWall<InsideWallChecker>(state, name, std::move(insideWallChecker)),
velField_(std::move(velField))
{}
template<class InsideWallChecker, class VelocityField>
void WallWithVelocity<InsideWallChecker, VelocityField>::setup(MPI_Comm& comm)
{
info("Setting up wall %s", this->getCName());
CUDA_Check( cudaDeviceSynchronize() );
this->insideWallChecker_.setup(comm, this->getState()->domain);
velField_.setup(this->getState()->currentTime, this->getState()->domain);
CUDA_Check( cudaDeviceSynchronize() );
}
template<class InsideWallChecker, class VelocityField>
void WallWithVelocity<InsideWallChecker, VelocityField>::attachFrozen(ParticleVector *pv)
{
SimpleStationaryWall<InsideWallChecker>::attachFrozen(pv);
const int nthreads = 128;
PVview view(pv, pv->local());
SAFE_KERNEL_LAUNCH(
imposeVelField,
getNblocks(view.size, nthreads), nthreads, 0, 0,
view, velField_.handler() );
CUDA_Check( cudaDeviceSynchronize() );
}
template<class InsideWallChecker, class VelocityField>
void WallWithVelocity<InsideWallChecker, VelocityField>::bounce(cudaStream_t stream)
{
real t = this->getState()->currentTime;
real dt = this->getState()->dt;
velField_.setup(t, this->getState()->domain);
this->bounceForce_.clear(stream);
for (size_t i = 0; i < this->particleVectors_.size(); ++i)
{
auto pv = this->particleVectors_[i];
auto cl = this->cellLists_[i];
auto& bc = this->boundaryCells_[i];
auto view = cl->CellList::getView<PVviewWithOldParticles>();
debug2("Bouncing %d %s particles with wall velocity, %zu boundary cells",
pv->local()->size(), pv->getCName(), bc.size());
const int nthreads = 64;
SAFE_KERNEL_LAUNCH(
bounce_kernels::sdfBounce,
getNblocks(bc.size(), nthreads), nthreads, 0, stream,
view, cl->cellInfo(), bc.devPtr(), bc.size(), dt,
this->insideWallChecker_.handler(),
velField_.handler(),
this->bounceForce_.devPtr());
CUDA_Check( cudaPeekAtLastError() );
}
}
template class WallWithVelocity<StationaryWallSphere, VelocityFieldRotate>;
template class WallWithVelocity<StationaryWallCylinder, VelocityFieldRotate>;
template class WallWithVelocity<StationaryWallPlane, VelocityFieldTranslate>;
template class WallWithVelocity<StationaryWallPlane, VelocityFieldOscillate>;
} // namespace mirheo
|
f581b594724ca4329f304106be7d41de49182db3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <device_launch_parameters.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <ctime>
#include <string>
#include <fstream>
#include <math.h>
using namespace std;
#define rawR 7
#define rawC 840
#define rawL (rawR*rawC)
#define LENGTH 840
#define BATCH 1
#define LENGTHPAD 1024
#define NRANK 2
static __global__ void cufftComplexScale(hipfftComplex *idata, hipfftComplex *odata, const int size, float scale)
{
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
if (threadID < size)
{
odata[threadID].x = idata[threadID].x * scale;
odata[threadID].y = idata[threadID].y * scale;
}
}
int main()
{
int n[NRANK] = {rawR, rawC};
// create arrays
float speed2d[rawR][rawC];
// read raw data
ifstream rawData("../data/speedData.txt");
// generate 2d speed data
if (!(rawData.is_open())){
cout<<"faild to read data." << endl;
}
for (int row=0; row<rawR; row++){
for (int col=0; col<rawC; col++){
rawData >> speed2d[row][col];
}
}
rawData.close();
// print array for debug
// for (int row=0; row<rawR; row++){
// for (int col=0; col<10; col++){
// cout << speed2d[row][col] << '\t';
// }
// cout << '\n';
// }
// host data pointer
hipfftComplex *CompData2d=(hipfftComplex*)malloc(rawC*rawR*sizeof(hipfftComplex));
// 2 d
for (int i=0; i<rawR; i++){
for (int j=0; j<rawC; j++){
CompData2d[i*rawC+j].x = speed2d[i][j];
CompData2d[i*rawC+j].y = 0;
}
}
hipfftComplex *d_fftData; // device data pointer
hipMalloc((void**)&d_fftData,rawC*rawR*sizeof(hipfftComplex));
hipMemcpy(d_fftData,CompData2d,rawC*rawR*sizeof(hipfftComplex),hipMemcpyHostToDevice);
// create the cuda event to count the running time for GPU
hipEvent_t start1, stop1;
hipEventCreate(&start1);
hipEventCreate(&stop1);
hipfftHandle plan;
hipfftPlanMany(&plan, NRANK, n,
NULL, 1, 0,
NULL, 1, 0,
HIPFFT_C2C, BATCH);
// execute kernel
hipfftExecC2C(plan,(hipfftComplex*)d_fftData,(hipfftComplex*)d_fftData,HIPFFT_FORWARD);
hipEventRecord(start1);
hipfftExecC2C(plan, (hipfftComplex*)d_fftData, (hipfftComplex*)d_fftData, HIPFFT_BACKWARD);
dim3 dimBlock(1024);
dim3 dimGrid(6);
hipLaunchKernelGGL(( cufftComplexScale) , dim3(dimGrid), dim3(dimBlock), 0, 0, (hipfftComplex*)d_fftData,(hipfftComplex*)d_fftData,rawC*rawR,1.0f / (rawC*rawR));
hipEventRecord(stop1);
hipEventSynchronize(stop1);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start1, stop1);
cout << "GPU FFT time used: "<< milliseconds << " ms\n";
hipDeviceSynchronize();
hipMemcpy(CompData2d,d_fftData,rawC*rawR*sizeof(hipfftComplex)*BATCH,hipMemcpyDeviceToHost);
//store result to txt
ofstream myFile;
myFile.open("../data/2d_batch_inverse.txt");
for (int i=0; i<rawR; i++){
for (int j=0; j<rawC; j++){
myFile << CompData2d[i*rawC+j].x <<','<< CompData2d[i*rawC+j].y << endl;
}
}
hipfftDestroy(plan);
free(CompData2d);
hipFree(d_fftData);
return 0;
} | f581b594724ca4329f304106be7d41de49182db3.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <cufft.h>
#include <device_launch_parameters.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <ctime>
#include <string>
#include <fstream>
#include <math.h>
using namespace std;
#define rawR 7
#define rawC 840
#define rawL (rawR*rawC)
#define LENGTH 840
#define BATCH 1
#define LENGTHPAD 1024
#define NRANK 2
static __global__ void cufftComplexScale(cufftComplex *idata, cufftComplex *odata, const int size, float scale)
{
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
if (threadID < size)
{
odata[threadID].x = idata[threadID].x * scale;
odata[threadID].y = idata[threadID].y * scale;
}
}
int main()
{
int n[NRANK] = {rawR, rawC};
// create arrays
float speed2d[rawR][rawC];
// read raw data
ifstream rawData("../data/speedData.txt");
// generate 2d speed data
if (!(rawData.is_open())){
cout<<"faild to read data." << endl;
}
for (int row=0; row<rawR; row++){
for (int col=0; col<rawC; col++){
rawData >> speed2d[row][col];
}
}
rawData.close();
// print array for debug
// for (int row=0; row<rawR; row++){
// for (int col=0; col<10; col++){
// cout << speed2d[row][col] << '\t';
// }
// cout << '\n';
// }
// host data pointer
cufftComplex *CompData2d=(cufftComplex*)malloc(rawC*rawR*sizeof(cufftComplex));
// 2 d
for (int i=0; i<rawR; i++){
for (int j=0; j<rawC; j++){
CompData2d[i*rawC+j].x = speed2d[i][j];
CompData2d[i*rawC+j].y = 0;
}
}
cufftComplex *d_fftData; // device data pointer
cudaMalloc((void**)&d_fftData,rawC*rawR*sizeof(cufftComplex));
cudaMemcpy(d_fftData,CompData2d,rawC*rawR*sizeof(cufftComplex),cudaMemcpyHostToDevice);
// create the cuda event to count the running time for GPU
cudaEvent_t start1, stop1;
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cufftHandle plan;
cufftPlanMany(&plan, NRANK, n,
NULL, 1, 0,
NULL, 1, 0,
CUFFT_C2C, BATCH);
// execute kernel
cufftExecC2C(plan,(cufftComplex*)d_fftData,(cufftComplex*)d_fftData,CUFFT_FORWARD);
cudaEventRecord(start1);
cufftExecC2C(plan, (cufftComplex*)d_fftData, (cufftComplex*)d_fftData, CUFFT_INVERSE);
dim3 dimBlock(1024);
dim3 dimGrid(6);
cufftComplexScale <<<dimGrid, dimBlock>>>((cufftComplex*)d_fftData,(cufftComplex*)d_fftData,rawC*rawR,1.0f / (rawC*rawR));
cudaEventRecord(stop1);
cudaEventSynchronize(stop1);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start1, stop1);
cout << "GPU FFT time used: "<< milliseconds << " ms\n";
cudaDeviceSynchronize();
cudaMemcpy(CompData2d,d_fftData,rawC*rawR*sizeof(cufftComplex)*BATCH,cudaMemcpyDeviceToHost);
//store result to txt
ofstream myFile;
myFile.open("../data/2d_batch_inverse.txt");
for (int i=0; i<rawR; i++){
for (int j=0; j<rawC; j++){
myFile << CompData2d[i*rawC+j].x <<','<< CompData2d[i*rawC+j].y << endl;
}
}
cufftDestroy(plan);
free(CompData2d);
cudaFree(d_fftData);
return 0;
} |
2a0a28e04f35fac5f889e8f7fc1560f277b33b32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <fstream>
#include <iostream>
#include <math.h>
#include <string>
#include <vector>
//cuda error checking macros
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != hipSuccess ) \
{printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (hipPeekAtLastError()) != hipSuccess ) \
{printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/*
Atom class and methods
*/
class Atom
{
private:
public:
int atomnum;
int resnum;
std::string code;
double x;
double y;
double z;
//std::vector<double> pos;
std::vector<double> GetXyzPosition(double x, double y, double z);
};
std::vector<double> Atom::GetXyzPosition(double x, double y, double z){
std::vector<double> pos;
pos.push_back(x);
pos.push_back(y);
pos.push_back(z);
return(pos);
}
/*
Protein class and methods
*/
class Protein
{
private:
public:
std::vector<Atom> residues;
// std::int length;
void Setup(std::string inputfile);
double ComputeDistance(Atom a1, Atom a2);
std::vector<double> ContactFeaturizer();
//double ComputeDistanceCuda(Atom a1, Atom a2);
// std::vector<double> ContactFeaturizerCuda();
};
void Protein::Setup(std::string inputfile){
std::ifstream f(inputfile.c_str());
if (f.is_open()) {
std::string klass, r_code, r_resname, chain;
int r_atomnum, r_resnum;
double r_x, r_y, r_z, occ, temp;
std::string elem;
while (f >> klass >> r_atomnum >> r_code >> r_resname
>> chain >> r_resnum >> r_x >> r_y >> r_z
>> occ >> temp >> elem){
Atom temp_atom;
temp_atom.atomnum = r_atomnum;
temp_atom.code = r_code;
temp_atom.resnum = r_resnum;
temp_atom.x = r_x;
temp_atom.y = r_y;
temp_atom.z = r_z;
residues.push_back(temp_atom);
std::cout << r_resnum << std::endl;
}
for(unsigned int k = 0; k < residues.size(); k++){
std::cout << residues[k].x << std::endl;
}
}
}
double Protein::ComputeDistance(Atom a1, Atom a2){
double sum, ans;
sum = pow((a1.x-a2.x),2) + pow((a1.y-a2.y),2) + pow((a1.z-a2.z),2);
// change to times
ans = sqrt(sum)/10.; // Divide by 10 to get Angstroms
return ans;
// leave as squared, change to Angs. later
}
std::vector<double> Protein::ContactFeaturizer(){
std::vector<double> distances;
for(unsigned int i = 0; i < residues.size() - 2; i++){
for(unsigned int j = i+3; j < residues.size(); j++){
distances.push_back(ComputeDistance(residues[i],residues[j]));
}
}
return distances;
}
/*
CU:
divide upper triangle of distance^2 matrix into blocks 1/4
the size of the next largest one
tile: the computing unit should be a square
first, compute the full square
then, compare to a bunch of files
dynamic size based on length of chain and GPU sharedmem size?
max, 10k residues - might not be big enough to notice a difference
in GPU implementation
plan on sending full traj to GPU from the outset
send full trajectory to GPU
memory / compute ratio ~ 1 / 10
compute to memory access ratio
do something that is fx of distance for more computations
quadroupole moments
heavy atom contact featurizers
eventually on the GPU everything will be linearized
*/
/*__global__ std::vector<double> Protein::ContactFeaturizerCuda(int * ){
std::vector<double> distances;
int i = threadIdx.x+blockIdx.x*blockDim.x;
int j = threadIdx.y+blockIdx.y*blockDim.y;
if j>(i+2) && j<residues.size()
}
return distances;
}*/
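/*
 * Illustrative sketch only (not part of the original file): a minimal CUDA kernel
 * for the contact-featurizer idea outlined in the comments above. It assumes the
 * atom coordinates have been flattened into plain device arrays x, y, z of length
 * n, and it writes the squared distance of every pair (i, j) with j >= i + 3 into
 * a dense n*n buffer, launched over a 2D grid as the commented-out draft suggests.
 * The kernel name, argument layout and the "leave it squared" choice are
 * assumptions, not the project's actual design.
 */
__global__ void contactFeaturizerSketch(const double* x, const double* y,
                                        const double* z, double* dist2, int n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if (i < n && j < n && j >= i + 3) {
        double dx = x[i] - x[j];
        double dy = y[i] - y[j];
        double dz = z[i] - z[j];
        /* squared distance; ComputeDistance() above takes sqrt and divides by 10 */
        dist2[i * n + j] = dx * dx + dy * dy + dz * dz;
    }
}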
/*
Execute program
*/
int main(int argc, char *argv[])
{
if (argc != 2)
{
std::cout << "Usage:" << std::endl;
std::cout << " " << argv[0] << " <input file> " << std::endl;
return 0;
}
std::string inputfile = argv[1];
Protein prot;
std::vector<double> distances;
prot.Setup(inputfile);
distances = prot.ContactFeaturizer();
for(unsigned int k = 0; k < distances.size(); k++){
std::cout << distances[k] << std::endl;
}
int npoints = 5;
int size = npoints*3*sizeof(double);
int distances_size = npoints*npoints*sizeof(double); // renamed: 'distances' above already names the result vector
return 0;
}
| 2a0a28e04f35fac5f889e8f7fc1560f277b33b32.cu | #include <fstream>
#include <iostream>
#include <math.h>
#include <string>
#include <vector>
//cuda error checking macros
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/*
Atom class and methods
*/
class Atom
{
private:
public:
int atomnum;
int resnum;
std::string code;
double x;
double y;
double z;
//std::vector<double> pos;
std::vector<double> GetXyzPosition(double x, double y, double z);
};
std::vector<double> Atom::GetXyzPosition(double x, double y, double z){
std::vector<double> pos;
pos.push_back(x);
pos.push_back(y);
pos.push_back(z);
return(pos);
}
/*
Protein class and methods
*/
class Protein
{
private:
public:
std::vector<Atom> residues;
// std::int length;
void Setup(std::string inputfile);
double ComputeDistance(Atom a1, Atom a2);
std::vector<double> ContactFeaturizer();
//double ComputeDistanceCuda(Atom a1, Atom a2);
// std::vector<double> ContactFeaturizerCuda();
};
void Protein::Setup(std::string inputfile){
std::ifstream f(inputfile.c_str());
if (f.is_open()) {
std::string klass, r_code, r_resname, chain;
int r_atomnum, r_resnum;
double r_x, r_y, r_z, occ, temp;
std::string elem;
while (f >> klass >> r_atomnum >> r_code >> r_resname
>> chain >> r_resnum >> r_x >> r_y >> r_z
>> occ >> temp >> elem){
Atom temp_atom;
temp_atom.atomnum = r_atomnum;
temp_atom.code = r_code;
temp_atom.resnum = r_resnum;
temp_atom.x = r_x;
temp_atom.y = r_y;
temp_atom.z = r_z;
residues.push_back(temp_atom);
std::cout << r_resnum << std::endl;
}
for(unsigned int k = 0; k < residues.size(); k++){
std::cout << residues[k].x << std::endl;
}
}
}
double Protein::ComputeDistance(Atom a1, Atom a2){
double sum, ans;
sum = pow((a1.x-a2.x),2) + pow((a1.y-a2.y),2) + pow((a1.z-a2.z),2);
// change to times
ans = sqrt(sum)/10.; // Divide by 10 to get Angstroms
return ans;
// leave as squared, change to Angs. later
}
std::vector<double> Protein::ContactFeaturizer(){
std::vector<double> distances;
for(unsigned int i = 0; i < residues.size() - 2; i++){
for(unsigned int j = i+3; j < residues.size(); j++){
distances.push_back(ComputeDistance(residues[i],residues[j]));
}
}
return distances;
}
/*
CU:
divide upper triangle of distance^2 matrix into blocks 1/4
the size of the next largest one
tile: the computing unit should be a square
first, compute the full square
then, compare to a bunch of files
dynamic size based on length of chain and GPU sharedmem size?
max, 10k residues - might not be big enough to notice a difference
in GPU implementation
plan on sending full traj to GPU from the outset
send full trajectory to GPU
memory / compute ratio ~ 1 / 10
compute to memory access ratio
do something that is fx of distance for more computations
quadroupole moments
heavy atom contact featurizers
eventually on the GPU everything will be linearized
*/
/*__global__ std::vector<double> Protein::ContactFeaturizerCuda(int * ){
std::vector<double> distances;
int i = threadIdx.x+blockIdx.x*blockDim.x;
int j = threadIdx.y+blockIdx.y*blockDim.y;
if j>(i+2) && j<residues.size()
}
return distances;
}*/
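/*
 * Illustrative sketch only (not part of the original file): a minimal CUDA kernel
 * for the contact-featurizer idea outlined in the comments above. It assumes the
 * atom coordinates have been flattened into plain device arrays x, y, z of length
 * n, and it writes the squared distance of every pair (i, j) with j >= i + 3 into
 * a dense n*n buffer, launched over a 2D grid as the commented-out draft suggests.
 * The kernel name, argument layout and the "leave it squared" choice are
 * assumptions, not the project's actual design.
 */
__global__ void contactFeaturizerSketch(const double* x, const double* y,
                                        const double* z, double* dist2, int n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if (i < n && j < n && j >= i + 3) {
        double dx = x[i] - x[j];
        double dy = y[i] - y[j];
        double dz = z[i] - z[j];
        /* squared distance; ComputeDistance() above takes sqrt and divides by 10 */
        dist2[i * n + j] = dx * dx + dy * dy + dz * dz;
    }
}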
/*
Execute program
*/
int main(int argc, char *argv[])
{
if (argc != 2)
{
std::cout << "Usage:" << std::endl;
std::cout << " " << argv[0] << " <input file> " << std::endl;
return 0;
}
std::string inputfile = argv[1];
Protein prot;
std::vector<double> distances;
prot.Setup(inputfile);
distances = prot.ContactFeaturizer();
for(unsigned int k = 0; k < distances.size(); k++){
std::cout << distances[k] << std::endl;
}
int npoints = 5;
int size = npoints*3*sizeof(double);
int distances_size = npoints*npoints*sizeof(double); // renamed: 'distances' above already names the result vector
return 0;
}
|
c4e89e9e49c882c2bc4035b2e786f2b10a0d2f40.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _WORK_H_
#define _WORK_H_
// Base 2 logarithm (expects a positive integer)
template<typename int_t> __host__ int_t
log2(int_t n) {
int_t l = 0;
while(n > 1) {
n >>= 1;
l += 1;
}
return l;
}
template<typename element_t> __host__ void
upSweep(element_t* array, bool* flags, bool* intermediate, int length, int max_threads) {
if(length == 0) return;
dim3 grid(1, 1, 1);
const int d_stop = log2(length) - 1;
for(int d = 0; d < d_stop; ++d)
{
int thread_count = (int) floor(((length - 1) / (1 << (d + 1)))) + 1;
int job_count = 1;
while(thread_count > max_threads)
{
thread_count >>= 1;
job_count <<= 1;
}
dim3 threads(thread_count, 1, 1);
hipLaunchKernelGGL(( upSweepKernel<element_t>), dim3(grid), dim3(threads), 0, 0, array, flags, intermediate, d, job_count);
}
}
template<typename element_t> __host__ void
downSweep(element_t* array, bool* flags, bool* intermediate, int length, int max_threads) {
if(length == 0) return;
array[length - 1] = 0;
dim3 grid(1, 1, 1);
const int d_start = log2(length) - 1;
for(int d = d_start; d >= 0; --d)
{
int thread_count = (int) floor(((length - 1) / (1 << (d + 1)))) + 1;
int job_count = 1;
while(thread_count > max_threads)
{
thread_count >>= 1;
job_count <<= 1;
}
dim3 threads(thread_count, 1, 1);
// downSweepKernel<element_t><<<grid, threads>>>(array, flags, intermediate, d, job_count);
}
}
#endif
| c4e89e9e49c882c2bc4035b2e786f2b10a0d2f40.cu | #ifndef _WORK_H_
#define _WORK_H_
// Base 2 logarithm (expects a positive integer)
template<typename int_t> __host__ int_t
log2(int_t n) {
int_t l = 0;
while(n > 1) {
n >>= 1;
l += 1;
}
return l;
}
template<typename element_t> __host__ void
upSweep(element_t* array, bool* flags, bool* intermediate, int length, int max_threads) {
if(length == 0) return;
dim3 grid(1, 1, 1);
const int d_stop = log2(length) - 1;
for(int d = 0; d < d_stop; ++d)
{
int thread_count = (int) floor(((length - 1) / (1 << (d + 1)))) + 1;
int job_count = 1;
while(thread_count > max_threads)
{
thread_count >>= 1;
job_count <<= 1;
}
dim3 threads(thread_count, 1, 1);
upSweepKernel<element_t><<<grid, threads>>>(array, flags, intermediate, d, job_count);
}
}
template<typename element_t> __host__ void
downSweep(element_t* array, bool* flags, bool* intermediate, int length, int max_threads) {
if(length == 0) return;
array[length - 1] = 0;
dim3 grid(1, 1, 1);
const int d_start = log2(length) - 1;
for(int d = d_start; d >= 0; --d)
{
int thread_count = (int) floor(((length - 1) / (1 << (d + 1)))) + 1;
int job_count = 1;
while(thread_count > max_threads)
{
thread_count >>= 1;
job_count <<= 1;
}
dim3 threads(thread_count, 1, 1);
// downSweepKernel<element_t><<<grid, threads>>>(array, flags, intermediate, d, job_count);
}
}
#endif
|
e04459a62e73a6451c76fe17d5cb769055a80275.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fast_pcl/ndt_gpu/NormalDistributionsTransform.h"
#include "fast_pcl/ndt_gpu/debug.h"
#include <cmath>
#include <iostream>
#include <pcl/common/transforms.h>
namespace gpu {
GNormalDistributionsTransform::GNormalDistributionsTransform()
{
//GRegistration::GRegistration();
gauss_d1_ = gauss_d2_ = 0;
outlier_ratio_ = 0.55;
step_size_ = 0.1;
resolution_ = 1.0f;
trans_probability_ = 0;
double gauss_c1, gauss_c2, gauss_d3;
// Initializes the Gaussian fitting parameters (eq. 6.8) [Magnusson 2009]
gauss_c1 = 10.0 * (1 - outlier_ratio_);
gauss_c2 = outlier_ratio_ / pow (resolution_, 3);
gauss_d3 = -log (gauss_c2);
gauss_d1_ = -log ( gauss_c1 + gauss_c2 ) - gauss_d3;
gauss_d2_ = -2 * log ((-log ( gauss_c1 * exp ( -0.5 ) + gauss_c2 ) - gauss_d3) / gauss_d1_);
transformation_epsilon_ = 0.1;
max_iterations_ = 35;
j_ang_ = MatrixHost(24, 1);
h_ang_ = MatrixHost(45, 1);
dj_ang_ = MatrixDevice(24, 1);
dh_ang_ = MatrixDevice(45, 1);
real_iterations_ = 0;
}
GNormalDistributionsTransform::GNormalDistributionsTransform(const GNormalDistributionsTransform &other)
{
gauss_d1_ = other.gauss_d1_;
gauss_d2_ = other.gauss_d2_;
outlier_ratio_ = other.outlier_ratio_;
j_ang_ = other.j_ang_;
h_ang_ = other.h_ang_;
dj_ang_ = other.dj_ang_;
dh_ang_ = other.dh_ang_;
step_size_ = other.step_size_;
resolution_ = other.resolution_;
trans_probability_ = other.trans_probability_;
real_iterations_ = other.real_iterations_;
voxel_grid_ = other.voxel_grid_;
}
GNormalDistributionsTransform::~GNormalDistributionsTransform()
{
dj_ang_.memFree();
dh_ang_.memFree();
}
void GNormalDistributionsTransform::setStepSize(double step_size)
{
step_size_ = step_size;
}
void GNormalDistributionsTransform::setResolution(float resolution)
{
resolution_ = resolution;
}
void GNormalDistributionsTransform::setOutlierRatio(double olr)
{
outlier_ratio_ = olr;
}
double GNormalDistributionsTransform::getStepSize() const
{
return step_size_;
}
float GNormalDistributionsTransform::getResolution() const
{
return resolution_;
}
double GNormalDistributionsTransform::getOutlierRatio() const
{
return outlier_ratio_;
}
double GNormalDistributionsTransform::getTransformationProbability() const
{
return trans_probability_;
}
int GNormalDistributionsTransform::getRealIterations()
{
return real_iterations_;
}
double GNormalDistributionsTransform::auxilaryFunction_PsiMT(double a, double f_a, double f_0, double g_0, double mu)
{
return (f_a - f_0 - mu * g_0 * a);
}
double GNormalDistributionsTransform::auxilaryFunction_dPsiMT(double g_a, double g_0, double mu)
{
return (g_a - mu * g_0);
}
void GNormalDistributionsTransform::setInputTarget(pcl::PointCloud<pcl::PointXYZI>::Ptr &input)
{
// Copy input map data from the host memory to the GPU memory
GRegistration::setInputTarget(input);
// Build the voxel grid
if (target_points_number_ != 0) {
voxel_grid_.setLeafSize(resolution_, resolution_, resolution_);
voxel_grid_.setInput(target_x_, target_y_, target_z_, target_points_number_);
}
}
void GNormalDistributionsTransform::setInputTarget(pcl::PointCloud<pcl::PointXYZ>::Ptr &input)
{
// Copy input map data from the host memory to the GPU memory
GRegistration::setInputTarget(input);
// Build the voxel grid
if (target_points_number_ != 0) {
voxel_grid_.setLeafSize(resolution_, resolution_, resolution_);
voxel_grid_.setInput(target_x_, target_y_, target_z_, target_points_number_);
}
}
void GNormalDistributionsTransform::computeTransformation(const Eigen::Matrix<float, 4, 4> &guess)
{
nr_iterations_ = 0;
converged_ = false;
double gauss_c1, gauss_c2, gauss_d3;
gauss_c1 = 10 * ( 1 - outlier_ratio_);
gauss_c2 = outlier_ratio_ / pow(resolution_, 3);
gauss_d3 = - log(gauss_c2);
gauss_d1_ = -log(gauss_c1 + gauss_c2) - gauss_d3;
gauss_d2_ = -2 * log((-log(gauss_c1 * exp(-0.5) + gauss_c2) - gauss_d3) / gauss_d1_);
if (guess != Eigen::Matrix4f::Identity()) {
final_transformation_ = guess;
transformPointCloud(x_, y_, z_, trans_x_, trans_y_, trans_z_, points_number_, guess);
}
Eigen::Transform<float, 3, Eigen::Affine, Eigen::ColMajor> eig_transformation;
eig_transformation.matrix() = final_transformation_;
Eigen::Matrix<double, 6, 1> p, delta_p, score_gradient;
Eigen::Vector3f init_translation = eig_transformation.translation();
Eigen::Vector3f init_rotation = eig_transformation.rotation().eulerAngles(0, 1, 2);
p << init_translation(0), init_translation(1), init_translation(2), init_rotation(0), init_rotation(1), init_rotation(2);
Eigen::Matrix<double, 6, 6> hessian;
double score = 0;
double delta_p_norm;
score = computeDerivatives(score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_, p);
int loop_time = 0;
while (!converged_) {
previous_transformation_ = transformation_;
Eigen::JacobiSVD<Eigen::Matrix<double, 6, 6>> sv(hessian, Eigen::ComputeFullU | Eigen::ComputeFullV);
delta_p = sv.solve(-score_gradient);
delta_p_norm = delta_p.norm();
if (delta_p_norm == 0 || delta_p_norm != delta_p_norm) {
trans_probability_ = score / static_cast<double>(points_number_);
converged_ = delta_p_norm == delta_p_norm;
return;
}
delta_p.normalize();
delta_p_norm = computeStepLengthMT(p, delta_p, delta_p_norm, step_size_, transformation_epsilon_ / 2, score, score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_);
delta_p *= delta_p_norm;
transformation_ = (Eigen::Translation<float, 3>(static_cast<float>(delta_p(0)), static_cast<float>(delta_p(1)), static_cast<float>(delta_p(2))) *
Eigen::AngleAxis<float>(static_cast<float>(delta_p(3)), Eigen::Vector3f::UnitX()) *
Eigen::AngleAxis<float>(static_cast<float>(delta_p(4)), Eigen::Vector3f::UnitY()) *
Eigen::AngleAxis<float>(static_cast<float>(delta_p(5)), Eigen::Vector3f::UnitZ())).matrix();
p = p + delta_p;
//Not update visualizer
if (nr_iterations_ > max_iterations_ || (nr_iterations_ && (::fabs(delta_p_norm) < transformation_epsilon_)))
converged_ = true;
nr_iterations_++;
loop_time++;
}
trans_probability_ = score / static_cast<double>(points_number_);
}
/* First step of computing point gradients */
__global__ void computePointGradients(float *x, float *y, float *z, int points_num,
int *valid_points, int valid_points_num,
double *dj_ang,
double *pg00, double *pg11, double *pg22,
double *pg13, double *pg23, double *pg04, double *pg14,
double *pg24, double *pg05, double *pg15, double *pg25)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
__shared__ double j_ang[24];
if (threadIdx.x < 24) {
j_ang[threadIdx.x] = dj_ang[threadIdx.x];
}
__syncthreads();
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
//Original coordinates
double o_x = static_cast<double>(x[pid]);
double o_y = static_cast<double>(y[pid]);
double o_z = static_cast<double>(z[pid]);
//Set the 3x3 block start from (0, 0) to identity matrix
pg00[i] = 1;
pg11[i] = 1;
pg22[i] = 1;
//Compute point derivatives
pg13[i] = o_x * j_ang[0] + o_y * j_ang[1] + o_z * j_ang[2];
pg23[i] = o_x * j_ang[3] + o_y * j_ang[4] + o_z * j_ang[5];
pg04[i] = o_x * j_ang[6] + o_y * j_ang[7] + o_z * j_ang[8];
pg14[i] = o_x * j_ang[9] + o_y * j_ang[10] + o_z * j_ang[11];
pg24[i] = o_x * j_ang[12] + o_y * j_ang[13] + o_z * j_ang[14];
pg05[i] = o_x * j_ang[15] + o_y * j_ang[16] + o_z * j_ang[17];
pg15[i] = o_x * j_ang[18] + o_y * j_ang[19] + o_z * j_ang[20];
pg25[i] = o_x * j_ang[21] + o_y * j_ang[22] + o_z * j_ang[23];
}
}
/* First step of computing point hessians */
__global__ void computePointHessian0(float *x, float *y, float *z, int points_num,
int *valid_points, int valid_points_num,
double *dh_ang,
double *ph93, double *ph103, double *ph113,
double *ph123, double *ph94, double *ph133,
double *ph104, double *ph143, double *ph114,
double *ph153, double *ph95, double *ph163,
double *ph105, double *ph173, double *ph115)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
__shared__ double h_ang[18];
if (threadIdx.x < 18) {
h_ang[threadIdx.x] = dh_ang[threadIdx.x];
}
__syncthreads();
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
//Original coordinates
double o_x = static_cast<double>(x[pid]);
double o_y = static_cast<double>(y[pid]);
double o_z = static_cast<double>(z[pid]);
ph93[i] = 0;
ph103[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2];
ph113[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5];
ph123[i] = ph94[i] = 0;
ph133[i] = ph104[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8];
ph143[i] = ph114[i] = o_x * h_ang[9] + o_y * h_ang[10] + o_z * h_ang[11];
ph153[i] = ph95[i] = 0;
ph163[i] = ph105[i] = o_x * h_ang[12] + o_y * h_ang[13] + o_z * h_ang[14];
ph173[i] = ph115[i] = o_x * h_ang[15] + o_y * h_ang[16] + o_z * h_ang[17];
}
}
__global__ void computePointHessian1(float *x, float *y, float *z, int points_num,
int *valid_points, int valid_points_num,
double *dh_ang,
double *ph124, double *ph134, double *ph144,
double *ph154, double *ph125, double *ph164,
double *ph135, double *ph174, double *ph145)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
__shared__ double h_ang[18];
if (threadIdx.x < 18) {
h_ang[threadIdx.x] = dh_ang[18 + threadIdx.x];
}
__syncthreads();
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
//Original coordinates
double o_x = static_cast<double>(x[pid]);
double o_y = static_cast<double>(y[pid]);
double o_z = static_cast<double>(z[pid]);
ph124[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2];
ph134[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5];
ph144[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8];
ph154[i] = ph125[i] = o_x * h_ang[9] + o_y * h_ang[10] + o_z * h_ang[11];
ph164[i] = ph135[i] = o_x * h_ang[12] + o_y * h_ang[13] + o_z * h_ang[14];
ph174[i] = ph145[i] = o_x * h_ang[15] + o_y * h_ang[16] + o_z * h_ang[17];
}
}
__global__ void computePointHessian2(float *x, float *y, float *z, int points_num,
int *valid_points, int valid_points_num,
double *dh_ang,
double *ph155, double *ph165, double *ph175)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
__shared__ double h_ang[9];
if (threadIdx.x < 9) {
h_ang[threadIdx.x] = dh_ang[36 + threadIdx.x];
}
__syncthreads();
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
//Original coordinates
double o_x = static_cast<double>(x[pid]);
double o_y = static_cast<double>(y[pid]);
double o_z = static_cast<double>(z[pid]);
ph155[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2];
ph165[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5];
ph175[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8];
}
}
/* compute score_inc list for input points.
* The final score_inc is calculated by a reduction sum
* on this score_inc list. */
__global__ void computeScoreList(int *starting_voxel_id, int *voxel_id, int valid_points_num,
double *e_x_cov_x, double gauss_d1, double *score)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < valid_points_num; i += stride) {
double score_inc = 0;
for (int vid = starting_voxel_id[i]; vid < starting_voxel_id[i + 1]; vid++) {
double tmp_ex = e_x_cov_x[vid];
score_inc += (tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex) ? 0 : -gauss_d1 * tmp_ex;
}
score[i] = score_inc;
}
}
/* First step to compute score gradient list for input points */
__global__ void computeScoreGradientList(float *trans_x, float *trans_y, float *trans_z,
int *valid_points,
int *starting_voxel_id, int *voxel_id, int valid_points_num,
double *centroid_x, double *centroid_y, double *centroid_z,
int voxel_num, double *e_x_cov_x,
double *cov_dxd_pi, double gauss_d1, int valid_voxel_num,
double *score_gradients)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int col = blockIdx.y;
if (col < 6) {
double *sg = score_gradients + col * valid_points_num;
double *cov_dxd_pi_mat0 = cov_dxd_pi + col * valid_voxel_num;
double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num;
double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
double d_x = static_cast<double>(trans_x[pid]);
double d_y = static_cast<double>(trans_y[pid]);
double d_z = static_cast<double>(trans_z[pid]);
double tmp_sg = 0.0;
for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
int vid = voxel_id[j];
double tmp_ex = e_x_cov_x[j];
if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) {
tmp_ex *= gauss_d1;
tmp_sg += ((d_x - centroid_x[vid]) * cov_dxd_pi_mat0[j] + (d_y - centroid_y[vid]) * cov_dxd_pi_mat1[j] + (d_z - centroid_z[vid]) * cov_dxd_pi_mat2[j]) * tmp_ex;
}
}
sg[i] = tmp_sg;
}
}
}
/* Intermediate step to compute e_x_cov_x */
__global__ void computeExCovX(float *trans_x, float *trans_y, float *trans_z, int *valid_points,
int *starting_voxel_id, int *voxel_id, int valid_points_num,
double *centr_x, double *centr_y, double *centr_z,
double gauss_d1, double gauss_d2,
double *e_x_cov_x,
double *icov00, double *icov01, double *icov02,
double *icov10, double *icov11, double *icov12,
double *icov20, double *icov21, double *icov22)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
double d_x = static_cast<double>(trans_x[pid]);
double d_y = static_cast<double>(trans_y[pid]);
double d_z = static_cast<double>(trans_z[pid]);
double t_x, t_y, t_z;
for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
int vid = voxel_id[j];
t_x = d_x - centr_x[vid];
t_y = d_y - centr_y[vid];
t_z = d_z - centr_z[vid];
e_x_cov_x[j] = exp(-gauss_d2 * ((t_x * icov00[vid] + t_y * icov01[vid] + t_z * icov02[vid]) * t_x
+ ((t_x * icov10[vid] + t_y * icov11[vid] + t_z * icov12[vid]) * t_y)
+ ((t_x * icov20[vid] + t_y * icov21[vid] + t_z * icov22[vid]) * t_z)) / 2.0);
}
}
}
/* update e_x_cov_x - Reusable portion of Equation 6.12 and 6.13 [Magnusson 2009] */
__global__ void updateExCovX(double *e_x_cov_x, double gauss_d2, int valid_voxel_num)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < valid_voxel_num; i += stride) {
e_x_cov_x[i] *= gauss_d2;
}
}
/* compute cov_dxd_pi as reusable portion of Equation 6.12 and 6.13 [Magnusson 2009]*/
__global__ void computeCovDxdPi(int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num,
double *inverse_covariance, int voxel_num,
double gauss_d1, double gauss_d2, double *point_gradients,
double *cov_dxd_pi, int valid_voxel_num)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int row = blockIdx.y;
int col = blockIdx.z;
if (row < 3 && col < 6) {
double *icov0 = inverse_covariance + row * 3 * voxel_num;
double *icov1 = icov0 + voxel_num;
double *icov2 = icov1 + voxel_num;
double *cov_dxd_pi_tmp = cov_dxd_pi + (row * 6 + col) * valid_voxel_num;
double *pg_tmp0 = point_gradients + col * valid_points_num;
double *pg_tmp1 = pg_tmp0 + 6 * valid_points_num;
double *pg_tmp2 = pg_tmp1 + 6 * valid_points_num;
for (int i = id; i < valid_points_num; i += stride) {
double pg0 = pg_tmp0[i];
double pg1 = pg_tmp1[i];
double pg2 = pg_tmp2[i];
for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
int vid = voxel_id[j];
cov_dxd_pi_tmp[j] = icov0[vid] * pg0 + icov1[vid] * pg1 + icov2[vid] * pg2;
}
}
}
}
/* First step to compute hessian list for input points */
__global__ void computeHessianListS0(float *trans_x, float *trans_y, float *trans_z,
int *valid_points,
int *starting_voxel_id, int *voxel_id, int valid_points_num,
double *centroid_x, double *centroid_y, double *centroid_z,
double *icov00, double *icov01, double *icov02,
double *icov10, double *icov11, double *icov12,
double *icov20, double *icov21, double *icov22,
double *point_gradients0, double *point_gradients1, double *point_gradients2,
double *tmp_hessian,
int valid_voxel_num)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int col = blockIdx.y;
if (col < 6) {
double *tmp_pg0 = point_gradients0 + col * valid_points_num;
double *tmp_pg1 = tmp_pg0 + 6 * valid_points_num;
double *tmp_pg2 = tmp_pg1 + 6 * valid_points_num;
double *tmp_h = tmp_hessian + col * valid_voxel_num;
for (int i = id; i < valid_points_num && col < 6; i += stride) {
int pid = valid_points[i];
double d_x = static_cast<double>(trans_x[pid]);
double d_y = static_cast<double>(trans_y[pid]);
double d_z = static_cast<double>(trans_z[pid]);
double pg0 = tmp_pg0[i];
double pg1 = tmp_pg1[i];
double pg2 = tmp_pg2[i];
for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
int vid = voxel_id[j];
tmp_h[j] = (d_x - centroid_x[vid]) * (icov00[vid] * pg0 + icov01[vid] * pg1 + icov02[vid] * pg2)
+ (d_y - centroid_y[vid]) * (icov10[vid] * pg0 + icov11[vid] * pg1 + icov12[vid] * pg2)
+ (d_z - centroid_z[vid]) * (icov20[vid] * pg0 + icov21[vid] * pg1 + icov22[vid] * pg2);
}
}
}
}
/* Fourth step to compute hessian list */
__global__ void computeHessianListS1(float *trans_x, float *trans_y, float *trans_z,
int *valid_points,
int *starting_voxel_id, int *voxel_id, int valid_points_num,
double *centroid_x, double *centroid_y, double *centroid_z,
double gauss_d1, double gauss_d2, double *hessians,
double *e_x_cov_x, double *tmp_hessian, double *cov_dxd_pi,
double *point_gradients,
int valid_voxel_num)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int row = blockIdx.y;
int col = blockIdx.z;
if (row < 6 && col < 6) {
double *cov_dxd_pi_mat0 = cov_dxd_pi + row * valid_voxel_num;
double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num;
double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num;
double *tmp_h = tmp_hessian + col * valid_voxel_num;
double *h = hessians + (row * 6 + col) * valid_points_num;
double *tmp_pg0 = point_gradients + col * valid_points_num;
double *tmp_pg1 = tmp_pg0 + 6 * valid_points_num;
double *tmp_pg2 = tmp_pg1 + 6 * valid_points_num;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
double d_x = static_cast<double>(trans_x[pid]);
double d_y = static_cast<double>(trans_y[pid]);
double d_z = static_cast<double>(trans_z[pid]);
double pg0 = tmp_pg0[i];
double pg1 = tmp_pg1[i];
double pg2 = tmp_pg2[i];
double final_hessian = 0.0;
for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
//Transformed coordinates
int vid = voxel_id[j];
double tmp_ex = e_x_cov_x[j];
if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) {
double cov_dxd0 = cov_dxd_pi_mat0[j];
double cov_dxd1 = cov_dxd_pi_mat1[j];
double cov_dxd2 = cov_dxd_pi_mat2[j];
tmp_ex *= gauss_d1;
final_hessian += -gauss_d2 * ((d_x - centroid_x[vid]) * cov_dxd0 + (d_y - centroid_y[vid]) * cov_dxd1 + (d_z - centroid_z[vid]) * cov_dxd2) * tmp_h[j] * tmp_ex;
final_hessian += (pg0 * cov_dxd0 + pg1 * cov_dxd1 + pg2 * cov_dxd2) * tmp_ex;
}
}
h[i] = final_hessian;
}
}
}
__global__ void computeHessianListS2(float *trans_x, float *trans_y, float *trans_z,
int *valid_points,
int *starting_voxel_id, int *voxel_id, int valid_points_num,
double *centroid_x, double *centroid_y, double *centroid_z,
double gauss_d1, double *e_x_cov_x,
double *icov00, double *icov01, double *icov02,
double *icov10, double *icov11, double *icov12,
double *icov20, double *icov21, double *icov22,
double *point_hessians, double *hessians,
int valid_voxel_num)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int row = blockIdx.y;
int col = blockIdx.z;
if (row < 6 && col < 6) {
double *h = hessians + (row * 6 + col) * valid_points_num;
double *tmp_ph0 = point_hessians + ((3 * row) * 6 + col) * valid_points_num;
double *tmp_ph1 = tmp_ph0 + 6 * valid_points_num;
double *tmp_ph2 = tmp_ph1 + 6 * valid_points_num;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
double d_x = static_cast<double>(trans_x[pid]);
double d_y = static_cast<double>(trans_y[pid]);
double d_z = static_cast<double>(trans_z[pid]);
double ph0 = tmp_ph0[i];
double ph1 = tmp_ph1[i];
double ph2 = tmp_ph2[i];
double final_hessian = h[i];
for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
//Transformed coordinates
int vid = voxel_id[j];
double tmp_ex = e_x_cov_x[j];
if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) {
tmp_ex *= gauss_d1;
final_hessian += (d_x - centroid_x[vid]) * (icov00[vid] * ph0 + icov01[vid] * ph1 + icov02[vid] * ph2) * tmp_ex;
final_hessian += (d_y - centroid_y[vid]) * (icov10[vid] * ph0 + icov11[vid] * ph1 + icov12[vid] * ph2) * tmp_ex;
final_hessian += (d_z - centroid_z[vid]) * (icov20[vid] * ph0 + icov21[vid] * ph1 + icov22[vid] * ph2) * tmp_ex;
}
}
h[i] = final_hessian;
}
}
}
/* Compute sum of a list of matrices */
__global__ void matrixSum(double *matrix_list, int full_size, int half_size, int rows, int cols, int offset)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int row = blockIdx.y;
int col = blockIdx.z;
for (int i = index; i < half_size && row < rows && col < cols; i += stride) {
MatrixDevice left(rows, cols, offset, matrix_list + i);
double *right_ptr = (i + half_size < full_size) ? matrix_list + i + half_size : NULL;
MatrixDevice right(rows, cols, offset, right_ptr);
if (right_ptr != NULL) {
left(row, col) += right(row, col);
}
}
}
/* Compute sum of score_inc list */
__global__ void sumScore(double *score, int full_size, int half_size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < half_size; i += stride) {
score[i] += (i + half_size < full_size) ? score[i + half_size] : 0;
}
}
double GNormalDistributionsTransform::computeDerivatives(Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian,
float *trans_x, float *trans_y, float *trans_z,
int points_num, Eigen::Matrix<double, 6, 1> &pose, bool compute_hessian)
{
MatrixHost p(6, 1);
for (int i = 0; i < 6; i++) {
p(i) = pose(i, 0);
}
score_gradient.setZero ();
hessian.setZero ();
//Compute Angle Derivatives
computeAngleDerivatives(p);
//Radius Search
int *valid_points, *voxel_id, *starting_voxel_id;
int valid_voxel_num, valid_points_num;
valid_points = voxel_id = starting_voxel_id = NULL;
voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX, &valid_points, &starting_voxel_id, &voxel_id, &valid_voxel_num, &valid_points_num);
double *covariance = voxel_grid_.getCovarianceList();
double *inverse_covariance = voxel_grid_.getInverseCovarianceList();
double *centroid = voxel_grid_.getCentroidList();
int *points_per_voxel = voxel_grid_.getPointsPerVoxelList();
int voxel_num = voxel_grid_.getVoxelNum();
if (valid_points_num == 0)
return 0;
//Update score gradient and hessian matrix
double *gradients, *hessians, *point_gradients, *point_hessians, *score;
checkCudaErrors(hipMalloc(&gradients, sizeof(double) * valid_points_num * 6));
checkCudaErrors(hipMalloc(&hessians, sizeof(double) * valid_points_num * 6 * 6));
checkCudaErrors(hipMalloc(&point_gradients, sizeof(double) * valid_points_num * 3 * 6));
checkCudaErrors(hipMalloc(&point_hessians, sizeof(double) * valid_points_num * 18 * 6));
checkCudaErrors(hipMalloc(&score, sizeof(double) * valid_points_num));
checkCudaErrors(hipMemset(gradients, 0, sizeof(double) * valid_points_num * 6));
checkCudaErrors(hipMemset(hessians, 0, sizeof(double) * valid_points_num * 6 * 6));
checkCudaErrors(hipMemset(point_gradients, 0, sizeof(double) * valid_points_num * 3 * 6));
checkCudaErrors(hipMemset(point_hessians, 0, sizeof(double) * valid_points_num * 18 * 6));
int block_x = (valid_points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_points_num;
int grid_x = (valid_points_num - 1) / block_x + 1;
dim3 grid;
hipLaunchKernelGGL(( computePointGradients), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_,
valid_points, valid_points_num,
dj_ang_.buffer(),
point_gradients,
point_gradients + valid_points_num * 7,
point_gradients + valid_points_num * 14,
point_gradients + valid_points_num * 9,
point_gradients + valid_points_num * 15,
point_gradients + valid_points_num * 4,
point_gradients + valid_points_num * 10,
point_gradients + valid_points_num * 16,
point_gradients + valid_points_num * 5,
point_gradients + valid_points_num * 11,
point_gradients + valid_points_num * 17);
checkCudaErrors(hipGetLastError());
if (compute_hessian) {
hipLaunchKernelGGL(( computePointHessian0), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_,
valid_points, valid_points_num,
dh_ang_.buffer(),
point_hessians + valid_points_num * 57, point_hessians + valid_points_num * 63, point_hessians + valid_points_num * 69,
point_hessians + valid_points_num * 75, point_hessians + valid_points_num * 58, point_hessians + valid_points_num * 81,
point_hessians + valid_points_num * 64, point_hessians + valid_points_num * 87, point_hessians + valid_points_num * 70,
point_hessians + valid_points_num * 93, point_hessians + valid_points_num * 59, point_hessians + valid_points_num * 99,
point_hessians + valid_points_num * 65, point_hessians + valid_points_num * 105, point_hessians + valid_points_num * 71);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computePointHessian1), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_,
valid_points, valid_points_num,
dh_ang_.buffer(),
point_hessians + valid_points_num * 76, point_hessians + valid_points_num * 82, point_hessians + valid_points_num * 88,
point_hessians + valid_points_num * 94, point_hessians + valid_points_num * 77, point_hessians + valid_points_num * 100,
point_hessians + valid_points_num * 83, point_hessians + valid_points_num * 106, point_hessians + valid_points_num * 89);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computePointHessian2), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_,
valid_points, valid_points_num,
dh_ang_.buffer(),
point_hessians + valid_points_num * 95, point_hessians + valid_points_num * 101, point_hessians + valid_points_num * 107);
checkCudaErrors(hipGetLastError());
}
checkCudaErrors(hipDeviceSynchronize());
double *tmp_hessian;
checkCudaErrors(hipMalloc(&tmp_hessian, sizeof(double) * valid_voxel_num * 6));
double *e_x_cov_x;
checkCudaErrors(hipMalloc(&e_x_cov_x, sizeof(double) * valid_voxel_num));
double *cov_dxd_pi;
checkCudaErrors(hipMalloc(&cov_dxd_pi, sizeof(double) * valid_voxel_num * 3 * 6));
hipLaunchKernelGGL(( computeExCovX), dim3(grid_x), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
gauss_d1_, gauss_d2_,
e_x_cov_x,
inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeScoreList), dim3(grid_x), dim3(block_x), 0, 0, starting_voxel_id, voxel_id, valid_points_num, e_x_cov_x, gauss_d1_, score);
checkCudaErrors(hipGetLastError());
int block_x2 = (valid_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_voxel_num;
int grid_x2 = (valid_voxel_num - 1) / block_x2 + 1;
hipLaunchKernelGGL(( updateExCovX), dim3(grid_x2), dim3(block_x2), 0, 0, e_x_cov_x, gauss_d2_, valid_voxel_num);
checkCudaErrors(hipGetLastError());
grid.x = grid_x;
grid.y = 3;
grid.z = 6;
hipLaunchKernelGGL(( computeCovDxdPi), dim3(grid), dim3(block_x), 0, 0, valid_points, starting_voxel_id, voxel_id, valid_points_num,
inverse_covariance, voxel_num,
gauss_d1_, gauss_d2_, point_gradients,
cov_dxd_pi, valid_voxel_num);
checkCudaErrors(hipGetLastError());
grid.x = grid_x;
grid.y = 6;
grid.z = 1;
hipLaunchKernelGGL(( computeScoreGradientList), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
voxel_num, e_x_cov_x,
cov_dxd_pi, gauss_d1_, valid_voxel_num, gradients);
checkCudaErrors(hipGetLastError());
if (compute_hessian) {
grid.y = 6;
grid.z = 1;
hipLaunchKernelGGL(( computeHessianListS0), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num,
point_gradients, point_gradients + 6 * valid_points_num, point_gradients + 12 * valid_points_num,
tmp_hessian, valid_voxel_num);
checkCudaErrors(hipGetLastError());
grid.z = 6;
hipLaunchKernelGGL(( computeHessianListS1), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
gauss_d1_, gauss_d2_, hessians,
e_x_cov_x, tmp_hessian, cov_dxd_pi,
point_gradients,
valid_voxel_num);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeHessianListS2), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
gauss_d1_, e_x_cov_x,
inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num,
point_hessians, hessians, valid_voxel_num);
checkCudaErrors(hipGetLastError());
}
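// Reduce the per-point gradients, Hessians and scores down to element 0 by repeated halving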
int full_size = valid_points_num;
int half_size = (full_size - 1) / 2 + 1;
while (full_size > 1) {
block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size;
grid_x = (half_size - 1) / block_x + 1;
grid.x = grid_x;
grid.y = 1;
grid.z = 6;
hipLaunchKernelGGL(( matrixSum), dim3(grid), dim3(block_x), 0, 0, gradients, full_size, half_size, 1, 6, valid_points_num);
checkCudaErrors(hipGetLastError());
grid.y = 6;
hipLaunchKernelGGL(( matrixSum), dim3(grid), dim3(block_x), 0, 0, hessians, full_size, half_size, 6, 6, valid_points_num);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( sumScore), dim3(grid_x), dim3(block_x), 0, 0, score, full_size, half_size);
checkCudaErrors(hipGetLastError());
full_size = half_size;
half_size = (full_size - 1) / 2 + 1;
}
checkCudaErrors(hipDeviceSynchronize());
MatrixDevice dgrad(1, 6, valid_points_num, gradients), dhess(6, 6, valid_points_num, hessians);
MatrixHost hgrad(1, 6), hhess(6, 6);
hgrad.moveToHost(dgrad);
hhess.moveToHost(dhess);
for (int i = 0; i < 6; i++) {
score_gradient(i) = hgrad(i);
}
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
hessian(i, j) = hhess(i, j);
}
}
double score_inc;
checkCudaErrors(hipMemcpy(&score_inc, score, sizeof(double), hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(gradients));
checkCudaErrors(hipFree(hessians));
checkCudaErrors(hipFree(point_hessians));
checkCudaErrors(hipFree(point_gradients));
checkCudaErrors(hipFree(score));
checkCudaErrors(hipFree(tmp_hessian));
checkCudaErrors(hipFree(e_x_cov_x));
checkCudaErrors(hipFree(cov_dxd_pi));
if (valid_points != NULL)
checkCudaErrors(hipFree(valid_points));
if (voxel_id != NULL)
checkCudaErrors(hipFree(voxel_id));
if (starting_voxel_id != NULL)
checkCudaErrors(hipFree(starting_voxel_id));
return score_inc;
}
void GNormalDistributionsTransform::computeAngleDerivatives(MatrixHost &pose, bool compute_hessian)
{
double cx, cy, cz, sx, sy, sz;
if (fabs(pose(3)) < 10e-5) {
cx = 1.0;
sx = 0.0;
} else {
cx = cos(pose(3));
sx = sin(pose(3));
}
if (fabs(pose(4)) < 10e-5) {
cy = 1.0;
sy = 0.0;
} else {
cy = cos(pose(4));
sy = sin(pose(4));
}
if (fabs(pose(5)) < 10e-5) {
cz = 1.0;
sz = 0.0;
} else {
cz = cos(pose(5));
sz = sin(pose(5));
}
j_ang_(0) = -sx * sz + cx * sy * cz;
j_ang_(1) = -sx * cz - cx * sy * sz;
j_ang_(2) = -cx * cy;
j_ang_(3) = cx * sz + sx * sy * cz;
j_ang_(4) = cx * cz - sx * sy * sz;
j_ang_(5) = -sx * cy;
j_ang_(6) = -sy * cz;
j_ang_(7) = sy * sz;
j_ang_(8) = cy;
j_ang_(9) = sx * cy * cz;
j_ang_(10) = -sx * cy * sz;
j_ang_(11) = sx * sy;
j_ang_(12) = -cx * cy * cz;
j_ang_(13) = cx * cy * sz;
j_ang_(14) = -cx * sy;
j_ang_(15) = -cy * sz;
j_ang_(16) = -cy * cz;
j_ang_(17) = 0;
j_ang_(18) = cx * cz - sx * sy * sz;
j_ang_(19) = -cx * sz - sx * sy * cz;
j_ang_(20) = 0;
j_ang_(21) = sx * cz + cx * sy * sz;
j_ang_(22) = cx * sy * cz - sx * sz;
j_ang_(23) = 0;
j_ang_.moveToGpu(dj_ang_);
if (compute_hessian) {
h_ang_(0) = -cx * sz - sx * sy * cz;
h_ang_(1) = -cx * cz + sx * sy * sz;
h_ang_(2) = sx * cy;
h_ang_(3) = -sx * sz + cx * sy * cz;
h_ang_(4) = -cx * sy * sz - sx * cz;
h_ang_(5) = -cx * cy;
h_ang_(6) = cx * cy * cz;
h_ang_(7) = -cx * cy * sz;
h_ang_(8) = cx * sy;
h_ang_(9) = sx * cy * cz;
h_ang_(10) = -sx * cy * sz;
h_ang_(11) = sx * sy;
h_ang_(12) = -sx * cz - cx * sy * sz;
h_ang_(13) = sx * sz - cx * sy * cz;
h_ang_(14) = 0;
h_ang_(15) = cx * cz - sx * sy * sz;
h_ang_(16) = -sx * sy * cz - cx * sz;
h_ang_(17) = 0;
h_ang_(18) = -cy * cz;
h_ang_(19) = cy * sz;
h_ang_(20) = sy;
h_ang_(21) = -sx * sy * cz;
h_ang_(22) = sx * sy * sz;
h_ang_(23) = sx * cy;
h_ang_(24) = cx * sy * cz;
h_ang_(25) = -cx * sy * sz;
h_ang_(26) = -cx * cy;
h_ang_(27) = sy * sz;
h_ang_(28) = sy * cz;
h_ang_(29) = 0;
h_ang_(30) = -sx * cy * sz;
h_ang_(31) = -sx * cy * cz;
h_ang_(32) = 0;
h_ang_(33) = cx * cy * sz;
h_ang_(34) = cx * cy * cz;
h_ang_(35) = 0;
h_ang_(36) = -cy * cz;
h_ang_(37) = cy * sz;
h_ang_(38) = 0;
h_ang_(39) = -cx * sz - sx * sy * cz;
h_ang_(40) = -cx * cz + sx * sy * sz;
h_ang_(41) = 0;
h_ang_(42) = -sx * sz + cx * sy * cz;
h_ang_(43) = -cx * sy * sz - sx * cz;
h_ang_(44) = 0;
h_ang_.moveToGpu(dh_ang_);
}
}
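/* Apply the 3x4 affine transform to every source point (grid-stride loop, one point per thread). */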
__global__ void gpuTransform(float *in_x, float *in_y, float *in_z,
float *trans_x, float *trans_y, float *trans_z,
int point_num, MatrixDevice transform)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
float x, y, z;
for (int i = idx; i < point_num; i += stride) {
x = in_x[i];
y = in_y[i];
z = in_z[i];
trans_x[i] = transform(0, 0) * x + transform(0, 1) * y + transform(0, 2) * z + transform(0, 3);
trans_y[i] = transform(1, 0) * x + transform(1, 1) * y + transform(1, 2) * z + transform(1, 3);
trans_z[i] = transform(2, 0) * x + transform(2, 1) * y + transform(2, 2) * z + transform(2, 3);
}
}
void GNormalDistributionsTransform::transformPointCloud(float *in_x, float *in_y, float *in_z,
float *trans_x, float *trans_y, float *trans_z,
int points_number, const Eigen::Matrix<float, 4, 4> &transform)
{
Eigen::Transform<float, 3, Eigen::Affine> t(transform);
MatrixHost htrans(3, 4);
MatrixDevice dtrans(3, 4);
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 4; j++) {
htrans(i, j) = t(i, j);
}
}
htrans.moveToGpu(dtrans);
if (points_number > 0) {
int block_x = (points_number <= BLOCK_SIZE_X) ? points_number : BLOCK_SIZE_X;
int grid_x = (points_number - 1) / block_x + 1;
hipLaunchKernelGGL(( gpuTransform), dim3(grid_x), dim3(block_x) , 0, 0, in_x, in_y, in_z, trans_x, trans_y, trans_z, points_number, dtrans);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
}
dtrans.memFree();
}
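/* More-Thuente line search along step_dir [More, Thuente 1994]: returns the accepted step length and leaves score, score_gradient and hessian evaluated at the resulting pose. */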
double GNormalDistributionsTransform::computeStepLengthMT(const Eigen::Matrix<double, 6, 1> &x, Eigen::Matrix<double, 6, 1> &step_dir,
double step_init, double step_max, double step_min, double &score,
Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian,
float *trans_x, float *trans_y, float *trans_z, int points_num)
{
double phi_0 = -score;
double d_phi_0 = -(score_gradient.dot(step_dir));
Eigen::Matrix<double, 6, 1> x_t;
if (d_phi_0 >= 0) {
if (d_phi_0 == 0)
return 0;
else {
d_phi_0 *= -1;
step_dir *= -1;
}
}
int max_step_iterations = 10;
int step_iterations = 0;
double mu = 1.e-4;
double nu = 0.9;
double a_l = 0, a_u = 0;
double f_l = auxilaryFunction_PsiMT(a_l, phi_0, phi_0, d_phi_0, mu);
double g_l = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu);
double f_u = auxilaryFunction_PsiMT(a_u, phi_0, phi_0, d_phi_0, mu);
double g_u = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu);
bool interval_converged = (step_max - step_min) > 0, open_interval = true;
double a_t = step_init;
a_t = ::min(a_t, step_max);
a_t = ::max(a_t, step_min);
x_t = x + step_dir * a_t;
final_transformation_ = (Eigen::Translation<float, 3>(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ())).matrix();
transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_num, final_transformation_);
score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t);
double phi_t = -score;
double d_phi_t = -(score_gradient.dot(step_dir));
double psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu);
double d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu);
while (!interval_converged && step_iterations < max_step_iterations && !(psi_t <= 0 && d_phi_t <= -nu * d_phi_0)) {
if (open_interval) {
a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t);
} else {
a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t);
}
a_t = (a_t < step_max) ? a_t : step_max;
a_t = (a_t > step_min) ? a_t : step_min;
x_t = x + step_dir * a_t;
final_transformation_ = (Eigen::Translation<float, 3>(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ())).matrix();
transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_num, final_transformation_);
score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t, false);
phi_t = -score;
d_phi_t = -(score_gradient.dot(step_dir));
psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu);
d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu);
if (open_interval && (psi_t <= 0 && d_psi_t >= 0)) {
open_interval = false;
f_l += phi_0 - mu * d_phi_0 * a_l;
g_l += mu * d_phi_0;
f_u += phi_0 - mu * d_phi_0 * a_u;
g_u += mu * d_phi_0;
}
if (open_interval) {
interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t);
} else {
interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t);
}
step_iterations++;
}
if (step_iterations) {
computeHessian(hessian, trans_x, trans_y, trans_z, points_num, x_t);
}
real_iterations_ += step_iterations;
return a_t;
}
//Copied from ndt.hpp
double GNormalDistributionsTransform::trialValueSelectionMT (double a_l, double f_l, double g_l,
double a_u, double f_u, double g_u,
double a_t, double f_t, double g_t)
{
// Case 1 in Trial Value Selection [More, Thuente 1994]
if (f_t > f_l) {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
double w = std::sqrt (z * z - g_t * g_l);
// Equation 2.4.56 [Sun, Yuan 2006]
double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates f_l, f_t and g_l
// Equation 2.4.2 [Sun, Yuan 2006]
double a_q = a_l - 0.5 * (a_l - a_t) * g_l / (g_l - (f_l - f_t) / (a_l - a_t));
if (std::fabs (a_c - a_l) < std::fabs (a_q - a_l))
return (a_c);
else
return (0.5 * (a_q + a_c));
}
// Case 2 in Trial Value Selection [More, Thuente 1994]
else if (g_t * g_l < 0) {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
double w = std::sqrt (z * z - g_t * g_l);
// Equation 2.4.56 [Sun, Yuan 2006]
double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates f_l, g_l and g_t
// Equation 2.4.5 [Sun, Yuan 2006]
double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l;
if (std::fabs (a_c - a_t) >= std::fabs (a_s - a_t))
return (a_c);
else
return (a_s);
}
// Case 3 in Trial Value Selection [More, Thuente 1994]
else if (std::fabs (g_t) <= std::fabs (g_l)) {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
double w = std::sqrt (z * z - g_t * g_l);
double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates g_l and g_t
// Equation 2.4.5 [Sun, Yuan 2006]
double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l;
double a_t_next;
if (std::fabs (a_c - a_t) < std::fabs (a_s - a_t))
a_t_next = a_c;
else
a_t_next = a_s;
if (a_t > a_l)
return (std::min (a_t + 0.66 * (a_u - a_t), a_t_next));
else
return (std::max (a_t + 0.66 * (a_u - a_t), a_t_next));
}
// Case 4 in Trial Value Selection [More, Thuente 1994]
else {
// Calculate the minimizer of the cubic that interpolates f_u, f_t, g_u and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_u) / (a_t - a_u) - g_t - g_u;
double w = std::sqrt (z * z - g_t * g_u);
// Equation 2.4.56 [Sun, Yuan 2006]
return (a_u + (a_t - a_u) * (w - g_u - z) / (g_t - g_u + 2 * w));
}
}
//Copied from ndt.hpp
double GNormalDistributionsTransform::updateIntervalMT (double &a_l, double &f_l, double &g_l,
double &a_u, double &f_u, double &g_u,
double a_t, double f_t, double g_t)
{
// Case U1 in Update Algorithm and Case a in Modified Update Algorithm [More, Thuente 1994]
if (f_t > f_l) {
a_u = a_t;
f_u = f_t;
g_u = g_t;
return (false);
}
// Case U2 in Update Algorithm and Case b in Modified Update Algorithm [More, Thuente 1994]
else if (g_t * (a_l - a_t) > 0) {
a_l = a_t;
f_l = f_t;
g_l = g_t;
return (false);
}
// Case U3 in Update Algorithm and Case c in Modified Update Algorithm [More, Thuente 1994]
else if (g_t * (a_l - a_t) < 0) {
a_u = a_l;
f_u = f_l;
g_u = g_l;
a_l = a_t;
f_l = f_t;
g_l = g_t;
return (false);
}
// Interval Converged
else
return (true);
}
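/* Recompute only the 6x6 Hessian at the pose reached by the line search; the score gradient is not needed here. */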
void GNormalDistributionsTransform::computeHessian(Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num, Eigen::Matrix<double, 6, 1> &p)
{
int *valid_points, *voxel_id, *starting_voxel_id;
int valid_voxel_num, valid_points_num;
//Radius Search
voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX, &valid_points, &starting_voxel_id, &voxel_id, &valid_voxel_num, &valid_points_num);
double *centroid = voxel_grid_.getCentroidList();
double *covariance = voxel_grid_.getCovarianceList();
double *inverse_covariance = voxel_grid_.getInverseCovarianceList();
int *points_per_voxel = voxel_grid_.getPointsPerVoxelList();
int voxel_num = voxel_grid_.getVoxelNum();
if (valid_points_num <= 0)
return;
//Update score gradient and hessian matrix
double *hessians, *point_gradients, *point_hessians;
checkCudaErrors(hipMalloc(&hessians, sizeof(double) * valid_points_num * 6 * 6));
checkCudaErrors(hipMalloc(&point_gradients, sizeof(double) * valid_points_num * 3 * 6));
checkCudaErrors(hipMalloc(&point_hessians, sizeof(double) * valid_points_num * 18 * 6));
checkCudaErrors(hipMemset(hessians, 0, sizeof(double) * valid_points_num * 6 * 6));
checkCudaErrors(hipMemset(point_gradients, 0, sizeof(double) * valid_points_num * 3 * 6));
checkCudaErrors(hipMemset(point_hessians, 0, sizeof(double) * valid_points_num * 18 * 6));
int block_x = (valid_points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_points_num;
int grid_x = (valid_points_num - 1) / block_x + 1;
dim3 grid;
hipLaunchKernelGGL(( computePointGradients), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_,
valid_points, valid_points_num,
dj_ang_.buffer(),
point_gradients,
point_gradients + valid_points_num * 7,
point_gradients + valid_points_num * 14,
point_gradients + valid_points_num * 9,
point_gradients + valid_points_num * 15,
point_gradients + valid_points_num * 4,
point_gradients + valid_points_num * 10,
point_gradients + valid_points_num * 16,
point_gradients + valid_points_num * 5,
point_gradients + valid_points_num * 11,
point_gradients + valid_points_num * 17);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computePointHessian0), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_,
valid_points, valid_points_num,
dh_ang_.buffer(),
point_hessians + valid_points_num * 57, point_hessians + valid_points_num * 63, point_hessians + valid_points_num * 69,
point_hessians + valid_points_num * 75, point_hessians + valid_points_num * 58, point_hessians + valid_points_num * 81,
point_hessians + valid_points_num * 64, point_hessians + valid_points_num * 87, point_hessians + valid_points_num * 70,
point_hessians + valid_points_num * 93, point_hessians + valid_points_num * 59, point_hessians + valid_points_num * 99,
point_hessians + valid_points_num * 65, point_hessians + valid_points_num * 105, point_hessians + valid_points_num * 71);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computePointHessian1), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_,
valid_points, valid_points_num,
dh_ang_.buffer(),
point_hessians + valid_points_num * 76, point_hessians + valid_points_num * 82, point_hessians + valid_points_num * 88,
point_hessians + valid_points_num * 94, point_hessians + valid_points_num * 77, point_hessians + valid_points_num * 100,
point_hessians + valid_points_num * 83, point_hessians + valid_points_num * 106, point_hessians + valid_points_num * 89);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computePointHessian2), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_,
valid_points, valid_points_num,
dh_ang_.buffer(),
point_hessians + valid_points_num * 95, point_hessians + valid_points_num * 101, point_hessians + valid_points_num * 107);
checkCudaErrors(hipGetLastError());
double *tmp_hessian;
checkCudaErrors(hipMalloc(&tmp_hessian, sizeof(double) * valid_voxel_num * 6));
double *e_x_cov_x;
checkCudaErrors(hipMalloc(&e_x_cov_x, sizeof(double) * valid_voxel_num));
double *cov_dxd_pi;
checkCudaErrors(hipMalloc(&cov_dxd_pi, sizeof(double) * valid_voxel_num * 3 * 6));
hipLaunchKernelGGL(( computeExCovX), dim3(grid_x), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
gauss_d1_, gauss_d2_,
e_x_cov_x,
inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num);
checkCudaErrors(hipGetLastError());
grid.x = grid_x;
grid.y = 3;
grid.z = 6;
hipLaunchKernelGGL(( computeCovDxdPi), dim3(grid), dim3(block_x), 0, 0, valid_points, starting_voxel_id, voxel_id, valid_points_num,
inverse_covariance, voxel_num,
gauss_d1_, gauss_d2_, point_gradients,
cov_dxd_pi, valid_voxel_num);
checkCudaErrors(hipGetLastError());
int block_x2 = (valid_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_voxel_num;
int grid_x2 = (valid_voxel_num - 1) / block_x2 + 1;
hipLaunchKernelGGL(( updateExCovX), dim3(grid_x2), dim3(block_x2), 0, 0, e_x_cov_x, gauss_d2_, valid_voxel_num);
checkCudaErrors(hipGetLastError());
grid.y = 6;
grid.z = 1;
hipLaunchKernelGGL(( computeHessianListS0), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num,
point_gradients, point_gradients + 6 * valid_points_num, point_gradients + 12 * valid_points_num,
tmp_hessian, valid_voxel_num);
checkCudaErrors(hipGetLastError());
grid.z = 6;
hipLaunchKernelGGL(( computeHessianListS1), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
gauss_d1_, gauss_d2_, hessians,
e_x_cov_x, tmp_hessian, cov_dxd_pi,
point_gradients,
valid_voxel_num);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeHessianListS2), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
gauss_d1_, e_x_cov_x,
inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num,
point_hessians, hessians, valid_voxel_num);
checkCudaErrors(hipGetLastError());
int full_size = valid_points_num;
int half_size = (full_size - 1) / 2 + 1;
while (full_size > 1) {
block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size;
grid_x = (half_size - 1) / block_x + 1;
grid.x = grid_x;
grid.y = 6;
grid.z = 6;
hipLaunchKernelGGL(( matrixSum), dim3(grid), dim3(block_x), 0, 0, hessians, full_size, half_size, 6, 6, valid_points_num);
checkCudaErrors(hipGetLastError());
full_size = half_size;
half_size = (full_size - 1) / 2 + 1;
}
checkCudaErrors(hipDeviceSynchronize());
MatrixDevice dhessian(6, 6, valid_points_num, hessians);
MatrixHost hhessian(6, 6);
hhessian.moveToHost(dhessian);
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
hessian(i, j) = hhessian(i, j);
}
}
checkCudaErrors(hipFree(hessians));
checkCudaErrors(hipFree(point_hessians));
checkCudaErrors(hipFree(point_gradients));
checkCudaErrors(hipFree(tmp_hessian));
checkCudaErrors(hipFree(e_x_cov_x));
checkCudaErrors(hipFree(cov_dxd_pi));
if (valid_points != NULL) {
checkCudaErrors(hipFree(valid_points));
}
if (voxel_id != NULL) {
checkCudaErrors(hipFree(voxel_id));
}
if (starting_voxel_id != NULL) {
checkCudaErrors(hipFree(starting_voxel_id));
}
dhessian.memFree();
}
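/* Generic pairwise reduction: folds input[i + half_size] into input[i] for the lower half of the buffer. */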
template <typename T>
__global__ void gpuSum(T *input, int size, int half_size)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < half_size; i += stride) {
if (i + half_size < size) {
input[i] += input[i + half_size];
}
}
}
double GNormalDistributionsTransform::getFitnessScore(double max_range)
{
double fitness_score = 0.0;
float *trans_x, *trans_y, *trans_z;
checkCudaErrors(hipMalloc(&trans_x, sizeof(float) * points_number_));
checkCudaErrors(hipMalloc(&trans_y, sizeof(float) * points_number_));
checkCudaErrors(hipMalloc(&trans_z, sizeof(float) * points_number_));
transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_number_, final_transformation_);
int *valid_distance;
checkCudaErrors(hipMalloc(&valid_distance, sizeof(int) * points_number_));
double *min_distance;
checkCudaErrors(hipMalloc(&min_distance, sizeof(double) * points_number_));
voxel_grid_.nearestNeighborSearch(trans_x, trans_y, trans_z, points_number_, valid_distance, min_distance, max_range);
int size = points_number_;
int half_size;
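// Reduce min_distance and valid_distance to their element-0 totals by repeated halving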
while (size > 1) {
half_size = (size - 1) / 2 + 1;
int block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size;
int grid_x = (half_size - 1) / block_x + 1;
hipLaunchKernelGGL(( gpuSum<double>), dim3(grid_x), dim3(block_x), 0, 0, min_distance, size, half_size);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gpuSum<int>), dim3(grid_x), dim3(block_x), 0, 0, valid_distance, size, half_size);
checkCudaErrors(hipGetLastError());
size = half_size;
}
checkCudaErrors(hipDeviceSynchronize());
int nr;
checkCudaErrors(hipMemcpy(&nr, valid_distance, sizeof(int), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&fitness_score, min_distance, sizeof(double), hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(trans_x));
checkCudaErrors(hipFree(trans_y));
checkCudaErrors(hipFree(trans_z));
checkCudaErrors(hipFree(valid_distance));
checkCudaErrors(hipFree(min_distance));
if (nr > 0)
return (fitness_score / nr);
return DBL_MAX;
}
}
| e04459a62e73a6451c76fe17d5cb769055a80275.cu | #include "fast_pcl/ndt_gpu/NormalDistributionsTransform.h"
#include "fast_pcl/ndt_gpu/debug.h"
#include <cmath>
#include <iostream>
#include <pcl/common/transforms.h>
namespace gpu {
GNormalDistributionsTransform::GNormalDistributionsTransform()
{
//GRegistration::GRegistration();
gauss_d1_ = gauss_d2_ = 0;
outlier_ratio_ = 0.55;
step_size_ = 0.1;
resolution_ = 1.0f;
trans_probability_ = 0;
double gauss_c1, gauss_c2, gauss_d3;
// Initializes the Gaussian fitting parameters (eq. 6.8) [Magnusson 2009]
gauss_c1 = 10.0 * (1 - outlier_ratio_);
gauss_c2 = outlier_ratio_ / pow (resolution_, 3);
gauss_d3 = -log (gauss_c2);
gauss_d1_ = -log ( gauss_c1 + gauss_c2 ) - gauss_d3;
gauss_d2_ = -2 * log ((-log ( gauss_c1 * exp ( -0.5 ) + gauss_c2 ) - gauss_d3) / gauss_d1_);
transformation_epsilon_ = 0.1;
max_iterations_ = 35;
j_ang_ = MatrixHost(24, 1);
h_ang_ = MatrixHost(45, 1);
dj_ang_ = MatrixDevice(24, 1);
dh_ang_ = MatrixDevice(45, 1);
real_iterations_ = 0;
}
GNormalDistributionsTransform::GNormalDistributionsTransform(const GNormalDistributionsTransform &other)
{
gauss_d1_ = other.gauss_d1_;
gauss_d2_ = other.gauss_d2_;
outlier_ratio_ = other.outlier_ratio_;
j_ang_ = other.j_ang_;
h_ang_ = other.h_ang_;
dj_ang_ = other.dj_ang_;
dh_ang_ = other.dh_ang_;
step_size_ = other.step_size_;
resolution_ = other.resolution_;
trans_probability_ = other.trans_probability_;
real_iterations_ = other.real_iterations_;
voxel_grid_ = other.voxel_grid_;
}
GNormalDistributionsTransform::~GNormalDistributionsTransform()
{
dj_ang_.memFree();
dh_ang_.memFree();
}
void GNormalDistributionsTransform::setStepSize(double step_size)
{
step_size_ = step_size;
}
void GNormalDistributionsTransform::setResolution(float resolution)
{
resolution_ = resolution;
}
void GNormalDistributionsTransform::setOutlierRatio(double olr)
{
outlier_ratio_ = olr;
}
double GNormalDistributionsTransform::getStepSize() const
{
return step_size_;
}
float GNormalDistributionsTransform::getResolution() const
{
return resolution_;
}
double GNormalDistributionsTransform::getOutlierRatio() const
{
return outlier_ratio_;
}
double GNormalDistributionsTransform::getTransformationProbability() const
{
return trans_probability_;
}
int GNormalDistributionsTransform::getRealIterations()
{
return real_iterations_;
}
double GNormalDistributionsTransform::auxilaryFunction_PsiMT(double a, double f_a, double f_0, double g_0, double mu)
{
return (f_a - f_0 - mu * g_0 * a);
}
double GNormalDistributionsTransform::auxilaryFunction_dPsiMT(double g_a, double g_0, double mu)
{
return (g_a - mu * g_0);
}
void GNormalDistributionsTransform::setInputTarget(pcl::PointCloud<pcl::PointXYZI>::Ptr &input)
{
// Copy input map data from the host memory to the GPU memory
GRegistration::setInputTarget(input);
// Build the voxel grid
if (target_points_number_ != 0) {
voxel_grid_.setLeafSize(resolution_, resolution_, resolution_);
voxel_grid_.setInput(target_x_, target_y_, target_z_, target_points_number_);
}
}
void GNormalDistributionsTransform::setInputTarget(pcl::PointCloud<pcl::PointXYZ>::Ptr &input)
{
// Copy input map data from the host memory to the GPU memory
GRegistration::setInputTarget(input);
// Build the voxel grid
if (target_points_number_ != 0) {
voxel_grid_.setLeafSize(resolution_, resolution_, resolution_);
voxel_grid_.setInput(target_x_, target_y_, target_z_, target_points_number_);
}
}
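/* Main alignment loop: Newton steps on the NDT score, with a More-Thuente line search choosing each step length. */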
void GNormalDistributionsTransform::computeTransformation(const Eigen::Matrix<float, 4, 4> &guess)
{
nr_iterations_ = 0;
converged_ = false;
double gauss_c1, gauss_c2, gauss_d3;
gauss_c1 = 10 * ( 1 - outlier_ratio_);
gauss_c2 = outlier_ratio_ / pow(resolution_, 3);
gauss_d3 = - log(gauss_c2);
gauss_d1_ = -log(gauss_c1 + gauss_c2) - gauss_d3;
gauss_d2_ = -2 * log((-log(gauss_c1 * exp(-0.5) + gauss_c2) - gauss_d3) / gauss_d1_);
if (guess != Eigen::Matrix4f::Identity()) {
final_transformation_ = guess;
transformPointCloud(x_, y_, z_, trans_x_, trans_y_, trans_z_, points_number_, guess);
}
Eigen::Transform<float, 3, Eigen::Affine, Eigen::ColMajor> eig_transformation;
eig_transformation.matrix() = final_transformation_;
Eigen::Matrix<double, 6, 1> p, delta_p, score_gradient;
Eigen::Vector3f init_translation = eig_transformation.translation();
Eigen::Vector3f init_rotation = eig_transformation.rotation().eulerAngles(0, 1, 2);
p << init_translation(0), init_translation(1), init_translation(2), init_rotation(0), init_rotation(1), init_rotation(2);
Eigen::Matrix<double, 6, 6> hessian;
double score = 0;
double delta_p_norm;
score = computeDerivatives(score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_, p);
int loop_time = 0;
while (!converged_) {
previous_transformation_ = transformation_;
Eigen::JacobiSVD<Eigen::Matrix<double, 6, 6>> sv(hessian, Eigen::ComputeFullU | Eigen::ComputeFullV);
delta_p = sv.solve(-score_gradient);
delta_p_norm = delta_p.norm();
if (delta_p_norm == 0 || delta_p_norm != delta_p_norm) {
trans_probability_ = score / static_cast<double>(points_number_);
converged_ = delta_p_norm == delta_p_norm;
return;
}
delta_p.normalize();
delta_p_norm = computeStepLengthMT(p, delta_p, delta_p_norm, step_size_, transformation_epsilon_ / 2, score, score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_);
delta_p *= delta_p_norm;
transformation_ = (Eigen::Translation<float, 3>(static_cast<float>(delta_p(0)), static_cast<float>(delta_p(1)), static_cast<float>(delta_p(2))) *
Eigen::AngleAxis<float>(static_cast<float>(delta_p(3)), Eigen::Vector3f::UnitX()) *
Eigen::AngleAxis<float>(static_cast<float>(delta_p(4)), Eigen::Vector3f::UnitY()) *
Eigen::AngleAxis<float>(static_cast<float>(delta_p(5)), Eigen::Vector3f::UnitZ())).matrix();
p = p + delta_p;
//Visualizer is not updated in this implementation
if (nr_iterations_ > max_iterations_ || (nr_iterations_ && (std::fabs(delta_p_norm) < transformation_epsilon_)))
converged_ = true;
nr_iterations_++;
loop_time++;
}
trans_probability_ = score / static_cast<double>(points_number_);
}
/* First step of computing point gradients */
__global__ void computePointGradients(float *x, float *y, float *z, int points_num,
int *valid_points, int valid_points_num,
double *dj_ang,
double *pg00, double *pg11, double *pg22,
double *pg13, double *pg23, double *pg04, double *pg14,
double *pg24, double *pg05, double *pg15, double *pg25)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
__shared__ double j_ang[24];
if (threadIdx.x < 24) {
j_ang[threadIdx.x] = dj_ang[threadIdx.x];
}
__syncthreads();
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
//Original coordinates
double o_x = static_cast<double>(x[pid]);
double o_y = static_cast<double>(y[pid]);
double o_z = static_cast<double>(z[pid]);
//Set the 3x3 block starting at (0, 0) to the identity matrix
pg00[i] = 1;
pg11[i] = 1;
pg22[i] = 1;
//Compute point derivatives
pg13[i] = o_x * j_ang[0] + o_y * j_ang[1] + o_z * j_ang[2];
pg23[i] = o_x * j_ang[3] + o_y * j_ang[4] + o_z * j_ang[5];
pg04[i] = o_x * j_ang[6] + o_y * j_ang[7] + o_z * j_ang[8];
pg14[i] = o_x * j_ang[9] + o_y * j_ang[10] + o_z * j_ang[11];
pg24[i] = o_x * j_ang[12] + o_y * j_ang[13] + o_z * j_ang[14];
pg05[i] = o_x * j_ang[15] + o_y * j_ang[16] + o_z * j_ang[17];
pg15[i] = o_x * j_ang[18] + o_y * j_ang[19] + o_z * j_ang[20];
pg25[i] = o_x * j_ang[21] + o_y * j_ang[22] + o_z * j_ang[23];
}
}
/* First step of computing point hessians */
__global__ void computePointHessian0(float *x, float *y, float *z, int points_num,
int *valid_points, int valid_points_num,
double *dh_ang,
double *ph93, double *ph103, double *ph113,
double *ph123, double *ph94, double *ph133,
double *ph104, double *ph143, double *ph114,
double *ph153, double *ph95, double *ph163,
double *ph105, double *ph173, double *ph115)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
__shared__ double h_ang[18];
if (threadIdx.x < 18) {
h_ang[threadIdx.x] = dh_ang[threadIdx.x];
}
__syncthreads();
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
//Original coordinates
double o_x = static_cast<double>(x[pid]);
double o_y = static_cast<double>(y[pid]);
double o_z = static_cast<double>(z[pid]);
ph93[i] = 0;
ph103[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2];
ph113[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5];
ph123[i] = ph94[i] = 0;
ph133[i] = ph104[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8];
ph143[i] = ph114[i] = o_x * h_ang[9] + o_y * h_ang[10] + o_z * h_ang[11];
ph153[i] = ph95[i] = 0;
ph163[i] = ph105[i] = o_x * h_ang[12] + o_y * h_ang[13] + o_z * h_ang[14];
ph173[i] = ph115[i] = o_x * h_ang[15] + o_y * h_ang[16] + o_z * h_ang[17];
}
}
__global__ void computePointHessian1(float *x, float *y, float *z, int points_num,
int *valid_points, int valid_points_num,
double *dh_ang,
double *ph124, double *ph134, double *ph144,
double *ph154, double *ph125, double *ph164,
double *ph135, double *ph174, double *ph145)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
__shared__ double h_ang[18];
if (threadIdx.x < 18) {
h_ang[threadIdx.x] = dh_ang[18 + threadIdx.x];
}
__syncthreads();
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
//Original coordinates
double o_x = static_cast<double>(x[pid]);
double o_y = static_cast<double>(y[pid]);
double o_z = static_cast<double>(z[pid]);
ph124[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2];
ph134[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5];
ph144[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8];
ph154[i] = ph125[i] = o_x * h_ang[9] + o_y * h_ang[10] + o_z * h_ang[11];
ph164[i] = ph135[i] = o_x * h_ang[12] + o_y * h_ang[13] + o_z * h_ang[14];
ph174[i] = ph145[i] = o_x * h_ang[15] + o_y * h_ang[16] + o_z * h_ang[17];
}
}
__global__ void computePointHessian2(float *x, float *y, float *z, int points_num,
int *valid_points, int valid_points_num,
double *dh_ang,
double *ph155, double *ph165, double *ph175)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
__shared__ double h_ang[9];
if (threadIdx.x < 9) {
h_ang[threadIdx.x] = dh_ang[36 + threadIdx.x];
}
__syncthreads();
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
//Original coordinates
double o_x = static_cast<double>(x[pid]);
double o_y = static_cast<double>(y[pid]);
double o_z = static_cast<double>(z[pid]);
ph155[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2];
ph165[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5];
ph175[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8];
}
}
/* compute score_inc list for input points.
* The final score_inc is calculated by a reduction sum
* on this score_inc list. */
__global__ void computeScoreList(int *starting_voxel_id, int *voxel_id, int valid_points_num,
double *e_x_cov_x, double gauss_d1, double *score)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < valid_points_num; i += stride) {
double score_inc = 0;
for (int vid = starting_voxel_id[i]; vid < starting_voxel_id[i + 1]; vid++) {
double tmp_ex = e_x_cov_x[vid];
score_inc += (tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex) ? 0 : -gauss_d1 * tmp_ex;
}
score[i] = score_inc;
}
}
/* First step to compute score gradient list for input points */
__global__ void computeScoreGradientList(float *trans_x, float *trans_y, float *trans_z,
int *valid_points,
int *starting_voxel_id, int *voxel_id, int valid_points_num,
double *centroid_x, double *centroid_y, double *centroid_z,
int voxel_num, double *e_x_cov_x,
double *cov_dxd_pi, double gauss_d1, int valid_voxel_num,
double *score_gradients)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int col = blockIdx.y;
if (col < 6) {
double *sg = score_gradients + col * valid_points_num;
double *cov_dxd_pi_mat0 = cov_dxd_pi + col * valid_voxel_num;
double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num;
double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
double d_x = static_cast<double>(trans_x[pid]);
double d_y = static_cast<double>(trans_y[pid]);
double d_z = static_cast<double>(trans_z[pid]);
double tmp_sg = 0.0;
for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
int vid = voxel_id[j];
double tmp_ex = e_x_cov_x[j];
if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) {
tmp_ex *= gauss_d1;
tmp_sg += ((d_x - centroid_x[vid]) * cov_dxd_pi_mat0[j] + (d_y - centroid_y[vid]) * cov_dxd_pi_mat1[j] + (d_z - centroid_z[vid]) * cov_dxd_pi_mat2[j]) * tmp_ex;
}
}
sg[i] = tmp_sg;
}
}
}
/* Intermediate step to compute e_x_cov_x */
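/* e_x_cov_x[j] holds exp(-gauss_d2 / 2 * (x - mu)^T Sigma^-1 (x - mu)) for each (point, voxel) pair returned by the radius search. */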
__global__ void computeExCovX(float *trans_x, float *trans_y, float *trans_z, int *valid_points,
int *starting_voxel_id, int *voxel_id, int valid_points_num,
double *centr_x, double *centr_y, double *centr_z,
double gauss_d1, double gauss_d2,
double *e_x_cov_x,
double *icov00, double *icov01, double *icov02,
double *icov10, double *icov11, double *icov12,
double *icov20, double *icov21, double *icov22)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
double d_x = static_cast<double>(trans_x[pid]);
double d_y = static_cast<double>(trans_y[pid]);
double d_z = static_cast<double>(trans_z[pid]);
double t_x, t_y, t_z;
for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
int vid = voxel_id[j];
t_x = d_x - centr_x[vid];
t_y = d_y - centr_y[vid];
t_z = d_z - centr_z[vid];
e_x_cov_x[j] = exp(-gauss_d2 * ((t_x * icov00[vid] + t_y * icov01[vid] + t_z * icov02[vid]) * t_x
+ ((t_x * icov10[vid] + t_y * icov11[vid] + t_z * icov12[vid]) * t_y)
+ ((t_x * icov20[vid] + t_y * icov21[vid] + t_z * icov22[vid]) * t_z)) / 2.0);
}
}
}
/* update e_x_cov_x - Reusable portion of Equation 6.12 and 6.13 [Magnusson 2009] */
__global__ void updateExCovX(double *e_x_cov_x, double gauss_d2, int valid_voxel_num)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < valid_voxel_num; i += stride) {
e_x_cov_x[i] *= gauss_d2;
}
}
/* compute cov_dxd_pi as reusable portion of Equation 6.12 and 6.13 [Magnusson 2009]*/
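/* cov_dxd_pi stores the inverse covariance applied to the point gradients (Sigma^-1 * dx/dp) per (point, voxel) pair, reused by the score-gradient and Hessian kernels. */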
__global__ void computeCovDxdPi(int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num,
double *inverse_covariance, int voxel_num,
double gauss_d1, double gauss_d2, double *point_gradients,
double *cov_dxd_pi, int valid_voxel_num)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int row = blockIdx.y;
int col = blockIdx.z;
if (row < 3 && col < 6) {
double *icov0 = inverse_covariance + row * 3 * voxel_num;
double *icov1 = icov0 + voxel_num;
double *icov2 = icov1 + voxel_num;
double *cov_dxd_pi_tmp = cov_dxd_pi + (row * 6 + col) * valid_voxel_num;
double *pg_tmp0 = point_gradients + col * valid_points_num;
double *pg_tmp1 = pg_tmp0 + 6 * valid_points_num;
double *pg_tmp2 = pg_tmp1 + 6 * valid_points_num;
for (int i = id; i < valid_points_num; i += stride) {
double pg0 = pg_tmp0[i];
double pg1 = pg_tmp1[i];
double pg2 = pg_tmp2[i];
for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
int vid = voxel_id[j];
cov_dxd_pi_tmp[j] = icov0[vid] * pg0 + icov1[vid] * pg1 + icov2[vid] * pg2;
}
}
}
}
/* First step to compute hessian list for input points */
__global__ void computeHessianListS0(float *trans_x, float *trans_y, float *trans_z,
int *valid_points,
int *starting_voxel_id, int *voxel_id, int valid_points_num,
double *centroid_x, double *centroid_y, double *centroid_z,
double *icov00, double *icov01, double *icov02,
double *icov10, double *icov11, double *icov12,
double *icov20, double *icov21, double *icov22,
double *point_gradients0, double *point_gradients1, double *point_gradients2,
double *tmp_hessian,
int valid_voxel_num)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int col = blockIdx.y;
if (col < 6) {
double *tmp_pg0 = point_gradients0 + col * valid_points_num;
double *tmp_pg1 = tmp_pg0 + 6 * valid_points_num;
double *tmp_pg2 = tmp_pg1 + 6 * valid_points_num;
double *tmp_h = tmp_hessian + col * valid_voxel_num;
for (int i = id; i < valid_points_num && col < 6; i += stride) {
int pid = valid_points[i];
double d_x = static_cast<double>(trans_x[pid]);
double d_y = static_cast<double>(trans_y[pid]);
double d_z = static_cast<double>(trans_z[pid]);
double pg0 = tmp_pg0[i];
double pg1 = tmp_pg1[i];
double pg2 = tmp_pg2[i];
for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
int vid = voxel_id[j];
tmp_h[j] = (d_x - centroid_x[vid]) * (icov00[vid] * pg0 + icov01[vid] * pg1 + icov02[vid] * pg2)
+ (d_y - centroid_y[vid]) * (icov10[vid] * pg0 + icov11[vid] * pg1 + icov12[vid] * pg2)
+ (d_z - centroid_z[vid]) * (icov20[vid] * pg0 + icov21[vid] * pg1 + icov22[vid] * pg2);
}
}
}
}
/* Fourth step to compute hessian list */
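/* Accumulates the Hessian terms built from the point gradients, cov_dxd_pi and tmp_hessian (cf. Equation 6.13 [Magnusson 2009]). */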
__global__ void computeHessianListS1(float *trans_x, float *trans_y, float *trans_z,
int *valid_points,
int *starting_voxel_id, int *voxel_id, int valid_points_num,
double *centroid_x, double *centroid_y, double *centroid_z,
double gauss_d1, double gauss_d2, double *hessians,
double *e_x_cov_x, double *tmp_hessian, double *cov_dxd_pi,
double *point_gradients,
int valid_voxel_num)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int row = blockIdx.y;
int col = blockIdx.z;
if (row < 6 && col < 6) {
double *cov_dxd_pi_mat0 = cov_dxd_pi + row * valid_voxel_num;
double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num;
double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num;
double *tmp_h = tmp_hessian + col * valid_voxel_num;
double *h = hessians + (row * 6 + col) * valid_points_num;
double *tmp_pg0 = point_gradients + col * valid_points_num;
double *tmp_pg1 = tmp_pg0 + 6 * valid_points_num;
double *tmp_pg2 = tmp_pg1 + 6 * valid_points_num;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
double d_x = static_cast<double>(trans_x[pid]);
double d_y = static_cast<double>(trans_y[pid]);
double d_z = static_cast<double>(trans_z[pid]);
double pg0 = tmp_pg0[i];
double pg1 = tmp_pg1[i];
double pg2 = tmp_pg2[i];
double final_hessian = 0.0;
for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
//Transformed coordinates
int vid = voxel_id[j];
double tmp_ex = e_x_cov_x[j];
if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) {
double cov_dxd0 = cov_dxd_pi_mat0[j];
double cov_dxd1 = cov_dxd_pi_mat1[j];
double cov_dxd2 = cov_dxd_pi_mat2[j];
tmp_ex *= gauss_d1;
final_hessian += -gauss_d2 * ((d_x - centroid_x[vid]) * cov_dxd0 + (d_y - centroid_y[vid]) * cov_dxd1 + (d_z - centroid_z[vid]) * cov_dxd2) * tmp_h[j] * tmp_ex;
final_hessian += (pg0 * cov_dxd0 + pg1 * cov_dxd1 + pg2 * cov_dxd2) * tmp_ex;
}
}
h[i] = final_hessian;
}
}
}
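/* Adds the remaining Hessian terms, which involve the second-order point derivatives stored in point_hessians. */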
__global__ void computeHessianListS2(float *trans_x, float *trans_y, float *trans_z,
int *valid_points,
int *starting_voxel_id, int *voxel_id, int valid_points_num,
double *centroid_x, double *centroid_y, double *centroid_z,
double gauss_d1, double *e_x_cov_x,
double *icov00, double *icov01, double *icov02,
double *icov10, double *icov11, double *icov12,
double *icov20, double *icov21, double *icov22,
double *point_hessians, double *hessians,
int valid_voxel_num)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int row = blockIdx.y;
int col = blockIdx.z;
if (row < 6 && col < 6) {
double *h = hessians + (row * 6 + col) * valid_points_num;
double *tmp_ph0 = point_hessians + ((3 * row) * 6 + col) * valid_points_num;
double *tmp_ph1 = tmp_ph0 + 6 * valid_points_num;
double *tmp_ph2 = tmp_ph1 + 6 * valid_points_num;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
double d_x = static_cast<double>(trans_x[pid]);
double d_y = static_cast<double>(trans_y[pid]);
double d_z = static_cast<double>(trans_z[pid]);
double ph0 = tmp_ph0[i];
double ph1 = tmp_ph1[i];
double ph2 = tmp_ph2[i];
double final_hessian = h[i];
for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
//Transformed coordinates
int vid = voxel_id[j];
double tmp_ex = e_x_cov_x[j];
if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) {
tmp_ex *= gauss_d1;
final_hessian += (d_x - centroid_x[vid]) * (icov00[vid] * ph0 + icov01[vid] * ph1 + icov02[vid] * ph2) * tmp_ex;
final_hessian += (d_y - centroid_y[vid]) * (icov10[vid] * ph0 + icov11[vid] * ph1 + icov12[vid] * ph2) * tmp_ex;
final_hessian += (d_z - centroid_z[vid]) * (icov20[vid] * ph0 + icov21[vid] * ph1 + icov22[vid] * ph2) * tmp_ex;
}
}
h[i] = final_hessian;
}
}
}
/* Compute sum of a list of matrices */
__global__ void matrixSum(double *matrix_list, int full_size, int half_size, int rows, int cols, int offset)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int row = blockIdx.y;
int col = blockIdx.z;
for (int i = index; i < half_size && row < rows && col < cols; i += stride) {
MatrixDevice left(rows, cols, offset, matrix_list + i);
double *right_ptr = (i + half_size < full_size) ? matrix_list + i + half_size : NULL;
MatrixDevice right(rows, cols, offset, right_ptr);
if (right_ptr != NULL) {
left(row, col) += right(row, col);
}
}
}
/* Compute sum of score_inc list */
__global__ void sumScore(double *score, int full_size, int half_size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < half_size; i += stride) {
score[i] += (i + half_size < full_size) ? score[i + half_size] : 0;
}
}
double GNormalDistributionsTransform::computeDerivatives(Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian,
float *trans_x, float *trans_y, float *trans_z,
int points_num, Eigen::Matrix<double, 6, 1> &pose, bool compute_hessian)
{
MatrixHost p(6, 1);
for (int i = 0; i < 6; i++) {
p(i) = pose(i, 0);
}
score_gradient.setZero ();
hessian.setZero ();
//Compute Angle Derivatives
computeAngleDerivatives(p);
//Radius Search
int *valid_points, *voxel_id, *starting_voxel_id;
int valid_voxel_num, valid_points_num;
valid_points = voxel_id = starting_voxel_id = NULL;
voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX, &valid_points, &starting_voxel_id, &voxel_id, &valid_voxel_num, &valid_points_num);
double *covariance = voxel_grid_.getCovarianceList();
double *inverse_covariance = voxel_grid_.getInverseCovarianceList();
double *centroid = voxel_grid_.getCentroidList();
int *points_per_voxel = voxel_grid_.getPointsPerVoxelList();
int voxel_num = voxel_grid_.getVoxelNum();
if (valid_points_num == 0)
return 0;
//Update score gradient and hessian matrix
double *gradients, *hessians, *point_gradients, *point_hessians, *score;
checkCudaErrors(cudaMalloc(&gradients, sizeof(double) * valid_points_num * 6));
checkCudaErrors(cudaMalloc(&hessians, sizeof(double) * valid_points_num * 6 * 6));
checkCudaErrors(cudaMalloc(&point_gradients, sizeof(double) * valid_points_num * 3 * 6));
checkCudaErrors(cudaMalloc(&point_hessians, sizeof(double) * valid_points_num * 18 * 6));
checkCudaErrors(cudaMalloc(&score, sizeof(double) * valid_points_num));
checkCudaErrors(cudaMemset(gradients, 0, sizeof(double) * valid_points_num * 6));
checkCudaErrors(cudaMemset(hessians, 0, sizeof(double) * valid_points_num * 6 * 6));
checkCudaErrors(cudaMemset(point_gradients, 0, sizeof(double) * valid_points_num * 3 * 6));
checkCudaErrors(cudaMemset(point_hessians, 0, sizeof(double) * valid_points_num * 18 * 6));
int block_x = (valid_points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_points_num;
int grid_x = (valid_points_num - 1) / block_x + 1;
dim3 grid;
computePointGradients<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
valid_points, valid_points_num,
dj_ang_.buffer(),
point_gradients,
point_gradients + valid_points_num * 7,
point_gradients + valid_points_num * 14,
point_gradients + valid_points_num * 9,
point_gradients + valid_points_num * 15,
point_gradients + valid_points_num * 4,
point_gradients + valid_points_num * 10,
point_gradients + valid_points_num * 16,
point_gradients + valid_points_num * 5,
point_gradients + valid_points_num * 11,
point_gradients + valid_points_num * 17);
checkCudaErrors(cudaGetLastError());
if (compute_hessian) {
computePointHessian0<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
valid_points, valid_points_num,
dh_ang_.buffer(),
point_hessians + valid_points_num * 57, point_hessians + valid_points_num * 63, point_hessians + valid_points_num * 69,
point_hessians + valid_points_num * 75, point_hessians + valid_points_num * 58, point_hessians + valid_points_num * 81,
point_hessians + valid_points_num * 64, point_hessians + valid_points_num * 87, point_hessians + valid_points_num * 70,
point_hessians + valid_points_num * 93, point_hessians + valid_points_num * 59, point_hessians + valid_points_num * 99,
point_hessians + valid_points_num * 65, point_hessians + valid_points_num * 105, point_hessians + valid_points_num * 71);
checkCudaErrors(cudaGetLastError());
computePointHessian1<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
valid_points, valid_points_num,
dh_ang_.buffer(),
point_hessians + valid_points_num * 76, point_hessians + valid_points_num * 82, point_hessians + valid_points_num * 88,
point_hessians + valid_points_num * 94, point_hessians + valid_points_num * 77, point_hessians + valid_points_num * 100,
point_hessians + valid_points_num * 83, point_hessians + valid_points_num * 106, point_hessians + valid_points_num * 89);
checkCudaErrors(cudaGetLastError());
computePointHessian2<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
valid_points, valid_points_num,
dh_ang_.buffer(),
point_hessians + valid_points_num * 95, point_hessians + valid_points_num * 101, point_hessians + valid_points_num * 107);
checkCudaErrors(cudaGetLastError());
}
checkCudaErrors(cudaDeviceSynchronize());
double *tmp_hessian;
checkCudaErrors(cudaMalloc(&tmp_hessian, sizeof(double) * valid_voxel_num * 6));
double *e_x_cov_x;
checkCudaErrors(cudaMalloc(&e_x_cov_x, sizeof(double) * valid_voxel_num));
double *cov_dxd_pi;
checkCudaErrors(cudaMalloc(&cov_dxd_pi, sizeof(double) * valid_voxel_num * 3 * 6));
computeExCovX<<<grid_x, block_x>>>(trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
gauss_d1_, gauss_d2_,
e_x_cov_x,
inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num);
checkCudaErrors(cudaGetLastError());
computeScoreList<<<grid_x, block_x>>>(starting_voxel_id, voxel_id, valid_points_num, e_x_cov_x, gauss_d1_, score);
checkCudaErrors(cudaGetLastError());
int block_x2 = (valid_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_voxel_num;
int grid_x2 = (valid_voxel_num - 1) / block_x2 + 1;
updateExCovX<<<grid_x2, block_x2>>>(e_x_cov_x, gauss_d2_, valid_voxel_num);
checkCudaErrors(cudaGetLastError());
grid.x = grid_x;
grid.y = 3;
grid.z = 6;
computeCovDxdPi<<<grid, block_x>>>(valid_points, starting_voxel_id, voxel_id, valid_points_num,
inverse_covariance, voxel_num,
gauss_d1_, gauss_d2_, point_gradients,
cov_dxd_pi, valid_voxel_num);
checkCudaErrors(cudaGetLastError());
grid.x = grid_x;
grid.y = 6;
grid.z = 1;
computeScoreGradientList<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
voxel_num, e_x_cov_x,
cov_dxd_pi, gauss_d1_, valid_voxel_num, gradients);
checkCudaErrors(cudaGetLastError());
if (compute_hessian) {
grid.y = 6;
grid.z = 1;
computeHessianListS0<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num,
point_gradients, point_gradients + 6 * valid_points_num, point_gradients + 12 * valid_points_num,
tmp_hessian, valid_voxel_num);
checkCudaErrors(cudaGetLastError());
grid.z = 6;
computeHessianListS1<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
gauss_d1_, gauss_d2_, hessians,
e_x_cov_x, tmp_hessian, cov_dxd_pi,
point_gradients,
valid_voxel_num);
checkCudaErrors(cudaGetLastError());
computeHessianListS2<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
gauss_d1_, e_x_cov_x,
inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num,
point_hessians, hessians, valid_voxel_num);
checkCudaErrors(cudaGetLastError());
}
int full_size = valid_points_num;
int half_size = (full_size - 1) / 2 + 1;
while (full_size > 1) {
block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size;
grid_x = (half_size - 1) / block_x + 1;
grid.x = grid_x;
grid.y = 1;
grid.z = 6;
matrixSum<<<grid, block_x>>>(gradients, full_size, half_size, 1, 6, valid_points_num);
checkCudaErrors(cudaGetLastError());
grid.y = 6;
matrixSum<<<grid, block_x>>>(hessians, full_size, half_size, 6, 6, valid_points_num);
checkCudaErrors(cudaGetLastError());
sumScore<<<grid_x, block_x>>>(score, full_size, half_size);
checkCudaErrors(cudaGetLastError());
full_size = half_size;
half_size = (full_size - 1) / 2 + 1;
}
checkCudaErrors(cudaDeviceSynchronize());
MatrixDevice dgrad(1, 6, valid_points_num, gradients), dhess(6, 6, valid_points_num, hessians);
MatrixHost hgrad(1, 6), hhess(6, 6);
hgrad.moveToHost(dgrad);
hhess.moveToHost(dhess);
for (int i = 0; i < 6; i++) {
score_gradient(i) = hgrad(i);
}
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
hessian(i, j) = hhess(i, j);
}
}
double score_inc;
checkCudaErrors(cudaMemcpy(&score_inc, score, sizeof(double), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(gradients));
checkCudaErrors(cudaFree(hessians));
checkCudaErrors(cudaFree(point_hessians));
checkCudaErrors(cudaFree(point_gradients));
checkCudaErrors(cudaFree(score));
checkCudaErrors(cudaFree(tmp_hessian));
checkCudaErrors(cudaFree(e_x_cov_x));
checkCudaErrors(cudaFree(cov_dxd_pi));
if (valid_points != NULL)
checkCudaErrors(cudaFree(valid_points));
if (voxel_id != NULL)
checkCudaErrors(cudaFree(voxel_id));
if (starting_voxel_id != NULL)
checkCudaErrors(cudaFree(starting_voxel_id));
return score_inc;
}
void GNormalDistributionsTransform::computeAngleDerivatives(MatrixHost &pose, bool compute_hessian)
{
double cx, cy, cz, sx, sy, sz;
if (fabs(pose(3)) < 10e-5) {
cx = 1.0;
sx = 0.0;
} else {
cx = cos(pose(3));
sx = sin(pose(3));
}
if (fabs(pose(4)) < 10e-5) {
cy = 1.0;
sy = 0.0;
} else {
cy = cos(pose(4));
sy = sin(pose(4));
}
if (fabs(pose(5)) < 10e-5) {
cz = 1.0;
sz = 0.0;
} else {
cz = cos(pose(5));
sz = sin(pose(5));
}
j_ang_(0) = -sx * sz + cx * sy * cz;
j_ang_(1) = -sx * cz - cx * sy * sz;
j_ang_(2) = -cx * cy;
j_ang_(3) = cx * sz + sx * sy * cz;
j_ang_(4) = cx * cz - sx * sy * sz;
j_ang_(5) = -sx * cy;
j_ang_(6) = -sy * cz;
j_ang_(7) = sy * sz;
j_ang_(8) = cy;
j_ang_(9) = sx * cy * cz;
j_ang_(10) = -sx * cy * sz;
j_ang_(11) = sx * sy;
j_ang_(12) = -cx * cy * cz;
j_ang_(13) = cx * cy * sz;
j_ang_(14) = -cx * sy;
j_ang_(15) = -cy * sz;
j_ang_(16) = -cy * cz;
j_ang_(17) = 0;
j_ang_(18) = cx * cz - sx * sy * sz;
j_ang_(19) = -cx * sz - sx * sy * cz;
j_ang_(20) = 0;
j_ang_(21) = sx * cz + cx * sy * sz;
j_ang_(22) = cx * sy * cz - sx * sz;
j_ang_(23) = 0;
j_ang_.moveToGpu(dj_ang_);
if (compute_hessian) {
h_ang_(0) = -cx * sz - sx * sy * cz;
h_ang_(1) = -cx * cz + sx * sy * sz;
h_ang_(2) = sx * cy;
h_ang_(3) = -sx * sz + cx * sy * cz;
h_ang_(4) = -cx * sy * sz - sx * cz;
h_ang_(5) = -cx * cy;
h_ang_(6) = cx * cy * cz;
h_ang_(7) = -cx * cy * sz;
h_ang_(8) = cx * sy;
h_ang_(9) = sx * cy * cz;
h_ang_(10) = -sx * cy * sz;
h_ang_(11) = sx * sy;
h_ang_(12) = -sx * cz - cx * sy * sz;
h_ang_(13) = sx * sz - cx * sy * cz;
h_ang_(14) = 0;
h_ang_(15) = cx * cz - sx * sy * sz;
h_ang_(16) = -sx * sy * cz - cx * sz;
h_ang_(17) = 0;
h_ang_(18) = -cy * cz;
h_ang_(19) = cy * sz;
h_ang_(20) = sy;
h_ang_(21) = -sx * sy * cz;
h_ang_(22) = sx * sy * sz;
h_ang_(23) = sx * cy;
h_ang_(24) = cx * sy * cz;
h_ang_(25) = -cx * sy * sz;
h_ang_(26) = -cx * cy;
h_ang_(27) = sy * sz;
h_ang_(28) = sy * cz;
h_ang_(29) = 0;
h_ang_(30) = -sx * cy * sz;
h_ang_(31) = -sx * cy * cz;
h_ang_(32) = 0;
h_ang_(33) = cx * cy * sz;
h_ang_(34) = cx * cy * cz;
h_ang_(35) = 0;
h_ang_(36) = -cy * cz;
h_ang_(37) = cy * sz;
h_ang_(38) = 0;
h_ang_(39) = -cx * sz - sx * sy * cz;
h_ang_(40) = -cx * cz + sx * sy * sz;
h_ang_(41) = 0;
h_ang_(42) = -sx * sz + cx * sy * cz;
h_ang_(43) = -cx * sy * sz - sx * cz;
h_ang_(44) = 0;
h_ang_.moveToGpu(dh_ang_);
}
}
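/* Editorial note: j_ang_ (24 entries) and h_ang_ (45 entries) hold the first- and second-order
 * partial derivatives of a rotated point with respect to the (roll, pitch, yaw) pose angles for
 * the rotation R = Rx(roll) * Ry(pitch) * Rz(yaw) used when final_transformation_ is built below.
 * They are uploaded to dj_ang_ / dh_ang_ and consumed by computePointGradients and the
 * computePointHessian* kernels. The layout appears to mirror PCL's NDT angle-derivative tables
 * (Magnusson 2009), but that reference is an assumption and is not stated in this excerpt.
 */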
__global__ void gpuTransform(float *in_x, float *in_y, float *in_z,
float *trans_x, float *trans_y, float *trans_z,
int point_num, MatrixDevice transform)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
float x, y, z;
for (int i = idx; i < point_num; i += stride) {
x = in_x[i];
y = in_y[i];
z = in_z[i];
trans_x[i] = transform(0, 0) * x + transform(0, 1) * y + transform(0, 2) * z + transform(0, 3);
trans_y[i] = transform(1, 0) * x + transform(1, 1) * y + transform(1, 2) * z + transform(1, 3);
trans_z[i] = transform(2, 0) * x + transform(2, 1) * y + transform(2, 2) * z + transform(2, 3);
}
}
void GNormalDistributionsTransform::transformPointCloud(float *in_x, float *in_y, float *in_z,
float *trans_x, float *trans_y, float *trans_z,
int points_number, const Eigen::Matrix<float, 4, 4> &transform)
{
Eigen::Transform<float, 3, Eigen::Affine> t(transform);
MatrixHost htrans(3, 4);
MatrixDevice dtrans(3, 4);
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 4; j++) {
htrans(i, j) = t(i, j);
}
}
htrans.moveToGpu(dtrans);
if (points_number > 0) {
int block_x = (points_number <= BLOCK_SIZE_X) ? points_number : BLOCK_SIZE_X;
int grid_x = (points_number - 1) / block_x + 1;
gpuTransform<<<grid_x, block_x >>>(in_x, in_y, in_z, trans_x, trans_y, trans_z, points_number, dtrans);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
}
dtrans.memFree();
}
double GNormalDistributionsTransform::computeStepLengthMT(const Eigen::Matrix<double, 6, 1> &x, Eigen::Matrix<double, 6, 1> &step_dir,
double step_init, double step_max, double step_min, double &score,
Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian,
float *trans_x, float *trans_y, float *trans_z, int points_num)
{
double phi_0 = -score;
double d_phi_0 = -(score_gradient.dot(step_dir));
Eigen::Matrix<double, 6, 1> x_t;
if (d_phi_0 >= 0) {
if (d_phi_0 == 0)
return 0;
else {
d_phi_0 *= -1;
step_dir *= -1;
}
}
int max_step_iterations = 10;
int step_iterations = 0;
double mu = 1.e-4;
double nu = 0.9;
double a_l = 0, a_u = 0;
double f_l = auxilaryFunction_PsiMT(a_l, phi_0, phi_0, d_phi_0, mu);
double g_l = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu);
double f_u = auxilaryFunction_PsiMT(a_u, phi_0, phi_0, d_phi_0, mu);
double g_u = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu);
bool interval_converged = (step_max - step_min) < 0, open_interval = true; // the interval of uncertainty starts un-converged so the More-Thuente loop below can run
double a_t = step_init;
a_t = std::min(a_t, step_max);
a_t = std::max(a_t, step_min);
x_t = x + step_dir * a_t;
final_transformation_ = (Eigen::Translation<float, 3>(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ())).matrix();
transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_num, final_transformation_);
score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t);
double phi_t = -score;
double d_phi_t = -(score_gradient.dot(step_dir));
double psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu);
double d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu);
while (!interval_converged && step_iterations < max_step_iterations && !(psi_t <= 0 && d_phi_t <= -nu * d_phi_0)) {
if (open_interval) {
a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t);
} else {
a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t);
}
a_t = (a_t < step_max) ? a_t : step_max;
a_t = (a_t > step_min) ? a_t : step_min;
x_t = x + step_dir * a_t;
final_transformation_ = (Eigen::Translation<float, 3>(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()) *
Eigen::AngleAxis<float>(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ())).matrix();
transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_num, final_transformation_);
score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t, false);
phi_t = -score;
d_phi_t = -(score_gradient.dot(step_dir));
psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu);
d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu);
if (open_interval && (psi_t <= 0 && d_psi_t >= 0)) {
open_interval = false;
f_l += phi_0 - mu * d_phi_0 * a_l;
g_l += mu * d_phi_0;
f_u += phi_0 - mu * d_phi_0 * a_u;
g_u += mu * d_phi_0;
}
if (open_interval) {
interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t);
} else {
interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t);
}
step_iterations++;
}
if (step_iterations) {
computeHessian(hessian, trans_x, trans_y, trans_z, points_num, x_t);
}
real_iterations_ += step_iterations;
return a_t;
}
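/* Sketch (editorial assumption): auxilaryFunction_PsiMT and auxilaryFunction_dPsiMT are not
 * defined in this excerpt. Assuming the standard More-Thuente auxiliary function used by
 * PCL's ndt.hpp, they would look like:
 *
 *   double auxilaryFunction_PsiMT(double a, double f_a, double f_0, double g_0, double mu)
 *   { return f_a - f_0 - mu * g_0 * a; }     // psi(a) = phi(a) - phi(0) - mu * phi'(0) * a
 *
 *   double auxilaryFunction_dPsiMT(double g_a, double g_0, double mu)
 *   { return g_a - mu * g_0; }               // psi'(a) = phi'(a) - mu * phi'(0)
 *
 * These bodies are given only for readability; the real implementations live elsewhere in this class.
 */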
//Copied from ndt.hpp
double GNormalDistributionsTransform::trialValueSelectionMT (double a_l, double f_l, double g_l,
double a_u, double f_u, double g_u,
double a_t, double f_t, double g_t)
{
// Case 1 in Trial Value Selection [More, Thuente 1994]
if (f_t > f_l) {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
double w = std::sqrt (z * z - g_t * g_l);
// Equation 2.4.56 [Sun, Yuan 2006]
double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates f_l, f_t and g_l
// Equation 2.4.2 [Sun, Yuan 2006]
double a_q = a_l - 0.5 * (a_l - a_t) * g_l / (g_l - (f_l - f_t) / (a_l - a_t));
if (std::fabs (a_c - a_l) < std::fabs (a_q - a_l))
return (a_c);
else
return (0.5 * (a_q + a_c));
}
// Case 2 in Trial Value Selection [More, Thuente 1994]
else if (g_t * g_l < 0) {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
double w = std::sqrt (z * z - g_t * g_l);
// Equation 2.4.56 [Sun, Yuan 2006]
double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates f_l, g_l and g_t
// Equation 2.4.5 [Sun, Yuan 2006]
double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l;
if (std::fabs (a_c - a_t) >= std::fabs (a_s - a_t))
return (a_c);
else
return (a_s);
}
// Case 3 in Trial Value Selection [More, Thuente 1994]
else if (std::fabs (g_t) <= std::fabs (g_l)) {
// Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
double w = std::sqrt (z * z - g_t * g_l);
double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);
// Calculate the minimizer of the quadratic that interpolates g_l and g_t
// Equation 2.4.5 [Sun, Yuan 2006]
double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l;
double a_t_next;
if (std::fabs (a_c - a_t) < std::fabs (a_s - a_t))
a_t_next = a_c;
else
a_t_next = a_s;
if (a_t > a_l)
return (std::min (a_t + 0.66 * (a_u - a_t), a_t_next));
else
return (std::max (a_t + 0.66 * (a_u - a_t), a_t_next));
}
// Case 4 in Trial Value Selection [More, Thuente 1994]
else {
// Calculate the minimizer of the cubic that interpolates f_u, f_t, g_u and g_t
// Equation 2.4.52 [Sun, Yuan 2006]
double z = 3 * (f_t - f_u) / (a_t - a_u) - g_t - g_u;
double w = std::sqrt (z * z - g_t * g_u);
// Equation 2.4.56 [Sun, Yuan 2006]
return (a_u + (a_t - a_u) * (w - g_u - z) / (g_t - g_u + 2 * w));
}
}
//Copied from ndt.hpp
double GNormalDistributionsTransform::updateIntervalMT (double &a_l, double &f_l, double &g_l,
double &a_u, double &f_u, double &g_u,
double a_t, double f_t, double g_t)
{
// Case U1 in Update Algorithm and Case a in Modified Update Algorithm [More, Thuente 1994]
if (f_t > f_l) {
a_u = a_t;
f_u = f_t;
g_u = g_t;
return (false);
}
// Case U2 in Update Algorithm and Case b in Modified Update Algorithm [More, Thuente 1994]
else if (g_t * (a_l - a_t) > 0) {
a_l = a_t;
f_l = f_t;
g_l = g_t;
return (false);
}
// Case U3 in Update Algorithm and Case c in Modified Update Algorithm [More, Thuente 1994]
else if (g_t * (a_l - a_t) < 0) {
a_u = a_l;
f_u = f_l;
g_u = g_l;
a_l = a_t;
f_l = f_t;
g_l = g_t;
return (false);
}
// Interval Converged
else
return (true);
}
void GNormalDistributionsTransform::computeHessian(Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num, Eigen::Matrix<double, 6, 1> &p)
{
int *valid_points, *voxel_id, *starting_voxel_id;
int valid_voxel_num, valid_points_num;
//Radius Search
voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX, &valid_points, &starting_voxel_id, &voxel_id, &valid_voxel_num, &valid_points_num);
double *centroid = voxel_grid_.getCentroidList();
double *covariance = voxel_grid_.getCovarianceList();
double *inverse_covariance = voxel_grid_.getInverseCovarianceList();
int *points_per_voxel = voxel_grid_.getPointsPerVoxelList();
int voxel_num = voxel_grid_.getVoxelNum();
if (valid_points_num <= 0)
return;
//Update score gradient and hessian matrix
double *hessians, *point_gradients, *point_hessians;
checkCudaErrors(cudaMalloc(&hessians, sizeof(double) * valid_points_num * 6 * 6));
checkCudaErrors(cudaMalloc(&point_gradients, sizeof(double) * valid_points_num * 3 * 6));
checkCudaErrors(cudaMalloc(&point_hessians, sizeof(double) * valid_points_num * 18 * 6));
checkCudaErrors(cudaMemset(hessians, 0, sizeof(double) * valid_points_num * 6 * 6));
checkCudaErrors(cudaMemset(point_gradients, 0, sizeof(double) * valid_points_num * 3 * 6));
checkCudaErrors(cudaMemset(point_hessians, 0, sizeof(double) * valid_points_num * 18 * 6));
int block_x = (valid_points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_points_num;
int grid_x = (valid_points_num - 1) / block_x + 1;
dim3 grid;
computePointGradients<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
valid_points, valid_points_num,
dj_ang_.buffer(),
point_gradients,
point_gradients + valid_points_num * 7,
point_gradients + valid_points_num * 14,
point_gradients + valid_points_num * 9,
point_gradients + valid_points_num * 15,
point_gradients + valid_points_num * 4,
point_gradients + valid_points_num * 10,
point_gradients + valid_points_num * 16,
point_gradients + valid_points_num * 5,
point_gradients + valid_points_num * 11,
point_gradients + valid_points_num * 17);
checkCudaErrors(cudaGetLastError());
computePointHessian0<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
valid_points, valid_points_num,
dh_ang_.buffer(),
point_hessians + valid_points_num * 57, point_hessians + valid_points_num * 63, point_hessians + valid_points_num * 69,
point_hessians + valid_points_num * 75, point_hessians + valid_points_num * 58, point_hessians + valid_points_num * 81,
point_hessians + valid_points_num * 64, point_hessians + valid_points_num * 87, point_hessians + valid_points_num * 70,
point_hessians + valid_points_num * 93, point_hessians + valid_points_num * 59, point_hessians + valid_points_num * 99,
point_hessians + valid_points_num * 65, point_hessians + valid_points_num * 105, point_hessians + valid_points_num * 71);
checkCudaErrors(cudaGetLastError());
computePointHessian1<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
valid_points, valid_points_num,
dh_ang_.buffer(),
point_hessians + valid_points_num * 76, point_hessians + valid_points_num * 82, point_hessians + valid_points_num * 88,
point_hessians + valid_points_num * 94, point_hessians + valid_points_num * 77, point_hessians + valid_points_num * 100,
point_hessians + valid_points_num * 83, point_hessians + valid_points_num * 106, point_hessians + valid_points_num * 89);
checkCudaErrors(cudaGetLastError());
computePointHessian2<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
valid_points, valid_points_num,
dh_ang_.buffer(),
point_hessians + valid_points_num * 95, point_hessians + valid_points_num * 101, point_hessians + valid_points_num * 107);
checkCudaErrors(cudaGetLastError());
double *tmp_hessian;
checkCudaErrors(cudaMalloc(&tmp_hessian, sizeof(double) * valid_voxel_num * 6));
double *e_x_cov_x;
checkCudaErrors(cudaMalloc(&e_x_cov_x, sizeof(double) * valid_voxel_num));
double *cov_dxd_pi;
checkCudaErrors(cudaMalloc(&cov_dxd_pi, sizeof(double) * valid_voxel_num * 3 * 6));
computeExCovX<<<grid_x, block_x>>>(trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
gauss_d1_, gauss_d2_,
e_x_cov_x,
inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num);
checkCudaErrors(cudaGetLastError());
grid.x = grid_x;
grid.y = 3;
grid.z = 6;
computeCovDxdPi<<<grid, block_x>>>(valid_points, starting_voxel_id, voxel_id, valid_points_num,
inverse_covariance, voxel_num,
gauss_d1_, gauss_d2_, point_gradients,
cov_dxd_pi, valid_voxel_num);
checkCudaErrors(cudaGetLastError());
int block_x2 = (valid_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_voxel_num;
int grid_x2 = (valid_voxel_num - 1) / block_x2 + 1;
updateExCovX<<<grid_x2, block_x2>>>(e_x_cov_x, gauss_d2_, valid_voxel_num);
checkCudaErrors(cudaGetLastError());
grid.y = 6;
grid.z = 1;
computeHessianListS0<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num,
point_gradients, point_gradients + 6 * valid_points_num, point_gradients + 12 * valid_points_num,
tmp_hessian, valid_voxel_num);
checkCudaErrors(cudaGetLastError());
grid.z = 6;
computeHessianListS1<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
gauss_d1_, gauss_d2_, hessians,
e_x_cov_x, tmp_hessian, cov_dxd_pi,
point_gradients,
valid_voxel_num);
checkCudaErrors(cudaGetLastError());
computeHessianListS2<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points,
starting_voxel_id, voxel_id, valid_points_num,
centroid, centroid + voxel_num, centroid + 2 * voxel_num,
gauss_d1_, e_x_cov_x,
inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num,
point_hessians, hessians, valid_voxel_num);
checkCudaErrors(cudaGetLastError());
int full_size = valid_points_num;
int half_size = (full_size - 1) / 2 + 1;
while (full_size > 1) {
block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size;
grid_x = (half_size - 1) / block_x + 1;
grid.x = grid_x;
grid.y = 6;
grid.z = 6;
matrixSum<<<grid_x, block_x>>>(hessians, full_size, half_size, 6, 6, valid_points_num);
full_size = half_size;
half_size = (full_size - 1) / 2 + 1;
}
checkCudaErrors(cudaDeviceSynchronize());
MatrixDevice dhessian(6, 6, valid_points_num, hessians);
MatrixHost hhessian(6, 6);
hhessian.moveToHost(dhessian);
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
hessian(i, j) = hhessian(i, j);
}
}
checkCudaErrors(cudaFree(hessians));
checkCudaErrors(cudaFree(point_hessians));
checkCudaErrors(cudaFree(point_gradients));
checkCudaErrors(cudaFree(tmp_hessian));
checkCudaErrors(cudaFree(e_x_cov_x));
checkCudaErrors(cudaFree(cov_dxd_pi));
if (valid_points != NULL) {
checkCudaErrors(cudaFree(valid_points));
}
if (voxel_id != NULL) {
checkCudaErrors(cudaFree(voxel_id));
}
if (starting_voxel_id != NULL) {
checkCudaErrors(cudaFree(starting_voxel_id));
}
dhessian.memFree();
}
template <typename T>
__global__ void gpuSum(T *input, int size, int half_size)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < half_size; i += stride) {
if (i + half_size < size) {
input[i] += input[i + half_size]; // fold the upper half onto the lower half (the if above already guards the read)
}
}
}
double GNormalDistributionsTransform::getFitnessScore(double max_range)
{
double fitness_score = 0.0;
float *trans_x, *trans_y, *trans_z;
checkCudaErrors(cudaMalloc(&trans_x, sizeof(float) * points_number_));
checkCudaErrors(cudaMalloc(&trans_y, sizeof(float) * points_number_));
checkCudaErrors(cudaMalloc(&trans_z, sizeof(float) * points_number_));
transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_number_, final_transformation_);
int *valid_distance;
checkCudaErrors(cudaMalloc(&valid_distance, sizeof(int) * points_number_));
double *min_distance;
checkCudaErrors(cudaMalloc(&min_distance, sizeof(double) * points_number_));
voxel_grid_.nearestNeighborSearch(trans_x, trans_y, trans_z, points_number_, valid_distance, min_distance, max_range);
int size = points_number_;
int half_size;
while (size > 1) {
half_size = (size - 1) / 2 + 1;
int block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size;
int grid_x = (half_size - 1) / block_x + 1;
gpuSum<double><<<grid_x, block_x>>>(min_distance, size, half_size);
checkCudaErrors(cudaGetLastError());
gpuSum<int><<<grid_x, block_x>>>(valid_distance, size, half_size);
checkCudaErrors(cudaGetLastError());
size = half_size;
}
checkCudaErrors(cudaDeviceSynchronize());
int nr;
checkCudaErrors(cudaMemcpy(&nr, valid_distance, sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&fitness_score, min_distance, sizeof(double), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(trans_x));
checkCudaErrors(cudaFree(trans_y));
checkCudaErrors(cudaFree(trans_z));
checkCudaErrors(cudaFree(valid_distance));
checkCudaErrors(cudaFree(min_distance));
if (nr > 0)
return (fitness_score / nr);
return DBL_MAX;
}
}
|
1558f2e1a8f25f6d936fcae590a844f21b72689e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8) {
if (comp <= (-1.6698E-41f / (-1.7344E11f * cosf((var_1 * (-0.0f / var_2 - var_3)))))) {
if (comp == floorf(cosf((var_4 - sinf((-0.0f * (var_5 + sqrtf(+1.4745E13f)))))))) {
comp = (+1.8175E-43f + (+1.0324E5f - (var_6 / (var_7 / +1.6175E-41f))));
comp = (var_8 / log10f(+0.0f));
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9);
hipDeviceSynchronize();
return 0;
}
| 1558f2e1a8f25f6d936fcae590a844f21b72689e.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8) {
if (comp <= (-1.6698E-41f / (-1.7344E11f * cosf((var_1 * (-0.0f / var_2 - var_3)))))) {
if (comp == floorf(cosf((var_4 - sinf((-0.0f * (var_5 + sqrtf(+1.4745E13f)))))))) {
comp = (+1.8175E-43f + (+1.0324E5f - (var_6 / (var_7 / +1.6175E-41f))));
comp = (var_8 / log10f(+0.0f));
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9);
cudaDeviceSynchronize();
return 0;
}
|
e1dbdf126cdcfae7476ef6969b11c35a219d4ed5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file src/cuda/convolution/backward_data/deconv_int8_helper.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "src/cuda/convolution/backward_data/deconv_int8_helper.cuh"
#include "src/cuda/transpose_utils.cuh"
using namespace megdnn;
using namespace cuda;
using namespace deconv;
#define BLOCKSIZE_X 16
#define BLOCKSIZE_Y 16
namespace {
__global__ void reorder_filter_nc4hw4_to_n4hwc4_kernel(
int8_t* __restrict__ dst, const int8_t* __restrict__ src, uint32_t OC,
uint32_t IC, uint32_t FHFW) {
const int32_t ocb = blockIdx.z;
const int32_t icb = blockIdx.y * BLOCKSIZE_X + threadIdx.y;
const int32_t fhfw = blockIdx.x * BLOCKSIZE_Y + threadIdx.x;
if (fhfw < FHFW && icb < IC / 4) {
array_wrapper<int, 4> src_value;
int dst_value[4];
#pragma unroll
for (int i = 0; i < 4; i++) {
src_value[i] = *reinterpret_cast<const int*>(
src + (ocb * 4 + i) * IC * FHFW + (icb * FHFW + fhfw) * 4);
}
// transpose 4x4
auto trans = transpose_int8_interleavedx4<4, int>();
trans(src_value, dst_value);
#pragma unroll
for (int i = 0; i < 4; i++) {
*reinterpret_cast<int*>(
dst + (ocb * FHFW * IC + fhfw * IC + icb * 4 + i) * 4) =
dst_value[i];
}
}
}
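/* Editorial note: reading the index arithmetic above, each int (4 packed int8 values along IC)
 * moves from the NC4HW4 source layout
 *     src[(ocb * 4 + i) * IC * FHFW + (icb * FHFW + fhfw) * 4]
 * to the N4HWC4 destination layout
 *     dst[(ocb * FHFW * IC + fhfw * IC + icb * 4 + i) * 4],
 * i.e. a 4x4 int8 transpose swaps the inner OC lane (i) with the inner IC lane.
 */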
template <uint32_t interleaved, typename vec_type>
__global__ void reorder_filter_nhwc_to_cnxhwx_kernel(
int8_t* __restrict__ dst, const int8_t* __restrict__ src, uint32_t OC,
uint32_t IC, uint32_t FHFW) {
uint32_t lane = threadIdx.x + blockIdx.x * blockDim.x;
const int32_t ocb = lane / (FHFW * IC / 4);
const int32_t fhfw_icb = lane % (FHFW * IC / 4);
const int32_t fhfw = fhfw_icb / (IC / 4);
const int32_t icb = fhfw_icb % (IC / 4);
if (ocb < OC / interleaved && fhfw < FHFW) {
array_wrapper<int, interleaved> src_value;
vec_type dst_value[4];
#pragma unroll
for (int i = 0; i < interleaved; i++) {
src_value[i] = *reinterpret_cast<const int*>(
src + (ocb * interleaved + i) * FHFW * IC + fhfw * IC + icb * 4);
}
auto trans = transpose_int8_interleavedx4<interleaved, vec_type>();
trans(src_value, dst_value);
#pragma unroll
for (int i = 0; i < 4; i++) {
*reinterpret_cast<vec_type*>(
dst + (icb * 4 + i) * FHFW * OC +
(ocb * FHFW + fhfw) * interleaved) = dst_value[i];
}
}
}
} // namespace
void megdnn::cuda::deconv::reorder_filter_nc4hw4_to_n4hwc4(
int8_t* dst, const int8_t* src, uint32_t OC, uint32_t IC, uint32_t FH,
uint32_t FW, hipStream_t stream) {
dim3 threads(BLOCKSIZE_X, BLOCKSIZE_Y, 1);
dim3 blocks(DIVUP(FH * FW, BLOCKSIZE_X), DIVUP(IC / 4, BLOCKSIZE_Y), OC / 4);
hipLaunchKernelGGL(( reorder_filter_nc4hw4_to_n4hwc4_kernel), dim3(blocks), dim3(threads), 0, stream,
dst, src, OC, IC, FH * FW);
after_kernel_launch();
}
void megdnn::cuda::deconv::reorder_filter_nhwc_to_cnxhwx(
int8_t* dst, const int8_t* src, uint32_t OC, uint32_t IC, uint32_t FH,
uint32_t FW, uint32_t interleaved, hipStream_t stream) {
int32_t vthreads = OC / interleaved * IC / 4 * FH * FW;
int32_t nr_threads = ::min(256, vthreads);
int32_t nr_blocks = DIVUP(vthreads, nr_threads);
if (interleaved == 4) {
hipLaunchKernelGGL(( reorder_filter_nhwc_to_cnxhwx_kernel<4, int>)
, dim3(nr_blocks), dim3(nr_threads), 0, stream, dst, src, OC, IC, FH * FW);
} else if (interleaved == 8) {
hipLaunchKernelGGL(( reorder_filter_nhwc_to_cnxhwx_kernel<8, int2>)
, dim3(nr_blocks), dim3(nr_threads), 0, stream, dst, src, OC, IC, FH * FW);
} else {
hipLaunchKernelGGL(( reorder_filter_nhwc_to_cnxhwx_kernel<16, int4>)
, dim3(nr_blocks), dim3(nr_threads), 0, stream, dst, src, OC, IC, FH * FW);
}
after_kernel_launch();
}
// vim: syntax=cuda.doxygen
| e1dbdf126cdcfae7476ef6969b11c35a219d4ed5.cu | /**
* \file src/cuda/convolution/backward_data/deconv_int8_helper.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "src/cuda/convolution/backward_data/deconv_int8_helper.cuh"
#include "src/cuda/transpose_utils.cuh"
using namespace megdnn;
using namespace cuda;
using namespace deconv;
#define BLOCKSIZE_X 16
#define BLOCKSIZE_Y 16
namespace {
__global__ void reorder_filter_nc4hw4_to_n4hwc4_kernel(
int8_t* __restrict__ dst, const int8_t* __restrict__ src, uint32_t OC,
uint32_t IC, uint32_t FHFW) {
const int32_t ocb = blockIdx.z;
const int32_t icb = blockIdx.y * BLOCKSIZE_X + threadIdx.y;
const int32_t fhfw = blockIdx.x * BLOCKSIZE_Y + threadIdx.x;
if (fhfw < FHFW && icb < IC / 4) {
array_wrapper<int, 4> src_value;
int dst_value[4];
#pragma unroll
for (int i = 0; i < 4; i++) {
src_value[i] = *reinterpret_cast<const int*>(
src + (ocb * 4 + i) * IC * FHFW + (icb * FHFW + fhfw) * 4);
}
// transpose 4x4
auto trans = transpose_int8_interleavedx4<4, int>();
trans(src_value, dst_value);
#pragma unroll
for (int i = 0; i < 4; i++) {
*reinterpret_cast<int*>(
dst + (ocb * FHFW * IC + fhfw * IC + icb * 4 + i) * 4) =
dst_value[i];
}
}
}
template <uint32_t interleaved, typename vec_type>
__global__ void reorder_filter_nhwc_to_cnxhwx_kernel(
int8_t* __restrict__ dst, const int8_t* __restrict__ src, uint32_t OC,
uint32_t IC, uint32_t FHFW) {
uint32_t lane = threadIdx.x + blockIdx.x * blockDim.x;
const int32_t ocb = lane / (FHFW * IC / 4);
const int32_t fhfw_icb = lane % (FHFW * IC / 4);
const int32_t fhfw = fhfw_icb / (IC / 4);
const int32_t icb = fhfw_icb % (IC / 4);
if (ocb < OC / interleaved && fhfw < FHFW) {
array_wrapper<int, interleaved> src_value;
vec_type dst_value[4];
#pragma unroll
for (int i = 0; i < interleaved; i++) {
src_value[i] = *reinterpret_cast<const int*>(
src + (ocb * interleaved + i) * FHFW * IC + fhfw * IC + icb * 4);
}
auto trans = transpose_int8_interleavedx4<interleaved, vec_type>();
trans(src_value, dst_value);
#pragma unroll
for (int i = 0; i < 4; i++) {
*reinterpret_cast<vec_type*>(
dst + (icb * 4 + i) * FHFW * OC +
(ocb * FHFW + fhfw) * interleaved) = dst_value[i];
}
}
}
} // namespace
void megdnn::cuda::deconv::reorder_filter_nc4hw4_to_n4hwc4(
int8_t* dst, const int8_t* src, uint32_t OC, uint32_t IC, uint32_t FH,
uint32_t FW, cudaStream_t stream) {
dim3 threads(BLOCKSIZE_X, BLOCKSIZE_Y, 1);
dim3 blocks(DIVUP(FH * FW, BLOCKSIZE_X), DIVUP(IC / 4, BLOCKSIZE_Y), OC / 4);
reorder_filter_nc4hw4_to_n4hwc4_kernel<<<blocks, threads, 0, stream>>>(
dst, src, OC, IC, FH * FW);
after_kernel_launch();
}
void megdnn::cuda::deconv::reorder_filter_nhwc_to_cnxhwx(
int8_t* dst, const int8_t* src, uint32_t OC, uint32_t IC, uint32_t FH,
uint32_t FW, uint32_t interleaved, cudaStream_t stream) {
int32_t vthreads = OC / interleaved * IC / 4 * FH * FW;
int32_t nr_threads = std::min(256, vthreads);
int32_t nr_blocks = DIVUP(vthreads, nr_threads);
if (interleaved == 4) {
reorder_filter_nhwc_to_cnxhwx_kernel<4, int>
<<<nr_blocks, nr_threads, 0, stream>>>(dst, src, OC, IC, FH * FW);
} else if (interleaved == 8) {
reorder_filter_nhwc_to_cnxhwx_kernel<8, int2>
<<<nr_blocks, nr_threads, 0, stream>>>(dst, src, OC, IC, FH * FW);
} else {
reorder_filter_nhwc_to_cnxhwx_kernel<16, int4>
<<<nr_blocks, nr_threads, 0, stream>>>(dst, src, OC, IC, FH * FW);
}
after_kernel_launch();
}
// vim: syntax=cuda.doxygen
|
74d49e5d8eb697d2e3c4ceca8508257b771cbc23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@author Azzam Haidar
@author Tingxing Dong
@generated from zgemv_batched.cu normal z -> d, Sat Nov 15 19:53:59 2014
*/
#include "common_magma.h"
#define dgemv_bs 32
extern __shared__ double shared_data[];
__global__ void
kernel_dgemvn_batched(
int m, int n, double alpha,
double **dA_array, int lda,
double **x_array, int incx,
double beta, double **y_array, int incy)
{
double *A = dA_array[blockIdx.x];
double *x = x_array[blockIdx.x];
double *y = y_array[blockIdx.x];
int tx = threadIdx.x;
double res = MAGMA_D_ZERO;
double *buff = (double*)shared_data;
if(tx < n)
{
buff[tx] = x[tx*incx];
}
__syncthreads();
if(tx < m )
{
for(int j=0; j < n ; j++)
{
res += A[tx]*buff[j];
A += lda;
}
y[tx*incy] = alpha * res + y[tx*incy] * beta;
}
}
/*
Matrix Non-transpose Vector Multiplication
y := alpha*A*x + beta*y,
*/
extern "C"
void magmablas_dgemvn_batched(
int m, int n,
double alpha, double **dA_array, int lda,
double **x_array, int incx,
double beta, double **y_array, int incy,
int batchCount)
{
if( m > 512 || n > 512)
{
fprintf( stderr, "m=%d, n=%d, dgemv_batched nontranspose assume row && column lower than %d. Plz call magmablas_dgemv instead", m, n, 512);
return ;
}
dim3 grid(batchCount, 1, 1);
dim3 threads(max(m,n), 1, 1);
hipLaunchKernelGGL(( kernel_dgemvn_batched), dim3(grid), dim3(threads), n * sizeof(double) , 0, m, n, alpha, dA_array, lda, x_array, incx,
beta, y_array, incy);
}
__global__ void
kernel_dgemvt_batched(
int m, int n, int m1, double alpha,
double **dA_array, int lda,
double **x_array, int incx,
double beta, double **y_array, int incy)
{
double *A_ptr = dA_array[blockIdx.x];
double *x_ptr = x_array[blockIdx.x];
double *y_ptr = y_array[blockIdx.x];
int tx = threadIdx.x;
double res = MAGMA_D_ZERO;
if(tx<m)
{
A_ptr += lda * blockIdx.y + tx;
x_ptr += tx * incx;
}
__shared__ double sdata[dgemv_bs];
for(int i=0; i<m1; i+= dgemv_bs)
{
res += A_ptr[i] * x_ptr[i*incx];
}
if(m > m1)
{
if( tx + m1 < m )
{
res += A_ptr[m1] * x_ptr[m1*incx];
}
else
{
res = res;
}
}
sdata[tx] = res;
__syncthreads();
for(int s=blockDim.x/2; s>32;s>>=1)
{
if(tx<s)
{
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if(dgemv_bs > 32)
{
if(tx<32)
{
sdata[tx] += sdata[tx+32];
}
}
if(tx == 0)
{
for(int i=1;i<32;i++)
{
sdata[tx] += sdata[tx + i];
}
y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy];
}
}
/*
Matrix Transpose Vector Multiplication
y := alpha* A**T *x + beta*y,
*/
extern "C"
void magmablas_dgemvt_batched(
int m, int n,
double alpha, double **dA_array, int lda,
double **x_array, int incx,
double beta, double **y_array, int incy,
int batchCount)
{
dim3 grid(batchCount, n, 1);
dim3 threads(dgemv_bs, 1, 1);
int m1 = (m / dgemv_bs) * dgemv_bs;
hipLaunchKernelGGL(( kernel_dgemvt_batched) , dim3(grid), dim3(threads) , 0, 0, m, n, m1, alpha, dA_array, lda, x_array, incx, beta, y_array, incy);
}
#if defined(PRECISION_z) || defined (PRECISION_c)
__global__ void
kernel_dgemvc_batched(
int m, int n, int m1, double alpha,
double **dA_array, int lda,
double **x_array, int incx,
double beta, double **y_array, int incy)
{
double *A_ptr = dA_array[blockIdx.x];
double *x_ptr = x_array[blockIdx.x];
double *y_ptr = y_array[blockIdx.x];
int tx = threadIdx.x;
double res = MAGMA_D_ZERO;
if(tx<m)
{
A_ptr += lda * blockIdx.y + tx;
x_ptr += tx * incx;
}
__shared__ double sdata[dgemv_bs];
for(int i=0; i<m1; i+= dgemv_bs)
{
res += MAGMA_D_CNJG (A_ptr[i]) * x_ptr[i*incx];
}
if(m > m1)
{
if( tx + m1 < m )
{
res += MAGMA_D_CNJG(A_ptr[m1]) * x_ptr[m1*incx];
}
else
{
res = res;
}
}
sdata[tx] = res;
__syncthreads();
for(int s=blockDim.x/2; s>32;s>>=1)
{
if(tx<s)
{
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if(dgemv_bs > 32)
{
if(tx<32)
{
sdata[tx] += sdata[tx+32];
}
}
if(tx == 0)
{
for(int i=1;i<32;i++)
{
sdata[tx] += sdata[tx + i];
}
y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy];
}
}
/*
Matrix Conjugate Transpose Vector Multiplication
y := alpha* A**H *x + beta*y,
*/
extern "C"
void magmablas_dgemvc_batched(
int m, int n,
double alpha, double **dA_array, int lda,
double **x_array, int incx,
double beta, double **y_array, int incy,
int batchCount)
{
dim3 grid(batchCount, n, 1);
dim3 threads(dgemv_bs, 1, 1);
int m1 = (m / dgemv_bs) * dgemv_bs;
hipLaunchKernelGGL(( kernel_dgemvc_batched) , dim3(grid), dim3(threads) , 0, 0, m, n, m1, alpha, dA_array, lda, x_array, incx, beta, y_array, incy);
}
#endif // defined(PRECISION_z) || defined (PRECISION_c)
/**
Purpose
-------
This routine computes Y = alpha opt(A) x + beta y, on the GPU, where
A = dA_array[i],x = x_array[i] and y = y_array[i], i=[0,batchCount-1].
This is a batched version.
@param[in]
trans CHARACTER*1.
On entry, TRANS specifies the form of op( A ) to be used in
the matrix multiplication as follows:
= 'N': op( A ) = A.
= 'T': op( A ) = A**T.
= 'C': op( A ) = A**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix opt(A).
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix opt(A)
@param[in]
alpha DOUBLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA_array A = dA_array[i]
A: DOUBLE PRECISION array of dimension ( LDA, n ) on the GPU.
@param[in]
lda INTEGER.
LDA specifies the leading dimension of A.
@param[in]
x_array x = x_array[i]
x: DOUBLE PRECISION array of dimension.
n if trans == MagmaNoTrans.
m if trans == MagmaTrans or MagmaConjTrans.
@param[in]
incx INTEGER.
incx specifies the increment for the elements of x.
incx must not be zero.
@param[in]
beta DOUBLE PRECISION.
On entry, BETA specifies the scalar beta.
@param[out]
y_array y = y_array[i]:
On exit y = alpha opt(A) x + beta y.
y: DOUBLE PRECISION array of dimension.
m if trans == MagmaNoTrans.
n if trans == MagmaTrans or MagmaConjTrans.
@param[in]
incy INTEGER.
incy specifies the increment for the elements of y.
incy must not be zero.
@param[in]
batchCount INTEGER
number of pointers contained in dA_array, x_array and y_array.
@ingroup magma_dblas2
******************************************************************* */
extern "C"
void magmablas_dgemv_batched(
magma_trans_t trans, magma_int_t m, magma_int_t n,
double alpha,
magmaDouble_ptr dA_array[], magma_int_t ldda,
magmaDouble_ptr dx_array[], magma_int_t incx,
double beta,
magmaDouble_ptr dy_array[], magma_int_t incy,
magma_int_t batchCount)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if(m==0 || n ==0 ) return;
if ( trans == MagmaNoTrans ) {
magmablas_dgemvn_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
}
else if ( trans == MagmaTrans ) {
magmablas_dgemvt_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
}
else if ( trans == MagmaConjTrans ) {
#if defined(PRECISION_z) || defined (PRECISION_c)
magmablas_dgemvc_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
#else
magmablas_dgemvt_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
#endif
}
else {
fprintf( stderr, "trans = %c is invalid\n", lapacke_trans_const(trans) );
}
}
#undef dgemv_bs
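/*
 * Illustrative usage sketch (editorial, not part of the original MAGMA source). It shows one way a
 * caller might set up the batched pointer arrays for magmablas_dgemv_batched using plain HIP calls.
 * The function name, buffer names and sizes below are hypothetical; it is kept inside #if 0 so the
 * file's behavior is unchanged.
 */
#if 0
void example_dgemv_batched(int m, int n, int batchCount)
{
    // note: the MagmaNoTrans path above requires m <= 512 and n <= 512
    double **dA_array, **dx_array, **dy_array;          // device arrays of device pointers
    double **hA = (double**) malloc(batchCount * sizeof(double*));
    double **hx = (double**) malloc(batchCount * sizeof(double*));
    double **hy = (double**) malloc(batchCount * sizeof(double*));
    for (int i = 0; i < batchCount; ++i) {               // one m-by-n matrix and two vectors per batch entry
        hipMalloc((void**)&hA[i], sizeof(double) * m * n);
        hipMalloc((void**)&hx[i], sizeof(double) * n);
        hipMalloc((void**)&hy[i], sizeof(double) * m);
        // ... fill hA[i], hx[i], hy[i] with data ...
    }
    hipMalloc((void**)&dA_array, batchCount * sizeof(double*));
    hipMalloc((void**)&dx_array, batchCount * sizeof(double*));
    hipMalloc((void**)&dy_array, batchCount * sizeof(double*));
    hipMemcpy(dA_array, hA, batchCount * sizeof(double*), hipMemcpyHostToDevice);
    hipMemcpy(dx_array, hx, batchCount * sizeof(double*), hipMemcpyHostToDevice);
    hipMemcpy(dy_array, hy, batchCount * sizeof(double*), hipMemcpyHostToDevice);
    // y_i := 1.0 * A_i * x_i + 0.0 * y_i for every i in the batch (lda = m, unit strides)
    magmablas_dgemv_batched(MagmaNoTrans, m, n, 1.0, dA_array, m, dx_array, 1, 0.0, dy_array, 1, batchCount);
}
#endif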
| 74d49e5d8eb697d2e3c4ceca8508257b771cbc23.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@author Azzam Haidar
@author Tingxing Dong
@generated from zgemv_batched.cu normal z -> d, Sat Nov 15 19:53:59 2014
*/
#include "common_magma.h"
#define dgemv_bs 32
extern __shared__ double shared_data[];
__global__ void
kernel_dgemvn_batched(
int m, int n, double alpha,
double **dA_array, int lda,
double **x_array, int incx,
double beta, double **y_array, int incy)
{
double *A = dA_array[blockIdx.x];
double *x = x_array[blockIdx.x];
double *y = y_array[blockIdx.x];
int tx = threadIdx.x;
double res = MAGMA_D_ZERO;
double *buff = (double*)shared_data;
if(tx < n)
{
buff[tx] = x[tx*incx];
}
__syncthreads();
if(tx < m )
{
for(int j=0; j < n ; j++)
{
res += A[tx]*buff[j];
A += lda;
}
y[tx*incy] = alpha * res + y[tx*incy] * beta;
}
}
/*
Matrix Non-transpose Vector Multiplication
y := alpha*A*x + beta*y,
*/
extern "C"
void magmablas_dgemvn_batched(
int m, int n,
double alpha, double **dA_array, int lda,
double **x_array, int incx,
double beta, double **y_array, int incy,
int batchCount)
{
if( m > 512 || n > 512)
{
fprintf( stderr, "m=%d, n=%d, dgemv_batched nontranspose assume row && column lower than %d. Plz call magmablas_dgemv instead", m, n, 512);
return ;
}
dim3 grid(batchCount, 1, 1);
dim3 threads(max(m,n), 1, 1);
kernel_dgemvn_batched<<< grid, threads, n * sizeof(double) >>>( m, n, alpha, dA_array, lda, x_array, incx,
beta, y_array, incy);
}
__global__ void
kernel_dgemvt_batched(
int m, int n, int m1, double alpha,
double **dA_array, int lda,
double **x_array, int incx,
double beta, double **y_array, int incy)
{
double *A_ptr = dA_array[blockIdx.x];
double *x_ptr = x_array[blockIdx.x];
double *y_ptr = y_array[blockIdx.x];
int tx = threadIdx.x;
double res = MAGMA_D_ZERO;
if(tx<m)
{
A_ptr += lda * blockIdx.y + tx;
x_ptr += tx * incx;
}
__shared__ double sdata[dgemv_bs];
for(int i=0; i<m1; i+= dgemv_bs)
{
res += A_ptr[i] * x_ptr[i*incx];
}
if(m > m1)
{
if( tx + m1 < m )
{
res += A_ptr[m1] * x_ptr[m1*incx];
}
else
{
res = res;
}
}
sdata[tx] = res;
__syncthreads();
for(int s=blockDim.x/2; s>32;s>>=1)
{
if(tx<s)
{
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if(dgemv_bs > 32)
{
if(tx<32)
{
sdata[tx] += sdata[tx+32];
}
}
if(tx == 0)
{
for(int i=1;i<32;i++)
{
sdata[tx] += sdata[tx + i];
}
y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy];
}
}
/*
Matrix Transpose Vector Multiplication
y := alpha* A**T *x + beta*y,
*/
extern "C"
void magmablas_dgemvt_batched(
int m, int n,
double alpha, double **dA_array, int lda,
double **x_array, int incx,
double beta, double **y_array, int incy,
int batchCount)
{
dim3 grid(batchCount, n, 1);
dim3 threads(dgemv_bs, 1, 1);
int m1 = (m / dgemv_bs) * dgemv_bs;
kernel_dgemvt_batched <<< grid, threads >>>(m, n, m1, alpha, dA_array, lda, x_array, incx, beta, y_array, incy);
}
#if defined(PRECISION_z) || defined (PRECISION_c)
__global__ void
kernel_dgemvc_batched(
int m, int n, int m1, double alpha,
double **dA_array, int lda,
double **x_array, int incx,
double beta, double **y_array, int incy)
{
double *A_ptr = dA_array[blockIdx.x];
double *x_ptr = x_array[blockIdx.x];
double *y_ptr = y_array[blockIdx.x];
int tx = threadIdx.x;
double res = MAGMA_D_ZERO;
if(tx<m)
{
A_ptr += lda * blockIdx.y + tx;
x_ptr += tx * incx;
}
__shared__ double sdata[dgemv_bs];
for(int i=0; i<m1; i+= dgemv_bs)
{
res += MAGMA_D_CNJG (A_ptr[i]) * x_ptr[i*incx];
}
if(m > m1)
{
if( tx + m1 < m )
{
res += MAGMA_D_CNJG(A_ptr[m1]) * x_ptr[m1*incx];
}
else
{
res = res;
}
}
sdata[tx] = res;
__syncthreads();
for(int s=blockDim.x/2; s>32;s>>=1)
{
if(tx<s)
{
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if(dgemv_bs > 32)
{
if(tx<32)
{
sdata[tx] += sdata[tx+32];
}
}
if(tx == 0)
{
for(int i=1;i<32;i++)
{
sdata[tx] += sdata[tx + i];
}
y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy];
}
}
/*
Matrix Conjugate Transpose Vector Multiplication
y := alpha* A**H *x + beta*y,
*/
extern "C"
void magmablas_dgemvc_batched(
int m, int n,
double alpha, double **dA_array, int lda,
double **x_array, int incx,
double beta, double **y_array, int incy,
int batchCount)
{
dim3 grid(batchCount, n, 1);
dim3 threads(dgemv_bs, 1, 1);
int m1 = (m / dgemv_bs) * dgemv_bs;
kernel_dgemvc_batched <<< grid, threads >>>(m, n, m1, alpha, dA_array, lda, x_array, incx, beta, y_array, incy);
}
#endif // defined(PRECISION_z) || defined (PRECISION_c)
/**
Purpose
-------
This routine computes Y = alpha opt(A) x + beta y, on the GPU, where
A = dA_array[i],x = x_array[i] and y = y_array[i], i=[0,batchCount-1].
This is a batched version.
@param[in]
trans CHARACTER*1.
On entry, TRANS specifies the form of op( A ) to be used in
the matrix multiplication as follows:
= 'N': op( A ) = A.
= 'T': op( A ) = A**T.
= 'C': op( A ) = A**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix opt(A).
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix opt(A)
@param[in]
alpha DOUBLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA_array A = dA_array[i]
A: DOUBLE PRECISION array of dimension ( LDA, n ) on the GPU.
@param[in]
lda INTEGER.
LDA specifies the leading dimension of A.
@param[in]
x_array x = x_array[i]
x: DOUBLE PRECISION array of dimension.
n if trans == MagmaNoTrans.
m if trans == MagmaTrans or MagmaConjTrans.
@param[in]
incx INTEGER.
incx specifies the increment for the elements of x.
incx must not be zero.
@param[in]
beta DOUBLE PRECISION.
On entry, BETA specifies the scalar beta.
@param[out]
y_array y = y_array[i]:
On exit y = alpha opt(A) x + beta y.
y: DOUBLE PRECISION array of dimension.
m if trans == MagmaNoTrans.
n if trans == MagmaTrans or MagmaConjTrans.
@param[in]
incy INTEGER.
incy specifies the increment for the elements of y.
incy must not be zero.
@param[in]
batchCount INTEGER
number of pointers contained in dA_array, x_array and y_array.
@ingroup magma_dblas2
******************************************************************* */
extern "C"
void magmablas_dgemv_batched(
magma_trans_t trans, magma_int_t m, magma_int_t n,
double alpha,
magmaDouble_ptr dA_array[], magma_int_t ldda,
magmaDouble_ptr dx_array[], magma_int_t incx,
double beta,
magmaDouble_ptr dy_array[], magma_int_t incy,
magma_int_t batchCount)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if(m==0 || n ==0 ) return;
if ( trans == MagmaNoTrans ) {
magmablas_dgemvn_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
}
else if ( trans == MagmaTrans ) {
magmablas_dgemvt_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
}
else if ( trans == MagmaConjTrans ) {
#if defined(PRECISION_z) || defined (PRECISION_c)
magmablas_dgemvc_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
#else
magmablas_dgemvt_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
#endif
}
else {
fprintf( stderr, "trans = %c is invalid\n", lapacke_trans_const(trans) );
}
}
#undef dgemv_bs
|
0e47bf49658187a6487af79d3920f55687c8bc27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
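// Editorial comment: clamps each element of mat from above by the scalar val
// (target[i] = min(mat[i], val)) using a grid-stride loop over len elements.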
__global__ void kUpperBoundScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] > val ? val:mat[i];
}
} | 0e47bf49658187a6487af79d3920f55687c8bc27.cu | #include "includes.h"
__global__ void kUpperBoundScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] > val ? val:mat[i];
}
} |
5157c74d50b73999a1514bbf67a98a7ba1269b82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
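// Editorial comment: subtracts 1 from each of the numInts integers in pInts,
// one element per thread (no grid-stride loop, so the launch grid must cover numInts).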
__global__ void decrement_dynamic_kernel(int* pInts, size_t numInts)
{
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= numInts)
return;
pInts[idx] -= 1;
} | 5157c74d50b73999a1514bbf67a98a7ba1269b82.cu | #include "includes.h"
__global__ void decrement_dynamic_kernel(int* pInts, size_t numInts)
{
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= numInts)
return;
pInts[idx] -= 1;
} |
d97dfd1911dbe656d48045ceacfa3bf1c219af45.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<sys/time.h>
#define BLOCKSIZEX 128
#define BLOCKSIZEY 4
#define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY
#define GRIDSIZEX 4
#define GRIDSIZEY 64
#define GRIDSIZE GRIDSIZEX * GRIDSIZEY
#define THREAD_NUM BLOCKSIZE * GRIDSIZE
#define MIMAX 256
#define MJMAX GRIDSIZEY * BLOCKSIZEY + 2
#define MKMAX GRIDSIZEX * BLOCKSIZEX + 2
#define NN 750
/*static float p[MIMAX][MJMAX][MKMAX];
static float a[MIMAX][MJMAX][MKMAX][4];
static float b[MIMAX][MJMAX][MKMAX][3];
static float c[MIMAX][MJMAX][MKMAX][3];
static float bnd[MIMAX][MJMAX][MKMAX];
static float work1[MIMAX][MJMAX][MKMAX];
static float work2[MIMAX][MJMAX][MKMAX];*/
static int imax, jmax, kmax, mimax, mjmax, mkmax;
static float omega;
double second(){
struct timeval tm;
double t;
static int base_sec = 0, base_usec = 0;
gettimeofday(&tm, NULL);
if(base_sec == 0 && base_usec == 0){
base_sec = tm.tv_sec;
base_usec = tm.tv_usec;
t = 0.0;
}
else{
t = (double)(tm.tv_sec-base_sec) + ((double)(tm.tv_usec-base_usec))/1.0e6;
}
return t;
}
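/* Editorial note: the kernel below appears to be the Himeno pressure benchmark. It performs nn
 * Jacobi/SOR-style sweeps over an imax x jmax x kmax grid; each thread owns one (j,k) column,
 * marches along i, and writes the squared-residual contribution of the final sweep to gosa[tid],
 * which the host is expected to reduce over all threads afterwards.
 */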
__global__ void jacobi(float *a0, float *a1, float *a2, float *a3, float *b0, float *b1, float *b2, float *c0, float *c1, float *c2, float *p, float *wrk1, float *wrk2, float *bnd, int nn, int imax, int jmax, int kmax, float omega, float *gosa){
int i, j, k, n, xy, c, csb;
float s0, ss, temp;
//const int size = (imax-1)/(imax-1);
k = threadIdx.x + blockDim.x * blockIdx.x + 1;
j = threadIdx.y + blockDim.y * blockIdx.y + 1;
const int tid = (k-1) + (j-1) * (kmax-2);
xy = kmax * jmax;
extern __shared__ float sb[];
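/* Editorial comment: sb is split into three (blockDim.x+2) x (blockDim.y+2) planes that act as a
 * sliding window along i: sb_t (plane i-1), sb_m (plane i) and sb_b (plane i+1), each padded with
 * a one-cell halo. csb is this thread's position inside one padded plane; as i advances, the plane
 * pointers are rotated instead of reloading all three planes from global memory.
 */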
float *sb_t = sb;
float *sb_m = sb + (blockDim.x + 2) * (blockDim.y + 2);
float *sb_b = sb + 2 * (blockDim.x + 2) * (blockDim.y + 2);
csb = threadIdx.x + 1 + (threadIdx.y + 1) * (blockDim.x + 2);
for(n=0;n<nn;++n){
c = j * kmax + k;
temp = 0.0;
//printf("%d, %f\n", 2*(blockDim.x + 2) * (blockDim.y + 2) , sb_t[0]);
sb_m[csb] = p[c];
sb_b[csb] = p[c+xy];
if(threadIdx.x == 0){
sb_m[csb-1] = p[c-1];
sb_b[csb-1] = p[c+xy-1];
}
if(threadIdx.x == blockDim.x-1){
sb_m[csb+1] = p[c+1];
sb_b[csb+1] = p[c+xy+1];
}
if(threadIdx.y == 0){
sb_m[csb-blockDim.x-2] = p[c-kmax];
sb_b[csb-blockDim.x-2] = p[c+xy-kmax];
}
if(threadIdx.y == blockDim.y-1){
sb_m[csb+blockDim.x+2] = p[c+kmax];
sb_b[csb+blockDim.x+2] = p[c+xy+kmax];
}
__syncthreads();
for(i=1 ; i<imax-1 ; ++i){
c += xy;
float *sb_tmp = sb_t;
sb_t = sb_m;
sb_m = sb_b;
if(threadIdx.x == 0 && threadIdx.y == 0){
sb_m[0] = p[c-kmax-1];
sb_m[blockDim.x+2] = p[c-kmax+1];
sb_m[(blockDim.y+1)*(blockDim.x+2)] = p[c+kmax-1];
sb_m[(blockDim.y+2)*(blockDim.x+2)] = p[c+kmax+1];
}
sb_b = sb_tmp;
sb_b[csb] = p[c+xy];
if(threadIdx.x == 0){ sb_b[csb-1] = p[c+xy-1];}
if(threadIdx.x == blockDim.x-1){ sb_b[csb+1] = p[c+xy+1];}
if(threadIdx.y == 0){ sb_b[csb-blockDim.x-2] = p[c+xy-kmax];}
if(threadIdx.y == blockDim.y-1){ sb_b[csb+blockDim.x+2] = p[c+xy+kmax];}
__syncthreads();
s0 =
a0[i*jmax*kmax+j*kmax+k] * sb_b[csb]
+ a1[i*jmax*kmax+j*kmax+k] * sb_m[csb + blockDim.x + 2]
+ a2[i*jmax*kmax+j*kmax+k] * sb_m[csb + 1]
+ b0[i*jmax*kmax+j*kmax+k] * (
sb_b[csb + blockDim.x + 2]
- sb_b[csb - blockDim.x - 2]
- sb_t[csb + blockDim.x + 2]
+ sb_t[csb - blockDim.x - 2])
+ b1[i*jmax*kmax+j*kmax+k] *(
sb_m[csb + blockDim.x + 3]
- sb_m[csb - blockDim.x - 1]
- sb_m[csb - blockDim.x - 3]
+ sb_m[csb + blockDim.x + 1])
+ b2[i*jmax*kmax+j*kmax+k] *(
sb_b[csb + 1]
- sb_t[csb + 1]
- sb_b[csb - 1]
+ sb_t[csb - 1])
+ c0[i*jmax*kmax+j*kmax+k] * sb_t[csb]
+ c1[i*jmax*kmax+j*kmax+k] * sb_m[csb - blockDim.x - 2]
+ c2[i*jmax*kmax+j*kmax+k] * sb_m[csb - 1]
+ wrk1[i*jmax*kmax+j*kmax+k];
ss = ( s0 * a3[i*jmax*kmax+j*kmax+k] - sb_m[csb] ) * bnd[i*jmax*kmax+j*kmax+k];
temp = temp + ss * ss;
wrk2[i*jmax*kmax+j*kmax+k] = p[i*jmax*kmax+j*kmax+k] + omega * ss;
}
for(i=1 ; i<imax-1 ; i++){
p[i*jmax*kmax+j*kmax+k] = wrk2[i*jmax*kmax+j*kmax+k];
}
} /* end n loop */
//printf("%d: temp = %d\n", tid, temp);
//printf("shared: %f", sb[csb]);
gosa[tid] = temp;
}
int main(){
int i, j, k;
float final_gosa;
double cpu0, cpu1, nflop, xmflops2, score;
float gosa[THREAD_NUM];
/************************************/
float *p;
float *a0, *a1, *a2, *a3;
float *b0, *b1, *b2;
float *c0, *c1, *c2;
float *bnd;
float *wrk1, *wrk2;
/************************************/
mimax = MIMAX;
mjmax = MJMAX;
mkmax = MKMAX;
imax = MIMAX-1;
jmax = MJMAX-1;
kmax = MKMAX-1;
//int N_IJK = MIMAX*MJMAX*MKMAX;
int N_IJK = mimax*mjmax*mkmax;
/************************************/
float *dev_p;
float *dev_a0, *dev_a1, *dev_a2, *dev_a3;
float *dev_b0, *dev_b1, *dev_b2;
float *dev_c0, *dev_c1, *dev_c2;
float *dev_bnd;
float *dev_wrk1, *dev_wrk2;
float *dev_gosa;
/************************************/
omega = 0.8;
//initial_maxtrix();
/******allocate mem on CPU***********/
a0 = (float*)malloc(sizeof(float)*N_IJK);
a1 = (float*)malloc(sizeof(float)*N_IJK);
a2 = (float*)malloc(sizeof(float)*N_IJK);
a3 = (float*)malloc(sizeof(float)*N_IJK);
b0 = (float*)malloc(sizeof(float)*N_IJK);
b1 = (float*)malloc(sizeof(float)*N_IJK);
b2 = (float*)malloc(sizeof(float)*N_IJK);
c0 = (float*)malloc(sizeof(float)*N_IJK);
c1 = (float*)malloc(sizeof(float)*N_IJK);
c2 = (float*)malloc(sizeof(float)*N_IJK);
p = (float*)malloc(sizeof(float)*N_IJK);
wrk1 = (float*)malloc(sizeof(float)*N_IJK);
wrk2 = (float*)malloc(sizeof(float)*N_IJK);
bnd = (float*)malloc(sizeof(float)*N_IJK);
//gosa = (float*)malloc(sizeof(float));
/************************************/
/******allocate mem on GPU***********/
hipMalloc((void**)&dev_a0, N_IJK*sizeof(float));
hipMalloc((void**)&dev_a1, N_IJK*sizeof(float));
hipMalloc((void**)&dev_a2, N_IJK*sizeof(float));
hipMalloc((void**)&dev_a3, N_IJK*sizeof(float));
hipMalloc((void**)&dev_b0, N_IJK*sizeof(float));
hipMalloc((void**)&dev_b1, N_IJK*sizeof(float));
hipMalloc((void**)&dev_b2, N_IJK*sizeof(float));
hipMalloc((void**)&dev_c0, N_IJK*sizeof(float));
hipMalloc((void**)&dev_c1, N_IJK*sizeof(float));
hipMalloc((void**)&dev_c2, N_IJK*sizeof(float));
hipMalloc((void**)&dev_p, N_IJK*sizeof(float));
hipMalloc((void**)&dev_bnd, N_IJK*sizeof(float));
hipMalloc((void**)&dev_wrk1, N_IJK*sizeof(float));
hipMalloc((void**)&dev_wrk2, N_IJK*sizeof(float));
hipMalloc((void**)&dev_gosa, sizeof(float)*THREAD_NUM);
/************************************/
/*****Initialize*********************/
//int i,j,k;
for(i=0 ; i<mimax ; ++i){
for(j=0 ; j<mjmax ; ++j){
for(k=0 ; k<mkmax ; ++k){
a0[i*mjmax*mkmax+j*mkmax+k]=0.0;
a1[i*mjmax*mkmax+j*mkmax+k]=0.0;
a2[i*mjmax*mkmax+j*mkmax+k]=0.0;
a3[i*mjmax*mkmax+j*mkmax+k]=0.0;
b0[i*mjmax*mkmax+j*mkmax+k]=0.0;
b1[i*mjmax*mkmax+j*mkmax+k]=0.0;
b2[i*mjmax*mkmax+j*mkmax+k]=0.0;
c0[i*mjmax*mkmax+j*mkmax+k]=0.0;
c1[i*mjmax*mkmax+j*mkmax+k]=0.0;
c2[i*mjmax*mkmax+j*mkmax+k]=0.0;
p[i*mjmax*mkmax+j*mkmax+k]=0.0;
wrk1[i*mjmax*mkmax+j*mkmax+k]=0.0;
bnd[i*mjmax*mkmax+j*mkmax+k]=0.0;
}
}
}
for(i=0 ; i<mimax ; ++i){
for(j=0 ; j<mjmax ; ++j){
for(k=0 ; k<mkmax ; ++k){
a0[i*mjmax*mkmax+j*mkmax+k]=1.0;
a1[i*mjmax*mkmax+j*mkmax+k]=1.0;
a2[i*mjmax*mkmax+j*mkmax+k]=1.0;
a3[i*mjmax*mkmax+j*mkmax+k]=1.0/6.0;
b0[i*mjmax*mkmax+j*mkmax+k]=0.0;
b1[i*mjmax*mkmax+j*mkmax+k]=0.0;
b2[i*mjmax*mkmax+j*mkmax+k]=0.0;
c0[i*mjmax*mkmax+j*mkmax+k]=1.0;
c1[i*mjmax*mkmax+j*mkmax+k]=1.0;
c2[i*mjmax*mkmax+j*mkmax+k]=1.0;
p[i*mjmax*mkmax+j*mkmax+k]=(float)(i*i)/(float)(imax*imax);
wrk1[i*mjmax*mkmax+j*mkmax+k]=0.0;
bnd[i*mjmax*mkmax+j*mkmax+k]=1.0;
}
}
}
/************************************/
/*****copy array to device mem*******/
hipMemcpy(dev_a0, a0, N_IJK*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_a1, a1, N_IJK*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_a2, a2, N_IJK*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_a3, a3, N_IJK*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_b0, b0, N_IJK*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_b1, b1, N_IJK*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_b2, b2, N_IJK*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_c0, c0, N_IJK*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_c1, c1, N_IJK*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_c2, c2, N_IJK*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_wrk1, wrk1, N_IJK*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_wrk2, wrk2, N_IJK*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_bnd, bnd, N_IJK*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_p, p, N_IJK*sizeof(float), hipMemcpyHostToDevice);
//hipMemcpy(dev_gosa, gosa, sizeof(float), hipMemcpyHostToDevice);
/************************************/
printf("mimax = %d mjmax = %d mkmax = %d\n", MIMAX, MJMAX, MKMAX);
printf("imax = %d jmax = %d kmax = %d\n", imax, jmax, kmax);
cpu0 = second(); /**measuring**/
dim3 block(BLOCKSIZEX, BLOCKSIZEY, 1);
dim3 grid(GRIDSIZEX, GRIDSIZEY, 1);
hipLaunchKernelGGL(( jacobi), dim3(grid), dim3(block), sizeof(float) * 3 * (BLOCKSIZEX + 2) * (BLOCKSIZEY + 2), 0, dev_a0, dev_a1, dev_a2, dev_a3, dev_b0, dev_b1, dev_b2, dev_c0, dev_c1, dev_c2, dev_p, dev_wrk1, dev_wrk2, dev_bnd, NN, mimax, mjmax, mkmax, omega, dev_gosa);
hipDeviceSynchronize();
cpu1 = second();
hipMemcpy(gosa, dev_gosa, sizeof(float)*THREAD_NUM, hipMemcpyDeviceToHost);
/******Free mem on the GPU**********/
hipFree(dev_a0);
hipFree(dev_a1);
hipFree(dev_a2);
hipFree(dev_a3);
hipFree(dev_b0);
hipFree(dev_b1);
hipFree(dev_b2);
hipFree(dev_c0);
hipFree(dev_c1);
hipFree(dev_c2);
hipFree(dev_p);
hipFree(dev_wrk1);
hipFree(dev_wrk2);
hipFree(dev_bnd);
hipFree(dev_gosa);
/************************************/
/********Final sum of gosa***********/
for(int gosa_index=0; gosa_index<THREAD_NUM; gosa_index++){
final_gosa += gosa[gosa_index];
//printf("Gosa%d: %e \n", gosa_index, gosa[gosa_index]);
}
/************************************/
nflop = (kmax-2)*(jmax-2)*(imax-2)*34;
if(cpu1 != 0.0)
xmflops2 = nflop/cpu1*1.0e-6*(float)NN;
score = xmflops2/32.27;
printf("gpu: %f sec.\n", cpu1);
printf("Loop executed for %d times\n", NN);
printf("Gosa: %e \n", final_gosa);
printf("MFLOPS measured: %f\n", xmflops2);
//printf("Score: %f\n", score);
return(0);
}
d97dfd1911dbe656d48045ceacfa3bf1c219af45.cu | #include<stdio.h>
#include<sys/time.h>
#define BLOCKSIZEX 128
#define BLOCKSIZEY 4
#define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY
#define GRIDSIZEX 4
#define GRIDSIZEY 64
#define GRIDSIZE GRIDSIZEX * GRIDSIZEY
#define THREAD_NUM BLOCKSIZE * GRIDSIZE
#define MIMAX 256
#define MJMAX GRIDSIZEY * BLOCKSIZEY + 2
#define MKMAX GRIDSIZEX * BLOCKSIZEX + 2
#define NN 750
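/* The interior problem size is tied to the launch geometry: MJMAX/MKMAX are
   GRIDSIZE*BLOCKSIZE plus two halo layers, so each of the THREAD_NUM threads
   owns exactly one interior (j,k) column of the grid. */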
/*static float p[MIMAX][MJMAX][MKMAX];
static float a[MIMAX][MJMAX][MKMAX][4];
static float b[MIMAX][MJMAX][MKMAX][3];
static float c[MIMAX][MJMAX][MKMAX][3];
static float bnd[MIMAX][MJMAX][MKMAX];
static float work1[MIMAX][MJMAX][MKMAX];
static float work2[MIMAX][MJMAX][MKMAX];*/
static int imax, jmax, kmax, mimax, mjmax, mkmax;
static float omega;
double second(){
struct timeval tm;
double t;
static int base_sec = 0, base_usec = 0;
gettimeofday(&tm, NULL);
if(base_sec == 0 && base_usec == 0){
base_sec = tm.tv_sec;
base_usec = tm.tv_usec;
t = 0.0;
}
else{
t = (double)(tm.tv_sec-base_sec) + ((double)(tm.tv_usec-base_usec))/1.0e6;
}
return t;
}
__global__ void jacobi(float *a0, float *a1, float *a2, float *a3, float *b0, float *b1, float *b2, float *c0, float *c1, float *c2, float *p, float *wrk1, float *wrk2, float *bnd, int nn, int imax, int jmax, int kmax, float omega, float *gosa){
int i, j, k, n, xy, c, csb;
float s0, ss, temp;
//const int size = (imax-1)/(imax-1);
k = threadIdx.x + blockDim.x * blockIdx.x + 1;
j = threadIdx.y + blockDim.y * blockIdx.y + 1;
const int tid = (k-1) + (j-1) * (kmax-2);
xy = kmax * jmax;
extern __shared__ float sb[];
float *sb_t = sb;
float *sb_m = sb + (blockDim.x + 2) * (blockDim.y + 2);
float *sb_b = sb + 2 * (blockDim.x + 2) * (blockDim.y + 2);
csb = threadIdx.x + 1 + (threadIdx.y + 1) * (blockDim.x + 2);
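/* csb is this thread's cell inside one halo-padded (blockDim.x+2)x(blockDim.y+2)
   shared tile; sb holds three such tiles (top/middle/bottom planes along i). */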
for(n=0;n<nn;++n){
c = j * kmax + k;
temp = 0.0;
//printf("%d, %f\n", 2*(blockDim.x + 2) * (blockDim.y + 2) , sb_t[0]);
sb_m[csb] = p[c];
sb_b[csb] = p[c+xy];
if(threadIdx.x == 0){
sb_m[csb-1] = p[c-1];
sb_b[csb-1] = p[c+xy-1];
}
if(threadIdx.x == blockDim.x-1){
sb_m[csb+1] = p[c+1];
sb_b[csb+1] = p[c+xy+1];
}
if(threadIdx.y == 0){
sb_m[csb-blockDim.x-2] = p[c-kmax];
sb_b[csb-blockDim.x-2] = p[c+xy-kmax];
}
if(threadIdx.y == blockDim.y-1){
sb_m[csb+blockDim.x+2] = p[c+kmax];
sb_b[csb+blockDim.x+2] = p[c+xy+kmax];
}
__syncthreads();
for(i=1 ; i<imax-1 ; ++i){
c += xy;
float *sb_tmp = sb_t;
sb_t = sb_m;
sb_m = sb_b;
if(threadIdx.x == 0 && threadIdx.y == 0){
sb_m[0] = p[c-kmax-1];
sb_m[blockDim.x+2] = p[c-kmax+1];
sb_m[(blockDim.y+1)*(blockDim.x+2)] = p[c+kmax-1];
sb_m[(blockDim.y+2)*(blockDim.x+2)] = p[c+kmax+1];
}
sb_b = sb_tmp;
sb_b[csb] = p[c+xy];
if(threadIdx.x == 0){ sb_b[csb-1] = p[c+xy-1];}
if(threadIdx.x == blockDim.x-1){ sb_b[csb+1] = p[c+xy+1];}
if(threadIdx.y == 0){ sb_b[csb-blockDim.x-2] = p[c+xy-kmax];}
if(threadIdx.y == blockDim.y-1){ sb_b[csb+blockDim.x+2] = p[c+xy+kmax];}
__syncthreads();
s0 =
a0[i*jmax*kmax+j*kmax+k] * sb_b[csb]
+ a1[i*jmax*kmax+j*kmax+k] * sb_m[csb + blockDim.x + 2]
+ a2[i*jmax*kmax+j*kmax+k] * sb_m[csb + 1]
+ b0[i*jmax*kmax+j*kmax+k] * (
sb_b[csb + blockDim.x + 2]
- sb_b[csb - blockDim.x - 2]
- sb_t[csb + blockDim.x + 2]
+ sb_t[csb - blockDim.x - 2])
+ b1[i*jmax*kmax+j*kmax+k] *(
sb_m[csb + blockDim.x + 3]
- sb_m[csb - blockDim.x - 1]
- sb_m[csb - blockDim.x - 3]
+ sb_m[csb + blockDim.x + 1])
+ b2[i*jmax*kmax+j*kmax+k] *(
sb_b[csb + 1]
- sb_t[csb + 1]
- sb_b[csb - 1]
+ sb_t[csb - 1])
+ c0[i*jmax*kmax+j*kmax+k] * sb_t[csb]
+ c1[i*jmax*kmax+j*kmax+k] * sb_m[csb - blockDim.x - 2]
+ c2[i*jmax*kmax+j*kmax+k] * sb_m[csb - 1]
+ wrk1[i*jmax*kmax+j*kmax+k];
ss = ( s0 * a3[i*jmax*kmax+j*kmax+k] - sb_m[csb] ) * bnd[i*jmax*kmax+j*kmax+k];
temp = temp + ss * ss;
wrk2[i*jmax*kmax+j*kmax+k] = p[i*jmax*kmax+j*kmax+k] + omega * ss;
}
for(i=1 ; i<imax-1 ; i++){
p[i*jmax*kmax+j*kmax+k] = wrk2[i*jmax*kmax+j*kmax+k];
}
} /* end n loop */
//printf("%d: temp = %d\n", tid, temp);
//printf("shared: %f", sb[csb]);
gosa[tid] = temp;
}
int main(){
int i, j, k;
float final_gosa = 0.0;
double cpu0, cpu1, nflop, xmflops2 = 0.0, score;
float gosa[THREAD_NUM];
/************************************/
float *p;
float *a0, *a1, *a2, *a3;
float *b0, *b1, *b2;
float *c0, *c1, *c2;
float *bnd;
float *wrk1, *wrk2;
/************************************/
mimax = MIMAX;
mjmax = MJMAX;
mkmax = MKMAX;
imax = MIMAX-1;
jmax = MJMAX-1;
kmax = MKMAX-1;
//int N_IJK = MIMAX*MJMAX*MKMAX;
int N_IJK = mimax*mjmax*mkmax;
/************************************/
float *dev_p;
float *dev_a0, *dev_a1, *dev_a2, *dev_a3;
float *dev_b0, *dev_b1, *dev_b2;
float *dev_c0, *dev_c1, *dev_c2;
float *dev_bnd;
float *dev_wrk1, *dev_wrk2;
float *dev_gosa;
/************************************/
omega = 0.8;
//initial_maxtrix();
/******allocate mem on CPU***********/
a0 = (float*)malloc(sizeof(float)*N_IJK);
a1 = (float*)malloc(sizeof(float)*N_IJK);
a2 = (float*)malloc(sizeof(float)*N_IJK);
a3 = (float*)malloc(sizeof(float)*N_IJK);
b0 = (float*)malloc(sizeof(float)*N_IJK);
b1 = (float*)malloc(sizeof(float)*N_IJK);
b2 = (float*)malloc(sizeof(float)*N_IJK);
c0 = (float*)malloc(sizeof(float)*N_IJK);
c1 = (float*)malloc(sizeof(float)*N_IJK);
c2 = (float*)malloc(sizeof(float)*N_IJK);
p = (float*)malloc(sizeof(float)*N_IJK);
wrk1 = (float*)malloc(sizeof(float)*N_IJK);
wrk2 = (float*)malloc(sizeof(float)*N_IJK);
bnd = (float*)malloc(sizeof(float)*N_IJK);
//gosa = (float*)malloc(sizeof(float));
/************************************/
/******allocate mem on GPU***********/
cudaMalloc((void**)&dev_a0, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_a1, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_a2, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_a3, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_b0, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_b1, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_b2, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_c0, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_c1, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_c2, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_p, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_bnd, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_wrk1, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_wrk2, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_gosa, sizeof(float)*THREAD_NUM);
/************************************/
/*****Initialize*********************/
//int i,j,k;
for(i=0 ; i<mimax ; ++i){
for(j=0 ; j<mjmax ; ++j){
for(k=0 ; k<mkmax ; ++k){
a0[i*mjmax*mkmax+j*mkmax+k]=0.0;
a1[i*mjmax*mkmax+j*mkmax+k]=0.0;
a2[i*mjmax*mkmax+j*mkmax+k]=0.0;
a3[i*mjmax*mkmax+j*mkmax+k]=0.0;
b0[i*mjmax*mkmax+j*mkmax+k]=0.0;
b1[i*mjmax*mkmax+j*mkmax+k]=0.0;
b2[i*mjmax*mkmax+j*mkmax+k]=0.0;
c0[i*mjmax*mkmax+j*mkmax+k]=0.0;
c1[i*mjmax*mkmax+j*mkmax+k]=0.0;
c2[i*mjmax*mkmax+j*mkmax+k]=0.0;
p[i*mjmax*mkmax+j*mkmax+k]=0.0;
wrk1[i*mjmax*mkmax+j*mkmax+k]=0.0;
bnd[i*mjmax*mkmax+j*mkmax+k]=0.0;
}
}
}
for(i=0 ; i<mimax ; ++i){
for(j=0 ; j<mjmax ; ++j){
for(k=0 ; k<mkmax ; ++k){
a0[i*mjmax*mkmax+j*mkmax+k]=1.0;
a1[i*mjmax*mkmax+j*mkmax+k]=1.0;
a2[i*mjmax*mkmax+j*mkmax+k]=1.0;
a3[i*mjmax*mkmax+j*mkmax+k]=1.0/6.0;
b0[i*mjmax*mkmax+j*mkmax+k]=0.0;
b1[i*mjmax*mkmax+j*mkmax+k]=0.0;
b2[i*mjmax*mkmax+j*mkmax+k]=0.0;
c0[i*mjmax*mkmax+j*mkmax+k]=1.0;
c1[i*mjmax*mkmax+j*mkmax+k]=1.0;
c2[i*mjmax*mkmax+j*mkmax+k]=1.0;
p[i*mjmax*mkmax+j*mkmax+k]=(float)(i*i)/(float)(imax*imax);
wrk1[i*mjmax*mkmax+j*mkmax+k]=0.0;
bnd[i*mjmax*mkmax+j*mkmax+k]=1.0;
}
}
}
/************************************/
/*****copy array to device mem*******/
cudaMemcpy(dev_a0, a0, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_a1, a1, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_a2, a2, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_a3, a3, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b0, b0, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b1, b1, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b2, b2, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c0, c0, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c1, c1, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c2, c2, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_wrk1, wrk1, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_wrk2, wrk2, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_bnd, bnd, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_p, p, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
//cudaMemcpy(dev_gosa, gosa, sizeof(float), cudaMemcpyHostToDevice);
/************************************/
printf("mimax = %d mjmax = %d mkmax = %d\n", MIMAX, MJMAX, MKMAX);
printf("imax = %d jmax = %d kmax = %d\n", imax, jmax, kmax);
cpu0 = second(); /**measuring**/
dim3 block(BLOCKSIZEX, BLOCKSIZEY, 1);
dim3 grid(GRIDSIZEX, GRIDSIZEY, 1);
jacobi<<<grid, block, sizeof(float) * 3 * (BLOCKSIZEX + 2) * (BLOCKSIZEY + 2)>>>(dev_a0, dev_a1, dev_a2, dev_a3, dev_b0, dev_b1, dev_b2, dev_c0, dev_c1, dev_c2, dev_p, dev_wrk1, dev_wrk2, dev_bnd, NN, mimax, mjmax, mkmax, omega, dev_gosa);
cudaDeviceSynchronize();
cpu1 = second();
cudaMemcpy(gosa, dev_gosa, sizeof(float)*THREAD_NUM, cudaMemcpyDeviceToHost);
/******Free mem on the GPU**********/
cudaFree(dev_a0);
cudaFree(dev_a1);
cudaFree(dev_a2);
cudaFree(dev_a3);
cudaFree(dev_b0);
cudaFree(dev_b1);
cudaFree(dev_b2);
cudaFree(dev_c0);
cudaFree(dev_c1);
cudaFree(dev_c2);
cudaFree(dev_p);
cudaFree(dev_wrk1);
cudaFree(dev_wrk2);
cudaFree(dev_bnd);
cudaFree(dev_gosa);
/************************************/
/********Final sum of gosa***********/
for(int gosa_index=0; gosa_index<THREAD_NUM; gosa_index++){
final_gosa += gosa[gosa_index];
//printf("Gosa%d: %e \n", gosa_index, gosa[gosa_index]);
}
/************************************/
nflop = (kmax-2)*(jmax-2)*(imax-2)*34;
if(cpu1 != 0.0)
xmflops2 = nflop/cpu1*1.0e-6*(float)NN;
score = xmflops2/32.27;
printf("gpu: %f sec.\n", cpu1);
printf("Loop executed for %d times\n", NN);
printf("Gosa: %e \n", final_gosa);
printf("MFLOPS measured: %f\n", xmflops2);
//printf("Score: %f\n", score);
return(0);
}
|
7fcec2b7f845639a369c19b690a2522b478a2f82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "vars.h"
#include "vars_gpu.h"
extern int raynuminc;
extern int raynumout;
extern int mynumray;
extern int mynumpix;
extern int *raysendstart;
extern int *rayrecvstart;
extern int *raysendcount;
extern int *rayrecvcount;
extern int *rayraystart;
extern int *rayrayind;
extern int *rayrecvlist;
extern double ftime;
extern double btime;
extern double fktime;
extern double frtime;
extern double bktime;
extern double brtime;
extern double aftime;
extern double abtime;
extern int numproj;
extern int numback;
extern int proj_rownztot;
extern int *proj_rowdispl;
extern int *proj_rowindex;
extern float *proj_rowvalue;
extern int proj_blocksize;
extern int proj_numblocks;
extern int proj_blocknztot;
extern int *proj_blockdispl;
extern int *proj_blockindex;
extern float *proj_blockvalue;
extern int proj_buffsize;
extern int *proj_buffdispl;
extern int proj_buffnztot;
extern int *proj_buffmap;
extern short *proj_buffindex;
extern float *proj_buffvalue;
extern int back_rownztot;
extern int *back_rowdispl;
extern int *back_rowindex;
extern float *back_rowvalue;
extern int back_blocksize;
extern int back_numblocks;
extern int back_blocknztot;
extern int *back_blockdispl;
extern int *back_blockindex;
extern float *back_blockvalue;
extern int back_buffsize;
extern int *back_buffdispl;
extern int back_buffnztot;
extern int *back_buffmap;
extern short *back_buffindex;
extern float *back_buffvalue;
int *proj_blockdispl_d;
int *proj_buffdispl_d;
int *proj_buffmap_d;
short *proj_buffindex_d;
float *proj_buffvalue_d;
int *back_blockdispl_d;
int *back_buffdispl_d;
int *back_buffmap_d;
short *back_buffindex_d;
float *back_buffvalue_d;
int *rayraystart_d;
int *rayrayind_d;
int *rayindray_d;
float *tomogram_d;
float *sinogram_d;
float *raypart_d;
float *raybuff_d;
extern float *raypart;
extern float *raybuff;
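// Forward and back projection are applied as block/buffer-compressed sparse
// matrix-vector products on the GPU; partial ray sums are exchanged between
// MPI ranks with MPI_Alltoallv and reduced into the locally owned rays.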
void setup_gpu(float **obj,float **gra, float **dir,float **mes,float **res,float **ray){
int numproc;
int myid;
MPI_Comm_size(MPI_COMM_WORLD,&numproc);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
int device = myid % 4;///8;
printf("myid: %d device: %d\n",myid,device);
hipSetDevice(device);
if(myid==0){
int deviceCount;
hipGetDeviceCount(&deviceCount);
printf("\n");
printf("Device Count: %d\n",deviceCount);
//for (int dev = 0; dev < deviceCount; dev++) {
int dev = myid;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("Device %d name: %d\n",dev,deviceProp.name);
printf("Computational Capabilities: %d, %d\n",deviceProp.major,deviceProp.minor);
printf("Maximum global memory size: %lu\n",deviceProp.totalGlobalMem);
printf("Maximum constant memory size: %d\n",deviceProp.totalConstMem);
printf("Maximum shared memory size per block: %d\n",deviceProp.sharedMemPerBlock);
printf("Maximum block dimensions: %dx%dx%d\n",deviceProp.maxThreadsDim[0],deviceProp.maxThreadsDim[1],deviceProp.maxThreadsDim[2]);
printf("Maximum grid dimensions: %dx%dx%d\n",deviceProp.maxGridSize[0],deviceProp.maxGridSize[1],deviceProp.maxGridSize[2]);
printf("Maximum threads per block: %d\n",deviceProp.maxThreadsPerBlock);
printf("Warp size: %d\n",deviceProp.warpSize);
printf("\n");
//}
}
hipHostMalloc((void**)obj,sizeof(float)*mynumpix);
hipHostMalloc((void**)gra,sizeof(float)*mynumpix);
hipHostMalloc((void**)dir,sizeof(float)*mynumpix);
hipHostMalloc((void**)mes,sizeof(float)*mynumray);
hipHostMalloc((void**)res,sizeof(float)*mynumray);
hipHostMalloc((void**)ray,sizeof(float)*mynumray);
//raypart = new float[raynumout];
//raybuff = new float[raynuminc];
hipHostMalloc((void**)&raypart,sizeof(float)*raynumout);
hipHostMalloc((void**)&raybuff,sizeof(float)*raynuminc);
float projmem = 0;
projmem = projmem + sizeof(int)/1e9*(proj_numblocks+1);
projmem = projmem + sizeof(int)/1e9*(proj_blocknztot+1);
projmem = projmem + sizeof(int)/1e9*(proj_blocknztot*proj_buffsize);
projmem = projmem + sizeof(int)/1e9*(proj_buffnztot*proj_blocksize);
projmem = projmem + sizeof(float)/1e9*(proj_buffnztot*proj_blocksize);
//printf("PROC %d FORWARD PROJECTION MEMORY: %f GB\n",myid,projmem);
hipMalloc((void**)&proj_blockdispl_d,sizeof(int)*(proj_numblocks+1));
hipMalloc((void**)&proj_buffdispl_d,sizeof(int)*(proj_blocknztot+1));
hipMalloc((void**)&proj_buffmap_d,sizeof(int)*proj_blocknztot*proj_buffsize);
hipMalloc((void**)&proj_buffindex_d,sizeof(int)*proj_buffnztot*proj_blocksize);
hipMalloc((void**)&proj_buffvalue_d,sizeof(float)*proj_buffnztot*proj_blocksize);
hipMemcpy(proj_blockdispl_d,proj_blockdispl,sizeof(int)*(proj_numblocks+1),hipMemcpyHostToDevice);
hipMemcpy(proj_buffdispl_d,proj_buffdispl,sizeof(int)*(proj_blocknztot+1),hipMemcpyHostToDevice);
hipMemcpy(proj_buffmap_d,proj_buffmap,sizeof(int)*proj_blocknztot*proj_buffsize,hipMemcpyHostToDevice);
hipMemcpy(proj_buffindex_d,proj_buffindex,sizeof(short)*proj_buffnztot*proj_blocksize,hipMemcpyHostToDevice);
hipMemcpy(proj_buffvalue_d,proj_buffvalue,sizeof(float)*proj_buffnztot*proj_blocksize,hipMemcpyHostToDevice);
/*for(int block = 0; block < proj_numblocks; block++)
for(int buff = proj_blockdispl[block]; buff < proj_blockdispl[block+1]; buff++){
printf(" block %d buff %d\n",block,buff);
for(int m = proj_buffdispl[buff]; m < proj_buffdispl[buff+1]; m++){
for(int n = 0; n < proj_blocksize; n++)
printf("%d ",proj_buffindex[m*proj_blocksize+n]);
printf("\n");
}
}
for(int block = 0; block < proj_numblocks; block++)
for(int buff = proj_blockdispl[block]; buff < proj_blockdispl[block+1]; buff++){
printf(" block %d buff %d\n",block,buff);
for(int m = proj_buffdispl[buff]; m < proj_buffdispl[buff+1]; m++){
for(int n = 0; n < proj_blocksize; n++)
printf("%0.1f ",proj_buffvalue[m*proj_blocksize+n]);
printf("\n");
}
}
printf("buffnztot: %d blocksize: %d %d\n",proj_buffnztot,proj_blocksize,proj_buffnztot*proj_blocksize);
printf("blocknztot: %d buffsize: %d %d\n",proj_blocknztot,proj_buffsize,proj_blocknztot*proj_buffsize);*/
hipMalloc((void**)&back_blockdispl_d,sizeof(int)*(back_numblocks+1));
hipMalloc((void**)&back_buffdispl_d,sizeof(int)*(back_blocknztot+1));
hipMalloc((void**)&back_buffmap_d,sizeof(int)*back_blocknztot*back_buffsize);
hipMalloc((void**)&back_buffindex_d,sizeof(int)*back_buffnztot*back_blocksize);
hipMalloc((void**)&back_buffvalue_d,sizeof(float)*back_buffnztot*back_blocksize);
hipMemcpy(back_blockdispl_d,back_blockdispl,sizeof(int)*(back_numblocks+1),hipMemcpyHostToDevice);
hipMemcpy(back_buffdispl_d,back_buffdispl,sizeof(int)*(back_blocknztot+1),hipMemcpyHostToDevice);
hipMemcpy(back_buffmap_d,back_buffmap,sizeof(int)*back_blocknztot*back_buffsize,hipMemcpyHostToDevice);
hipMemcpy(back_buffindex_d,back_buffindex,sizeof(short)*back_buffnztot*back_blocksize,hipMemcpyHostToDevice);
hipMemcpy(back_buffvalue_d,back_buffvalue,sizeof(float)*back_buffnztot*back_blocksize,hipMemcpyHostToDevice);
float backmem = 0;
backmem = backmem + sizeof(int)/1e9*(back_numblocks+1);
backmem = backmem + sizeof(int)/1e9*(back_blocknztot+1);
backmem = backmem + sizeof(int)/1e9*(back_blocknztot*back_buffsize);
backmem = backmem + sizeof(int)/1e9*(back_buffnztot*back_blocksize);
backmem = backmem + sizeof(float)/1e9*(back_buffnztot*back_blocksize);
//printf("PROC %d BACKPROJECTION MEMORY: %f GB\n",myid,backmem);
printf("PROC %d TOTAL GPU MEMORY: %f GB\n",myid,projmem+backmem);
hipMalloc((void**)&rayraystart_d,sizeof(int)*(mynumray+1));
hipMalloc((void**)&rayrayind_d,sizeof(int)*raynuminc);
hipMalloc((void**)&rayindray_d,sizeof(int)*raynuminc);
hipMemcpy(rayraystart_d,rayraystart,sizeof(int)*(mynumray+1),hipMemcpyHostToDevice);
hipMemcpy(rayrayind_d,rayrayind,sizeof(int)*raynuminc,hipMemcpyHostToDevice);
hipMemcpy(rayindray_d,rayrecvlist,sizeof(int)*raynuminc,hipMemcpyHostToDevice);
hipMalloc((void**)&tomogram_d,sizeof(float)*mynumpix);
hipMalloc((void**)&sinogram_d,sizeof(float)*mynumray);
hipMalloc((void**)&raypart_d,sizeof(float)*raynumout);
hipMalloc((void**)&raybuff_d,sizeof(float)*raynuminc);
hipFuncSetAttribute(kernel_SpMV_buffered,hipFuncAttributeMaxDynamicSharedMemorySize,98304);
}
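// Forward projection in three timed phases: buffered SpMV of the tomogram into
// outgoing ray partials, a host-staged MPI_Alltoallv exchange of those partials,
// and a per-ray reduction of the received contributions into the sinogram.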
void projection(float *mes, float *obj){
MPI_Barrier(MPI_COMM_WORLD);
double timef = MPI_Wtime();
{
double time = timef;
int blocksize = proj_blocksize;
int numblocks = proj_numblocks;
int buffsize = proj_buffsize;
int *blockdispl = proj_blockdispl_d;
int *buffdispl = proj_buffdispl_d;
int *buffmap = proj_buffmap_d;
short *buffindex = proj_buffindex_d;
float *buffvalue = proj_buffvalue_d;
hipMemcpy(tomogram_d,obj,sizeof(float)*mynumpix,hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( kernel_SpMV_buffered), dim3(numblocks),dim3(blocksize),sizeof(float)*buffsize, 0, raypart_d,tomogram_d,buffindex,buffvalue,raynumout,blockdispl,buffdispl,buffmap,buffsize);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
fktime = fktime + milliseconds/1000;
//hipDeviceSynchronize();
//MPI_Barrier(MPI_COMM_WORLD);
//fktime = fktime + MPI_Wtime()-time;
}
{
double time = MPI_Wtime();
hipMemcpy(raypart,raypart_d,sizeof(float)*raynumout,hipMemcpyDeviceToHost);
MPI_Alltoallv(raypart,raysendcount,raysendstart,MPI_FLOAT,raybuff,rayrecvcount,rayrecvstart,MPI_FLOAT,MPI_COMM_WORLD);
hipMemcpy(raybuff_d,raybuff,sizeof(float)*raynuminc,hipMemcpyHostToDevice);
MPI_Barrier(MPI_COMM_WORLD);
aftime = aftime + MPI_Wtime()-time;
}
{
double time = MPI_Wtime();
hipLaunchKernelGGL(( kernel_SpReduce), dim3((mynumray+255)/256),dim3(256), 0, 0, sinogram_d,raybuff_d,rayraystart_d,rayrayind_d,mynumray);
hipMemcpy(mes,sinogram_d,sizeof(float)*mynumray,hipMemcpyDeviceToHost);
MPI_Barrier(MPI_COMM_WORLD);
frtime = frtime + MPI_Wtime()-time;
}
ftime = ftime + MPI_Wtime()-timef;
numproj++;
}
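// Backprojection reverses the pipeline: gather the residual into the received-ray
// ordering, exchange it back with MPI_Alltoallv, then apply the backprojection
// matrix as a buffered SpMV to produce the gradient image.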
void backprojection(float *gra, float *res){
MPI_Barrier(MPI_COMM_WORLD);
double timeb = MPI_Wtime();
{
double time = timeb;
hipMemcpy(sinogram_d,res,sizeof(float)*mynumray,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_SpGather), dim3((raynuminc+255)/256),dim3(256), 0, 0, raybuff_d,sinogram_d,rayindray_d,raynuminc);
hipDeviceSynchronize();
MPI_Barrier(MPI_COMM_WORLD);
brtime = brtime + MPI_Wtime()-time;
}
{
double time = MPI_Wtime();
hipMemcpy(raybuff,raybuff_d,sizeof(float)*raynuminc,hipMemcpyDeviceToHost);
MPI_Alltoallv(raybuff,rayrecvcount,rayrecvstart,MPI_FLOAT,raypart,raysendcount,raysendstart,MPI_FLOAT,MPI_COMM_WORLD);
hipMemcpy(raypart_d,raypart,sizeof(float)*raynumout,hipMemcpyHostToDevice);
MPI_Barrier(MPI_COMM_WORLD);
abtime = abtime + MPI_Wtime()-time;
}
{
double time = MPI_Wtime();
int blocksize = back_blocksize;
int numblocks = back_numblocks;
int buffsize = back_buffsize;
int *blockdispl = back_blockdispl_d;
int *buffdispl = back_buffdispl_d;
int *buffmap = back_buffmap_d;
short *buffindex = back_buffindex_d;
float *buffvalue = back_buffvalue_d;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( kernel_SpMV_buffered), dim3(numblocks),dim3(blocksize),sizeof(float)*buffsize, 0, tomogram_d,raypart_d,buffindex,buffvalue,mynumpix,blockdispl,buffdispl,buffmap,buffsize);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
bktime = bktime + milliseconds/1000;
hipMemcpy(gra,tomogram_d,sizeof(float)*mynumpix,hipMemcpyDeviceToHost);
//MPI_Barrier(MPI_COMM_WORLD);
//bktime = bktime + MPI_Wtime()-time;
}
btime = btime + MPI_Wtime()-timeb;
numback++;
}
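// Buffered SpMV: for each buffer chunk the block cooperatively stages the needed
// x entries (via buffmap) into shared memory, then every thread accumulates its
// row using 16-bit indices into that buffer; index/value slices are blockDim.x
// wide so consecutive threads read consecutive global memory.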
__global__ void kernel_SpMV_buffered(float *y, float *x, short *index, float *value, int numrow, int *blockdispl, int *buffdispl, int *buffmap, int buffsize){
extern __shared__ float shared[];
float reduce = 0;
int ind;
for(int buff = blockdispl[blockIdx.x]; buff < blockdispl[blockIdx.x+1]; buff++){
for(int i = threadIdx.x; i < buffsize; i += blockDim.x)
shared[i] = x[buffmap[buff*buffsize+i]];
__syncthreads();
for(int n = buffdispl[buff]; n < buffdispl[buff+1]; n++){
ind = n*blockDim.x+threadIdx.x;
reduce = reduce + shared[index[ind]]*value[ind];
}
__syncthreads();
}
ind = blockIdx.x*blockDim.x+threadIdx.x;
if(ind < numrow)
y[ind] = reduce;
}
__global__ void kernel_SpReduce(float *y, float *x, int *displ, int *index, int numrow){
int row = blockIdx.x*blockDim.x+threadIdx.x;
float reduce = 0;
if(row < numrow){
for(int n = displ[row]; n < displ[row+1]; n++)
reduce = reduce + x[index[n]];
y[row] = reduce;
}
}
__global__ void kernel_SpGather(float *y, float *x, int *index, int numrow){
int row = blockIdx.x*blockDim.x+threadIdx.x;
if(row < numrow)
y[row] = x[index[row]];
}
| 7fcec2b7f845639a369c19b690a2522b478a2f82.cu | #include "vars.h"
#include "vars_gpu.h"
extern int raynuminc;
extern int raynumout;
extern int mynumray;
extern int mynumpix;
extern int *raysendstart;
extern int *rayrecvstart;
extern int *raysendcount;
extern int *rayrecvcount;
extern int *rayraystart;
extern int *rayrayind;
extern int *rayrecvlist;
extern double ftime;
extern double btime;
extern double fktime;
extern double frtime;
extern double bktime;
extern double brtime;
extern double aftime;
extern double abtime;
extern int numproj;
extern int numback;
extern int proj_rownztot;
extern int *proj_rowdispl;
extern int *proj_rowindex;
extern float *proj_rowvalue;
extern int proj_blocksize;
extern int proj_numblocks;
extern int proj_blocknztot;
extern int *proj_blockdispl;
extern int *proj_blockindex;
extern float *proj_blockvalue;
extern int proj_buffsize;
extern int *proj_buffdispl;
extern int proj_buffnztot;
extern int *proj_buffmap;
extern short *proj_buffindex;
extern float *proj_buffvalue;
extern int back_rownztot;
extern int *back_rowdispl;
extern int *back_rowindex;
extern float *back_rowvalue;
extern int back_blocksize;
extern int back_numblocks;
extern int back_blocknztot;
extern int *back_blockdispl;
extern int *back_blockindex;
extern float *back_blockvalue;
extern int back_buffsize;
extern int *back_buffdispl;
extern int back_buffnztot;
extern int *back_buffmap;
extern short *back_buffindex;
extern float *back_buffvalue;
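// Device-resident copies of the block/buffer-compressed projection and
// backprojection matrices, the ray-exchange maps, and the image/ray buffers
// used by the kernels defined at the end of this file.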
int *proj_blockdispl_d;
int *proj_buffdispl_d;
int *proj_buffmap_d;
short *proj_buffindex_d;
float *proj_buffvalue_d;
int *back_blockdispl_d;
int *back_buffdispl_d;
int *back_buffmap_d;
short *back_buffindex_d;
float *back_buffvalue_d;
int *rayraystart_d;
int *rayrayind_d;
int *rayindray_d;
float *tomogram_d;
float *sinogram_d;
float *raypart_d;
float *raybuff_d;
extern float *raypart;
extern float *raybuff;
void setup_gpu(float **obj,float **gra, float **dir,float **mes,float **res,float **ray){
int numproc;
int myid;
MPI_Comm_size(MPI_COMM_WORLD,&numproc);
MPI_Comm_rank(MPI_COMM_WORLD,&myid);
int device = myid % 4;///8;
printf("myid: %d device: %d\n",myid,device);
cudaSetDevice(device);
if(myid==0){
int deviceCount;
cudaGetDeviceCount(&deviceCount);
printf("\n");
printf("Device Count: %d\n",deviceCount);
//for (int dev = 0; dev < deviceCount; dev++) {
int dev = myid;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("Device %d name: %d\n",dev,deviceProp.name);
printf("Computational Capabilities: %d, %d\n",deviceProp.major,deviceProp.minor);
printf("Maximum global memory size: %lu\n",deviceProp.totalGlobalMem);
printf("Maximum constant memory size: %d\n",deviceProp.totalConstMem);
printf("Maximum shared memory size per block: %d\n",deviceProp.sharedMemPerBlock);
printf("Maximum block dimensions: %dx%dx%d\n",deviceProp.maxThreadsDim[0],deviceProp.maxThreadsDim[1],deviceProp.maxThreadsDim[2]);
printf("Maximum grid dimensions: %dx%dx%d\n",deviceProp.maxGridSize[0],deviceProp.maxGridSize[1],deviceProp.maxGridSize[2]);
printf("Maximum threads per block: %d\n",deviceProp.maxThreadsPerBlock);
printf("Warp size: %d\n",deviceProp.warpSize);
printf("\n");
//}
}
cudaMallocHost((void**)obj,sizeof(float)*mynumpix);
cudaMallocHost((void**)gra,sizeof(float)*mynumpix);
cudaMallocHost((void**)dir,sizeof(float)*mynumpix);
cudaMallocHost((void**)mes,sizeof(float)*mynumray);
cudaMallocHost((void**)res,sizeof(float)*mynumray);
cudaMallocHost((void**)ray,sizeof(float)*mynumray);
//raypart = new float[raynumout];
//raybuff = new float[raynuminc];
cudaMallocHost((void**)&raypart,sizeof(float)*raynumout);
cudaMallocHost((void**)&raybuff,sizeof(float)*raynuminc);
float projmem = 0;
projmem = projmem + sizeof(int)/1e9*(proj_numblocks+1);
projmem = projmem + sizeof(int)/1e9*(proj_blocknztot+1);
projmem = projmem + sizeof(int)/1e9*(proj_blocknztot*proj_buffsize);
projmem = projmem + sizeof(int)/1e9*(proj_buffnztot*proj_blocksize);
projmem = projmem + sizeof(float)/1e9*(proj_buffnztot*proj_blocksize);
//printf("PROC %d FORWARD PROJECTION MEMORY: %f GB\n",myid,projmem);
cudaMalloc((void**)&proj_blockdispl_d,sizeof(int)*(proj_numblocks+1));
cudaMalloc((void**)&proj_buffdispl_d,sizeof(int)*(proj_blocknztot+1));
cudaMalloc((void**)&proj_buffmap_d,sizeof(int)*proj_blocknztot*proj_buffsize);
cudaMalloc((void**)&proj_buffindex_d,sizeof(int)*proj_buffnztot*proj_blocksize);
cudaMalloc((void**)&proj_buffvalue_d,sizeof(float)*proj_buffnztot*proj_blocksize);
cudaMemcpy(proj_blockdispl_d,proj_blockdispl,sizeof(int)*(proj_numblocks+1),cudaMemcpyHostToDevice);
cudaMemcpy(proj_buffdispl_d,proj_buffdispl,sizeof(int)*(proj_blocknztot+1),cudaMemcpyHostToDevice);
cudaMemcpy(proj_buffmap_d,proj_buffmap,sizeof(int)*proj_blocknztot*proj_buffsize,cudaMemcpyHostToDevice);
cudaMemcpy(proj_buffindex_d,proj_buffindex,sizeof(short)*proj_buffnztot*proj_blocksize,cudaMemcpyHostToDevice);
cudaMemcpy(proj_buffvalue_d,proj_buffvalue,sizeof(float)*proj_buffnztot*proj_blocksize,cudaMemcpyHostToDevice);
/*for(int block = 0; block < proj_numblocks; block++)
for(int buff = proj_blockdispl[block]; buff < proj_blockdispl[block+1]; buff++){
printf(" block %d buff %d\n",block,buff);
for(int m = proj_buffdispl[buff]; m < proj_buffdispl[buff+1]; m++){
for(int n = 0; n < proj_blocksize; n++)
printf("%d ",proj_buffindex[m*proj_blocksize+n]);
printf("\n");
}
}
for(int block = 0; block < proj_numblocks; block++)
for(int buff = proj_blockdispl[block]; buff < proj_blockdispl[block+1]; buff++){
printf(" block %d buff %d\n",block,buff);
for(int m = proj_buffdispl[buff]; m < proj_buffdispl[buff+1]; m++){
for(int n = 0; n < proj_blocksize; n++)
printf("%0.1f ",proj_buffvalue[m*proj_blocksize+n]);
printf("\n");
}
}
printf("buffnztot: %d blocksize: %d %d\n",proj_buffnztot,proj_blocksize,proj_buffnztot*proj_blocksize);
printf("blocknztot: %d buffsize: %d %d\n",proj_blocknztot,proj_buffsize,proj_blocknztot*proj_buffsize);*/
cudaMalloc((void**)&back_blockdispl_d,sizeof(int)*(back_numblocks+1));
cudaMalloc((void**)&back_buffdispl_d,sizeof(int)*(back_blocknztot+1));
cudaMalloc((void**)&back_buffmap_d,sizeof(int)*back_blocknztot*back_buffsize);
cudaMalloc((void**)&back_buffindex_d,sizeof(int)*back_buffnztot*back_blocksize);
cudaMalloc((void**)&back_buffvalue_d,sizeof(float)*back_buffnztot*back_blocksize);
cudaMemcpy(back_blockdispl_d,back_blockdispl,sizeof(int)*(back_numblocks+1),cudaMemcpyHostToDevice);
cudaMemcpy(back_buffdispl_d,back_buffdispl,sizeof(int)*(back_blocknztot+1),cudaMemcpyHostToDevice);
cudaMemcpy(back_buffmap_d,back_buffmap,sizeof(int)*back_blocknztot*back_buffsize,cudaMemcpyHostToDevice);
cudaMemcpy(back_buffindex_d,back_buffindex,sizeof(short)*back_buffnztot*back_blocksize,cudaMemcpyHostToDevice);
cudaMemcpy(back_buffvalue_d,back_buffvalue,sizeof(float)*back_buffnztot*back_blocksize,cudaMemcpyHostToDevice);
float backmem = 0;
backmem = backmem + sizeof(int)/1e9*(back_numblocks+1);
backmem = backmem + sizeof(int)/1e9*(back_blocknztot+1);
backmem = backmem + sizeof(int)/1e9*(back_blocknztot*back_buffsize);
backmem = backmem + sizeof(int)/1e9*(back_buffnztot*back_blocksize);
backmem = backmem + sizeof(float)/1e9*(back_buffnztot*back_blocksize);
//printf("PROC %d BACKPROJECTION MEMORY: %f GB\n",myid,backmem);
printf("PROC %d TOTAL GPU MEMORY: %f GB\n",myid,projmem+backmem);
cudaMalloc((void**)&rayraystart_d,sizeof(int)*(mynumray+1));
cudaMalloc((void**)&rayrayind_d,sizeof(int)*raynuminc);
cudaMalloc((void**)&rayindray_d,sizeof(int)*raynuminc);
cudaMemcpy(rayraystart_d,rayraystart,sizeof(int)*(mynumray+1),cudaMemcpyHostToDevice);
cudaMemcpy(rayrayind_d,rayrayind,sizeof(int)*raynuminc,cudaMemcpyHostToDevice);
cudaMemcpy(rayindray_d,rayrecvlist,sizeof(int)*raynuminc,cudaMemcpyHostToDevice);
cudaMalloc((void**)&tomogram_d,sizeof(float)*mynumpix);
cudaMalloc((void**)&sinogram_d,sizeof(float)*mynumray);
cudaMalloc((void**)&raypart_d,sizeof(float)*raynumout);
cudaMalloc((void**)&raybuff_d,sizeof(float)*raynuminc);
cudaFuncSetAttribute(kernel_SpMV_buffered,cudaFuncAttributeMaxDynamicSharedMemorySize,98304);
}
void projection(float *mes, float *obj){
MPI_Barrier(MPI_COMM_WORLD);
double timef = MPI_Wtime();
{
double time = timef;
int blocksize = proj_blocksize;
int numblocks = proj_numblocks;
int buffsize = proj_buffsize;
int *blockdispl = proj_blockdispl_d;
int *buffdispl = proj_buffdispl_d;
int *buffmap = proj_buffmap_d;
short *buffindex = proj_buffindex_d;
float *buffvalue = proj_buffvalue_d;
cudaMemcpy(tomogram_d,obj,sizeof(float)*mynumpix,cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
kernel_SpMV_buffered<<<numblocks,blocksize,sizeof(float)*buffsize>>>(raypart_d,tomogram_d,buffindex,buffvalue,raynumout,blockdispl,buffdispl,buffmap,buffsize);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
fktime = fktime + milliseconds/1000;
//cudaDeviceSynchronize();
//MPI_Barrier(MPI_COMM_WORLD);
//fktime = fktime + MPI_Wtime()-time;
}
{
double time = MPI_Wtime();
cudaMemcpy(raypart,raypart_d,sizeof(float)*raynumout,cudaMemcpyDeviceToHost);
MPI_Alltoallv(raypart,raysendcount,raysendstart,MPI_FLOAT,raybuff,rayrecvcount,rayrecvstart,MPI_FLOAT,MPI_COMM_WORLD);
cudaMemcpy(raybuff_d,raybuff,sizeof(float)*raynuminc,cudaMemcpyHostToDevice);
MPI_Barrier(MPI_COMM_WORLD);
aftime = aftime + MPI_Wtime()-time;
}
{
double time = MPI_Wtime();
kernel_SpReduce<<<(mynumray+255)/256,256>>>(sinogram_d,raybuff_d,rayraystart_d,rayrayind_d,mynumray);
cudaMemcpy(mes,sinogram_d,sizeof(float)*mynumray,cudaMemcpyDeviceToHost);
MPI_Barrier(MPI_COMM_WORLD);
frtime = frtime + MPI_Wtime()-time;
}
ftime = ftime + MPI_Wtime()-timef;
numproj++;
}
void backprojection(float *gra, float *res){
MPI_Barrier(MPI_COMM_WORLD);
double timeb = MPI_Wtime();
{
double time = timeb;
cudaMemcpy(sinogram_d,res,sizeof(float)*mynumray,cudaMemcpyHostToDevice);
kernel_SpGather<<<(raynuminc+255)/256,256>>>(raybuff_d,sinogram_d,rayindray_d,raynuminc);
cudaDeviceSynchronize();
MPI_Barrier(MPI_COMM_WORLD);
brtime = brtime + MPI_Wtime()-time;
}
{
double time = MPI_Wtime();
cudaMemcpy(raybuff,raybuff_d,sizeof(float)*raynuminc,cudaMemcpyDeviceToHost);
MPI_Alltoallv(raybuff,rayrecvcount,rayrecvstart,MPI_FLOAT,raypart,raysendcount,raysendstart,MPI_FLOAT,MPI_COMM_WORLD);
cudaMemcpy(raypart_d,raypart,sizeof(float)*raynumout,cudaMemcpyHostToDevice);
MPI_Barrier(MPI_COMM_WORLD);
abtime = abtime + MPI_Wtime()-time;
}
{
double time = MPI_Wtime();
int blocksize = back_blocksize;
int numblocks = back_numblocks;
int buffsize = back_buffsize;
int *blockdispl = back_blockdispl_d;
int *buffdispl = back_buffdispl_d;
int *buffmap = back_buffmap_d;
short *buffindex = back_buffindex_d;
float *buffvalue = back_buffvalue_d;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
kernel_SpMV_buffered<<<numblocks,blocksize,sizeof(float)*buffsize>>>(tomogram_d,raypart_d,buffindex,buffvalue,mynumpix,blockdispl,buffdispl,buffmap,buffsize);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
bktime = bktime + milliseconds/1000;
cudaMemcpy(gra,tomogram_d,sizeof(float)*mynumpix,cudaMemcpyDeviceToHost);
//MPI_Barrier(MPI_COMM_WORLD);
//bktime = bktime + MPI_Wtime()-time;
}
btime = btime + MPI_Wtime()-timeb;
numback++;
}
__global__ void kernel_SpMV_buffered(float *y, float *x, short *index, float *value, int numrow, int *blockdispl, int *buffdispl, int *buffmap, int buffsize){
extern __shared__ float shared[];
float reduce = 0;
int ind;
for(int buff = blockdispl[blockIdx.x]; buff < blockdispl[blockIdx.x+1]; buff++){
for(int i = threadIdx.x; i < buffsize; i += blockDim.x)
shared[i] = x[buffmap[buff*buffsize+i]];
__syncthreads();
for(int n = buffdispl[buff]; n < buffdispl[buff+1]; n++){
ind = n*blockDim.x+threadIdx.x;
reduce = reduce + shared[index[ind]]*value[ind];
}
__syncthreads();
}
ind = blockIdx.x*blockDim.x+threadIdx.x;
if(ind < numrow)
y[ind] = reduce;
}
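// kernel_SpReduce sums the received ray partials into each local ray through a
// CSR-style (displ,index) map; kernel_SpGather scatters local ray values out in
// the order expected by the reverse (backprojection) exchange.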
__global__ void kernel_SpReduce(float *y, float *x, int *displ, int *index, int numrow){
int row = blockIdx.x*blockDim.x+threadIdx.x;
float reduce = 0;
if(row < numrow){
for(int n = displ[row]; n < displ[row+1]; n++)
reduce = reduce + x[index[n]];
y[row] = reduce;
}
}
__global__ void kernel_SpGather(float *y, float *x, int *index, int numrow){
int row = blockIdx.x*blockDim.x+threadIdx.x;
if(row < numrow)
y[row] = x[index[row]];
}
|
d8a990becaabbadfd284bd2cbb62151f28a70374.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#raw
#include <stdio.h>
#end raw
// -- Define a few helpful constant(s) and macro(s)
#define uint unsigned int
#if $IMUL_FAST
#define IMUL(a, b) __mul24(a, b)
#else
#define IMUL(a, b) a * b
#end if
#set xyzw = ['x','y','z','w']
#if $USE_TEX1DFETCH
// -- Declare a float4 1D texture
// that will be used to fetch input values
texture<float4, 1, hipReadModeElementType> tex_float4;
#end if
// -- Define constant memory buffer for the (sub) filter values
__constant__ float constant \
[$INPUT_D] \
[$N_FILTER_ROWS] \
[$FILTER_W] \
[$N_OUTPUT4S] \
[$N_FILTERS];
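// -- Layout: [input depth][filter row][filter tap][output float4 group][lane
// -- within the float4]; the whole (sub)filter bank has to fit in constant memory.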
extern "C" {
#for nk in xrange($N_KERNELS)
__global__
void cudafilter_kernel_${nk}
(
float4 *input
#for o in xrange($N_OUTPUT4S)
, float4 *output$o
#end for
)
{
// -- Shared-memory buffer for the input tiles
__shared__ float shared_in \
[$BLOCK_H] \
[$N_FILTER_ROWS] \
[$INPUT_D] \
[$INPUT_BLOCK_W + ${int($PAD_SHARED)}] ;
// -- Input/Output "pointers"
const uint in_idx = \
IMUL(IMUL(blockIdx.y, $BLOCK_H), $INPUT_W) + \
IMUL(IMUL($nk, $INPUT_W), $N_FILTER_ROWS) + \
IMUL(threadIdx.y, $INPUT_W) + \
IMUL(blockIdx.x, $BLOCK_W) + threadIdx.x ;
const uint out_idx = \
IMUL(IMUL(blockIdx.y, $BLOCK_H), $OUTPUT_W) + \
IMUL(threadIdx.y, $OUTPUT_W) + \
IMUL(blockIdx.x, $BLOCK_W) + threadIdx.x ;
#if $SPILL
// Create a shared-memory buffer to spill a register value
// into shared memory and thus reduce the register count.
__shared__ uint s_out_idx[$BLOCK_H][$BLOCK_W + ${int($PAD_SHARED)}];
s_out_idx[threadIdx.y][threadIdx.x] = out_idx;
#end if
// -------------------------------------------------------------------------
// -- Load input to shared-memory
// -------------------------------------------------------------------------
#for nfr in xrange($N_FILTER_ROWS)
#for i in xrange($N_LOAD_ITERATIONS)
#if $i==($N_LOAD_ITERATIONS-1)
if( (threadIdx.x + IMUL($BLOCK_W, $i)) < $INPUT_BLOCK_W )
#end if
{
#if $USE_TEX1DFETCH
float4 ival = tex1Dfetch(tex_float4, in_idx + IMUL($INPUT_W, $nfr) + IMUL($BLOCK_W, $i));
#else
float4 ival = input[in_idx + IMUL($INPUT_W, $nfr) + IMUL($BLOCK_W, $i)];
#end if
#for d in xrange($N_FILTERS)
shared_in[threadIdx.y][$nfr][$d][threadIdx.x + IMUL($BLOCK_W, $i)] = ival.$xyzw[$d];
#end for
}
#end for
#end for
__syncthreads();
// -------------------------------------------------------------------------
// -- Compute dot products (fully unrolled)
// -------------------------------------------------------------------------
float value, weight;
#for o in xrange($N_OUTPUT4S)
#for n in xrange($N_FILTERS)
float sum${o}${n} = 0;
#end for
#end for
#for d in xrange($INPUT_D)
#for nfr in xrange($N_FILTER_ROWS)
#for i in xrange($FILTER_W)
value = shared_in[threadIdx.y][$nfr][$d][threadIdx.x+$i];
#for o in xrange($N_OUTPUT4S)
#for n in xrange($N_FILTERS)
weight = constant[$d][$nfr][$i][$o][$n];
sum${o}${n} += value*weight;
#end for
#end for
#end for
#end for
#end for
// -------------------------------------------------------------------------
// -- Output results
// -------------------------------------------------------------------------
#for o in xrange($N_OUTPUT4S)
#for n in xrange($N_FILTERS)
#if $SPILL
output${o}[s_out_idx[threadIdx.y][threadIdx.x]].$xyzw[$n] += sum${o}${n};
#else
output${o}[out_idx].$xyzw[$n] += sum${o}${n};
#end if
#end for
#end for
}
#end for
}
| d8a990becaabbadfd284bd2cbb62151f28a70374.cu | #raw
#include <stdio.h>
#end raw
// -- Define a few helpful constant(s) and macro(s)
#define uint unsigned int
#if $IMUL_FAST
#define IMUL(a, b) __mul24(a, b)
#else
#define IMUL(a, b) a * b
#end if
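// -- With $IMUL_FAST set, index arithmetic goes through __mul24, the 24-bit
// -- integer multiply that was cheaper than a full 32-bit multiply on older GPUs.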
#set xyzw = ['x','y','z','w']
#if $USE_TEX1DFETCH
// -- Declare a float4 1D texture
// that will be used to fetch input values
texture<float4, 1, cudaReadModeElementType> tex_float4;
#end if
// -- Define constant memory buffer for the (sub) filter values
__constant__ float constant \
[$INPUT_D] \
[$N_FILTER_ROWS] \
[$FILTER_W] \
[$N_OUTPUT4S] \
[$N_FILTERS];
extern "C" {
#for nk in xrange($N_KERNELS)
__global__
void cudafilter_kernel_${nk}
(
float4 *input
#for o in xrange($N_OUTPUT4S)
, float4 *output$o
#end for
)
{
// -- Shared-memory buffer for the input tiles
__shared__ float shared_in \
[$BLOCK_H] \
[$N_FILTER_ROWS] \
[$INPUT_D] \
[$INPUT_BLOCK_W + ${int($PAD_SHARED)}] ;
// -- Input/Output "pointers"
const uint in_idx = \
IMUL(IMUL(blockIdx.y, $BLOCK_H), $INPUT_W) + \
IMUL(IMUL($nk, $INPUT_W), $N_FILTER_ROWS) + \
IMUL(threadIdx.y, $INPUT_W) + \
IMUL(blockIdx.x, $BLOCK_W) + threadIdx.x ;
const uint out_idx = \
IMUL(IMUL(blockIdx.y, $BLOCK_H), $OUTPUT_W) + \
IMUL(threadIdx.y, $OUTPUT_W) + \
IMUL(blockIdx.x, $BLOCK_W) + threadIdx.x ;
#if $SPILL
// Create a shared-memory buffer to spill a register value
// into shared memory and thus reduce the register count.
__shared__ uint s_out_idx[$BLOCK_H][$BLOCK_W + ${int($PAD_SHARED)}];
s_out_idx[threadIdx.y][threadIdx.x] = out_idx;
#end if
// -------------------------------------------------------------------------
// -- Load input to shared-memory
// -------------------------------------------------------------------------
#for nfr in xrange($N_FILTER_ROWS)
#for i in xrange($N_LOAD_ITERATIONS)
#if $i==($N_LOAD_ITERATIONS-1)
if( (threadIdx.x + IMUL($BLOCK_W, $i)) < $INPUT_BLOCK_W )
#end if
{
#if $USE_TEX1DFETCH
float4 ival = tex1Dfetch(tex_float4, in_idx + IMUL($INPUT_W, $nfr) + IMUL($BLOCK_W, $i));
#else
float4 ival = input[in_idx + IMUL($INPUT_W, $nfr) + IMUL($BLOCK_W, $i)];
#end if
#for d in xrange($N_FILTERS)
shared_in[threadIdx.y][$nfr][$d][threadIdx.x + IMUL($BLOCK_W, $i)] = ival.$xyzw[$d];
#end for
}
#end for
#end for
__syncthreads();
// -------------------------------------------------------------------------
// -- Compute dot products (fully unrolled)
// -------------------------------------------------------------------------
float value, weight;
#for o in xrange($N_OUTPUT4S)
#for n in xrange($N_FILTERS)
float sum${o}${n} = 0;
#end for
#end for
#for d in xrange($INPUT_D)
#for nfr in xrange($N_FILTER_ROWS)
#for i in xrange($FILTER_W)
value = shared_in[threadIdx.y][$nfr][$d][threadIdx.x+$i];
#for o in xrange($N_OUTPUT4S)
#for n in xrange($N_FILTERS)
weight = constant[$d][$nfr][$i][$o][$n];
sum${o}${n} += value*weight;
#end for
#end for
#end for
#end for
#end for
// -------------------------------------------------------------------------
// -- Output results
// -------------------------------------------------------------------------
#for o in xrange($N_OUTPUT4S)
#for n in xrange($N_FILTERS)
#if $SPILL
output${o}[s_out_idx[threadIdx.y][threadIdx.x]].$xyzw[$n] += sum${o}${n};
#else
output${o}[out_idx].$xyzw[$n] += sum${o}${n};
#end if
#end for
#end for
}
#end for
}
|
877901520975206b3008bda7510fb09c17b3782a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2014, Victor Matheus de Araujo Oliveira All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "connected_component_gpu.h"
#include "texture.h"
#include "block_uf.h"
namespace apollo {
namespace perception {
using std::shared_ptr;
using std::unordered_set;
using std::vector;
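// ConnectedComponentGeneratorGPU labels a binary lane map on the GPU with a
// block-wise union-find (input bound to a texture, labels in global memory),
// then groups the labelled pixels into ConnectedComponent objects on the CPU.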
ConnectedComponentGeneratorGPU::ConnectedComponentGeneratorGPU(int image_width,
int image_height)
: image_width_(image_width),
image_height_(image_height),
width_(image_width),
height_(image_height),
roi_x_min_(0),
roi_y_min_(0),
roi_x_max_(image_width - 1),
roi_y_max_(image_height - 1) {
total_pix_ =
static_cast<size_t>(image_width_) * static_cast<size_t>(image_height_);
hipChannelFormatDesc uchar_desc = hipCreateChannelDesc<unsigned char>();
hipMallocArray(&img_array_, &uchar_desc, static_cast<size_t>(width_),
static_cast<size_t>(height_));
hipBindTextureToArray(img_tex, img_array_, uchar_desc);
hipMalloc(
reinterpret_cast<void**>(&label_array_),
static_cast<size_t>(width_) * static_cast<size_t>(height_) * sizeof(int));
hipError_t cuda_err = hipGetLastError();
if (cuda_err != hipSuccess) {
std::cerr
<< "failed to initialize 'img_array' and 'label_array' with CUDA: "
<< hipGetErrorString(cuda_err) << std::endl;
}
labels_ = static_cast<int*>(malloc(total_pix_ * sizeof(int)));
root_map_.reserve(total_pix_);
}
ConnectedComponentGeneratorGPU::ConnectedComponentGeneratorGPU(int image_width,
int image_height,
cv::Rect roi)
: image_width_(image_width),
image_height_(image_height),
width_(roi.width),
height_(roi.height),
roi_x_min_(roi.x),
roi_y_min_(roi.y),
roi_x_max_(roi.x + roi.width - 1),
roi_y_max_(roi.y + roi.height - 1) {
if (roi_x_min_ < 0) {
std::cerr << "x_min is less than 0: " << roi_x_min_ << std::endl;
}
if (roi_y_min_ < 0) {
std::cerr << "y_min is less than 0: " << roi_y_min_ << std::endl;
}
if (roi_x_max_ >= image_width_) {
std::cerr << "x_max is larger than image width: "
<< roi_x_max_ << "|"
<< image_width_ << std::endl;
}
if (roi_y_max_ >= image_height_) {
std::cerr << "y_max is larger than image height: "
<< roi_y_max_ << "|"
<< image_height_ << std::endl;
}
total_pix_ = static_cast<size_t>(width_) * static_cast<size_t>(height_);
hipChannelFormatDesc uchar_desc = hipCreateChannelDesc<unsigned char>();
hipMallocArray(&img_array_, &uchar_desc, static_cast<size_t>(width_),
static_cast<size_t>(height_));
hipBindTextureToArray(img_tex, img_array_, uchar_desc);
hipMalloc(
reinterpret_cast<void**>(&label_array_),
static_cast<size_t>(width_) * static_cast<size_t>(height_) * sizeof(int));
hipError_t cuda_err = hipGetLastError();
if (cuda_err != hipSuccess) {
std::cerr << "failed to initialize 'img_array' and 'label_array' with CUDA: "
<< hipGetErrorString(cuda_err) << std::endl;
}
labels_ = static_cast<int*>(malloc(total_pix_ * sizeof(int)));
root_map_.reserve(total_pix_);
}
bool ConnectedComponentGeneratorGPU::BlockUnionFind(const unsigned char* img) {
hipError_t cuda_err;
if (width_ == image_width_) {
size_t siz = static_cast<size_t>(width_) * static_cast<size_t>(height_) *
sizeof(unsigned char);
hipMemcpyToArray(img_array_, 0, 0, img, siz, hipMemcpyHostToDevice);
} else {
size_t siz = static_cast<size_t>(width_) * sizeof(unsigned char);
for (size_t i = 0; i < static_cast<size_t>(height_); ++i) {
hipMemcpyToArray(img_array_, 0, i, img, siz, hipMemcpyHostToDevice);
img += image_width_;
}
}
dim3 block(UF_BLOCK_WIDTH, UF_BLOCK_HEIGHT);
dim3 grid(
static_cast<unsigned int>((width_ + UF_BLOCK_WIDTH - 1) / UF_BLOCK_WIDTH),
static_cast<unsigned int>((height_ + UF_BLOCK_HEIGHT - 1) /
UF_BLOCK_HEIGHT));
cuda_err = hipGetLastError();
if (cuda_err != hipSuccess) {
std::cerr << "failed to start block union find with CUDA: "
<< hipGetErrorString(cuda_err) << std::endl;
return false;
}
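// Three passes: union-find inside each block, merging of labels across block
// boundaries, then flattening every pixel to its root label; the cache-config
// hints match each kernel's shared-memory use.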
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
hipLaunchKernelGGL(( block_uf::BlockUnionFindInternal), dim3(grid), dim3(block), 0, 0, label_array_, width_,
height_);
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
hipLaunchKernelGGL(( block_uf::BlockUnionFindBoundary), dim3(grid), dim3(block), 0, 0, label_array_, width_,
height_);
hipLaunchKernelGGL(( block_uf::BlockUnionFindRoot), dim3(grid), dim3(block), 0, 0, label_array_, width_, height_);
hipMemcpy(
labels_, label_array_,
static_cast<size_t>(width_) * static_cast<size_t>(height_) * sizeof(int),
hipMemcpyDeviceToHost);
cuda_err = hipGetLastError();
if (cuda_err != hipSuccess) {
std::cerr << "failed to finish block union find with CUDA: "
<< hipGetErrorString(cuda_err) << std::endl;
return false;
}
return true;
}
bool ConnectedComponentGeneratorGPU::FindConnectedComponents(
const cv::Mat& lane_map, vector<shared_ptr<ConnectedComponent>>* cc) {
if (lane_map.empty()) {
std::cerr << "The input lane map is empty." << std::endl;
return false;
}
if (lane_map.type() != CV_8UC1) {
std::cerr << "The input lane map type is not CV_8UC1." << std::endl;
return false;
}
if (lane_map.cols != image_width_) {
std::cerr << "The width of input lane map does not match." << std::endl;
return false;
}
if (lane_map.rows != image_height_) {
std::cerr << "The height of input lane map does not match." << std::endl;
return false;
}
if (cc == NULL) {
std::cerr << "The pointer of output connected components is null."
<< std::endl;
return false;
}
cc->clear();
const unsigned char* img =
lane_map.data + roi_y_min_ * image_width_ + roi_x_min_;
BlockUnionFind(img);
int cur_idx = 0;
int curt_label = 0;
int cc_count = 0;
root_map_.assign(total_pix_, -1);
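// root_map_ maps a union-find root label to its index in the output vector;
// -1 marks roots that have not produced a component yet.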
for (int y = roi_y_min_; y <= roi_y_max_; ++y) {
for (int x = roi_x_min_; x <= roi_x_max_; ++x) {
curt_label = labels_[cur_idx];
if (curt_label >= 0) {
if (curt_label >= static_cast<int>(total_pix_)) {
std::cerr << "curt_label should be smaller than root_map.size() "
<< curt_label << " (" << total_pix_ << ")." << std::endl;
return false;
}
if (root_map_[curt_label] != -1) {
cc->at(root_map_[curt_label])->AddPixel(x, y);
} else {
cc->push_back(std::make_shared<ConnectedComponent>(x, y));
root_map_[curt_label] = cc_count++;
}
}
++cur_idx;
} // end for x
} // end for y
std::cout << "#cc = " << cc_count << std::endl;
return true;
}
} // namespace perception
} // namespace apollo
| 877901520975206b3008bda7510fb09c17b3782a.cu | /******************************************************************************
* Copyright (c) 2014, Victor Matheus de Araujo Oliveira All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "connected_component_gpu.h"
#include "texture.h"
#include "block_uf.h"
namespace apollo {
namespace perception {
using std::shared_ptr;
using std::unordered_set;
using std::vector;
ConnectedComponentGeneratorGPU::ConnectedComponentGeneratorGPU(int image_width,
int image_height)
: image_width_(image_width),
image_height_(image_height),
width_(image_width),
height_(image_height),
roi_x_min_(0),
roi_y_min_(0),
roi_x_max_(image_width - 1),
roi_y_max_(image_height - 1) {
total_pix_ =
static_cast<size_t>(image_width_) * static_cast<size_t>(image_height_);
cudaChannelFormatDesc uchar_desc = cudaCreateChannelDesc<unsigned char>();
cudaMallocArray(&img_array_, &uchar_desc, static_cast<size_t>(width_),
static_cast<size_t>(height_));
cudaBindTextureToArray(img_tex, img_array_, uchar_desc);
cudaMalloc(
reinterpret_cast<void**>(&label_array_),
static_cast<size_t>(width_) * static_cast<size_t>(height_) * sizeof(int));
cudaError_t cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess) {
std::cerr
<< "failed to initialize 'img_array' and 'label_array' with CUDA: "
<< cudaGetErrorString(cuda_err) << std::endl;
}
labels_ = static_cast<int*>(malloc(total_pix_ * sizeof(int)));
root_map_.reserve(total_pix_);
}
ConnectedComponentGeneratorGPU::ConnectedComponentGeneratorGPU(int image_width,
int image_height,
cv::Rect roi)
: image_width_(image_width),
image_height_(image_height),
width_(roi.width),
height_(roi.height),
roi_x_min_(roi.x),
roi_y_min_(roi.y),
roi_x_max_(roi.x + roi.width - 1),
roi_y_max_(roi.y + roi.height - 1) {
if (roi_x_min_ < 0) {
std::cerr << "x_min is less than 0: " << roi_x_min_ << std::endl;
}
if (roi_y_min_ < 0) {
std::cerr << "y_min is less than 0: " << roi_y_min_ << std::endl;
}
if (roi_x_max_ >= image_width_) {
std::cerr << "x_max is larger than image width: "
<< roi_x_max_ << "|"
<< image_width_ << std::endl;
}
if (roi_y_max_ >= image_height_) {
std::cerr << "y_max is larger than image height: "
<< roi_y_max_ << "|"
<< image_height_ << std::endl;
}
total_pix_ = static_cast<size_t>(width_) * static_cast<size_t>(height_);
cudaChannelFormatDesc uchar_desc = cudaCreateChannelDesc<unsigned char>();
cudaMallocArray(&img_array_, &uchar_desc, static_cast<size_t>(width_),
static_cast<size_t>(height_));
cudaBindTextureToArray(img_tex, img_array_, uchar_desc);
cudaMalloc(
reinterpret_cast<void**>(&label_array_),
static_cast<size_t>(width_) * static_cast<size_t>(height_) * sizeof(int));
cudaError_t cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess) {
std::cerr << "failed to initialize 'img_array' and 'label_array' with CUDA: "
<< cudaGetErrorString(cuda_err) << std::endl;
}
labels_ = static_cast<int*>(malloc(total_pix_ * sizeof(int)));
root_map_.reserve(total_pix_);
}
bool ConnectedComponentGeneratorGPU::BlockUnionFind(const unsigned char* img) {
cudaError_t cuda_err;
if (width_ == image_width_) {
size_t siz = static_cast<size_t>(width_) * static_cast<size_t>(height_) *
sizeof(unsigned char);
cudaMemcpyToArray(img_array_, 0, 0, img, siz, cudaMemcpyHostToDevice);
} else {
size_t siz = static_cast<size_t>(width_) * sizeof(unsigned char);
for (size_t i = 0; i < static_cast<size_t>(height_); ++i) {
cudaMemcpyToArray(img_array_, 0, i, img, siz, cudaMemcpyHostToDevice);
img += image_width_;
}
}
dim3 block(UF_BLOCK_WIDTH, UF_BLOCK_HEIGHT);
dim3 grid(
static_cast<unsigned int>((width_ + UF_BLOCK_WIDTH - 1) / UF_BLOCK_WIDTH),
static_cast<unsigned int>((height_ + UF_BLOCK_HEIGHT - 1) /
UF_BLOCK_HEIGHT));
cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess) {
std::cerr << "failed to start block union find with CUDA: "
<< cudaGetErrorString(cuda_err) << std::endl;
return false;
}
cudaThreadSetCacheConfig(cudaFuncCachePreferShared);
block_uf::BlockUnionFindInternal<<<grid, block>>>(label_array_, width_,
height_);
cudaThreadSetCacheConfig(cudaFuncCachePreferL1);
block_uf::BlockUnionFindBoundary<<<grid, block>>>(label_array_, width_,
height_);
block_uf::BlockUnionFindRoot<<<grid, block>>>(label_array_, width_, height_);
cudaMemcpy(
labels_, label_array_,
static_cast<size_t>(width_) * static_cast<size_t>(height_) * sizeof(int),
cudaMemcpyDeviceToHost);
cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess) {
std::cerr << "failed to finish block union find with CUDA: "
<< cudaGetErrorString(cuda_err) << std::endl;
return false;
}
return true;
}
bool ConnectedComponentGeneratorGPU::FindConnectedComponents(
const cv::Mat& lane_map, vector<shared_ptr<ConnectedComponent>>* cc) {
if (lane_map.empty()) {
std::cerr << "The input lane map is empty." << std::endl;
return false;
}
if (lane_map.type() != CV_8UC1) {
std::cerr << "The input lane map type is not CV_8UC1." << std::endl;
return false;
}
if (lane_map.cols != image_width_) {
std::cerr << "The width of input lane map does not match." << std::endl;
return false;
}
if (lane_map.rows != image_height_) {
std::cerr << "The height of input lane map does not match." << std::endl;
return false;
}
if (cc == NULL) {
std::cerr << "The pointer of output connected components is null."
<< std::endl;
return false;
}
cc->clear();
const unsigned char* img =
lane_map.data + roi_y_min_ * image_width_ + roi_x_min_;
BlockUnionFind(img);
int cur_idx = 0;
int curt_label = 0;
int cc_count = 0;
root_map_.assign(total_pix_, -1);
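  // Second pass over the ROI: map each pixel's union-find root label through root_map_ to a
  // compact component index; the first pixel seen for a given label creates a new ConnectedComponent.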
for (int y = roi_y_min_; y <= roi_y_max_; ++y) {
for (int x = roi_x_min_; x <= roi_x_max_; ++x) {
curt_label = labels_[cur_idx];
if (curt_label >= 0) {
if (curt_label >= static_cast<int>(total_pix_)) {
std::cerr << "curt_label should be smaller than root_map.size() "
<< curt_label << " (" << total_pix_ << ")." << std::endl;
return false;
}
if (root_map_[curt_label] != -1) {
cc->at(root_map_[curt_label])->AddPixel(x, y);
} else {
cc->push_back(std::make_shared<ConnectedComponent>(x, y));
root_map_[curt_label] = cc_count++;
}
}
++cur_idx;
} // end for x
} // end for y
std::cout << "#cc = " << cc_count << std::endl;
return true;
}
} // namespace perception
} // namespace apollo
|
d84ef1f0efdb2f2855a92aae09897b7ea37c2490.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string>
#include <iostream>
#include <sstream>
#include <fstream>
#include <time.h>
#include <cmath> // for pow(), used below to compute calculationLength
#define MAX_THREADS_X_IN_BLOCK_SIZE 1024
#define MAX_BLOCK_X_AMOUNT 1024
#define OPTIMAL_AMOUNT_THREADS_IN_BLOCK_SIZE 256
#define OPTIMAL_BLOCKS_AMOUNT 512
#define MAX_CHUNK_ARRAY_SIZE (MAX_THREADS_X_IN_BLOCK_SIZE * MAX_BLOCK_X_AMOUNT)
using namespace std;
clock_t startCUDAWorkWithoutAllocation, endCUDAWorkWithoutAllocation;
double timeUsedCUDAWorkWithoutAllocation = 0;
hipError_t arrayCalculations(double* c, const double* a, const double* b, unsigned int size);
unsigned int loadFileToArray(std::ifstream &file, double* a, unsigned int sizeToRead);
int saveArrayToFile(double* c, string fileName, unsigned int sizeToWrite);
__global__ void calculateKernel(double* c, const double* a, const double* b, long long size)
{
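	// grid-stride loop: each thread processes elements index, index + stride, index + 2*stride, ...
	// so the full array is covered even when it is larger than the launched grid.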
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < size; i += stride)
c[i] = (a[i] - b[i]) * 4 + 5;
}
int main()
{
// 2 ** 20 - max possible array chunk
const unsigned int arraySize = MAX_CHUNK_ARRAY_SIZE;
double a[arraySize] = {};
double b[arraySize] = {};
double c[arraySize] = {};
//long long calculationLength = ::pow(2, 33);
long long calculationLength = (long long)::pow(2, 21);
string fileName1 = "..\\test_file1_floats.txt";
string fileName2 = "..\\test_file2_floats.txt";
string fileNameResult = "..\\result.txt";
long long currentFilePossition = 0;
int numberOfIterations = (int) (calculationLength / MAX_CHUNK_ARRAY_SIZE);
hipError_t cudaStatus;
std::ifstream file1(fileName1);
std::ifstream file2(fileName2);
clock_t startProgramWork, endProgramWork;
clock_t startCUDAWork, endCUDAWork;
double timeUsedProgramWork = 0;
double timeUsedCUDAWork = 0;
startProgramWork = clock();
for (size_t i = 0; i < numberOfIterations; i++)
{
// setArrays from files
unsigned int setSize1 = loadFileToArray(file1, a, arraySize);
unsigned int setSize2 = loadFileToArray(file2, b, arraySize);
if (setSize1 == 0 || setSize2 == 0)
{
printf("Unable to open file for reading.");
return 1;
}
if (setSize1 != setSize2)
{
printf("Lines in files should be of the same length.");
return 1;
}
startCUDAWork = clock();
// Add vectors in parallel.
cudaStatus = arrayCalculations(c, a, b, setSize1);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "arrayCalculations failed!");
return 1;
}
endCUDAWork = clock();
timeUsedCUDAWork += ((double)(endCUDAWork - startCUDAWork)) / CLOCKS_PER_SEC;
// save result array to file
int result = saveArrayToFile(c, fileNameResult, setSize1);
if (result == -1)
{
printf("Unable to open file for writing.");
return 1;
}
currentFilePossition += setSize1;
}
file1.close();
file2.close();
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
endProgramWork = clock();
timeUsedProgramWork += ((double)(endProgramWork - startProgramWork)) / CLOCKS_PER_SEC;
printf("Time elapsed: %f\n", timeUsedProgramWork);
printf("Time elapsed for CUDA work: %f\n", timeUsedCUDAWork);
printf("Time elapsed for CUDA without memory allocation: %f\n", timeUsedCUDAWorkWithoutAllocation);
return 0;
}
// receives files with values from [0, 100)
unsigned int loadFileToArray(std::ifstream &file, double* a, unsigned int sizeToRead) {
std::string line;
if (!file.is_open())
{
return 0;
}
unsigned int loop = 0;
double currentValue = 0;
while (!file.eof() && loop < sizeToRead)
{
std::getline(file, line);
currentValue = std::stod(line) / 100;
a[loop] = currentValue;
loop++;
}
return loop;
}
int saveArrayToFile(double* c, string fileName, unsigned int sizeToWrite)
{
ofstream file(fileName, std::ios_base::app);
if (!file.is_open())
{
return -1;
}
for (unsigned int count = 0; count < sizeToWrite; count++) {
std::ostringstream ss;
ss << c[count];
ss << "\n";
file << ss.str();
}
file.close();
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t arrayCalculations(double* c, const double* a, const double* b, unsigned int size)
{
double* dev_a = 0;
double* dev_b = 0;
double* dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output).
cudaStatus = hipMalloc((void**)& dev_c, size * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)& dev_a, size * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)& dev_b, size * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
startCUDAWorkWithoutAllocation = clock();
// Launch a kernel on the GPU with one thread for each element.
	int numBlocks = (size + MAX_THREADS_X_IN_BLOCK_SIZE - 1) / MAX_THREADS_X_IN_BLOCK_SIZE;
int numThreads = MAX_THREADS_X_IN_BLOCK_SIZE;
if (size < MAX_THREADS_X_IN_BLOCK_SIZE)
{
numBlocks = 1;
numThreads = size;
}
hipLaunchKernelGGL(( calculateKernel) , dim3(numBlocks), dim3(numThreads), 0, 0, dev_c, dev_a, dev_b, size);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "arrayCalculations launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching arrayCalculations!\n", cudaStatus);
goto Error;
}
endCUDAWorkWithoutAllocation = clock();
timeUsedCUDAWorkWithoutAllocation += ((double)(endCUDAWorkWithoutAllocation - startCUDAWorkWithoutAllocation)) / CLOCKS_PER_SEC;
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| d84ef1f0efdb2f2855a92aae09897b7ea37c2490.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string>
#include <iostream>
#include <sstream>
#include <fstream>
#include <time.h>
#include <cmath> // for std::pow(), used below to compute calculationLength
#define MAX_THREADS_X_IN_BLOCK_SIZE 1024
#define MAX_BLOCK_X_AMOUNT 1024
#define OPTIMAL_AMOUNT_THREADS_IN_BLOCK_SIZE 256
#define OPTIMAL_BLOCKS_AMOUNT 512
#define MAX_CHUNK_ARRAY_SIZE (MAX_THREADS_X_IN_BLOCK_SIZE * MAX_BLOCK_X_AMOUNT)
using namespace std;
clock_t startCUDAWorkWithoutAllocation, endCUDAWorkWithoutAllocation;
double timeUsedCUDAWorkWithoutAllocation = 0;
cudaError_t arrayCalculations(double* c, const double* a, const double* b, unsigned int size);
unsigned int loadFileToArray(std::ifstream &file, double* a, unsigned int sizeToRead);
int saveArrayToFile(double* c, string fileName, unsigned int sizeToWrite);
__global__ void calculateKernel(double* c, const double* a, const double* b, long long size)
{
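	// grid-stride loop: each thread processes elements index, index + stride, index + 2*stride, ...
	// so the full array is covered even when it is larger than the launched grid.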
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < size; i += stride)
c[i] = (a[i] - b[i]) * 4 + 5;
}
int main()
{
// 2 ** 20 - max possible array chunk
const unsigned int arraySize = MAX_CHUNK_ARRAY_SIZE;
double a[arraySize] = {};
double b[arraySize] = {};
double c[arraySize] = {};
//long long calculationLength = std::pow(2, 33);
long long calculationLength = (long long)std::pow(2, 21);
string fileName1 = "..\\test_file1_floats.txt";
string fileName2 = "..\\test_file2_floats.txt";
string fileNameResult = "..\\result.txt";
long long currentFilePossition = 0;
int numberOfIterations = (int) (calculationLength / MAX_CHUNK_ARRAY_SIZE);
cudaError_t cudaStatus;
std::ifstream file1(fileName1);
std::ifstream file2(fileName2);
clock_t startProgramWork, endProgramWork;
clock_t startCUDAWork, endCUDAWork;
double timeUsedProgramWork = 0;
double timeUsedCUDAWork = 0;
startProgramWork = clock();
for (size_t i = 0; i < numberOfIterations; i++)
{
// setArrays from files
unsigned int setSize1 = loadFileToArray(file1, a, arraySize);
unsigned int setSize2 = loadFileToArray(file2, b, arraySize);
if (setSize1 == 0 || setSize2 == 0)
{
printf("Unable to open file for reading.");
return 1;
}
if (setSize1 != setSize2)
{
printf("Lines in files should be of the same length.");
return 1;
}
startCUDAWork = clock();
// Add vectors in parallel.
cudaStatus = arrayCalculations(c, a, b, setSize1);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "arrayCalculations failed!");
return 1;
}
endCUDAWork = clock();
timeUsedCUDAWork += ((double)(endCUDAWork - startCUDAWork)) / CLOCKS_PER_SEC;
// save result array to file
int result = saveArrayToFile(c, fileNameResult, setSize1);
if (result == -1)
{
printf("Unable to open file for writing.");
return 1;
}
currentFilePossition += setSize1;
}
file1.close();
file2.close();
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
endProgramWork = clock();
timeUsedProgramWork += ((double)(endProgramWork - startProgramWork)) / CLOCKS_PER_SEC;
printf("Time elapsed: %f\n", timeUsedProgramWork);
printf("Time elapsed for CUDA work: %f\n", timeUsedCUDAWork);
printf("Time elapsed for CUDA without memory allocation: %f\n", timeUsedCUDAWorkWithoutAllocation);
return 0;
}
// receives files with values from [0, 100)
unsigned int loadFileToArray(std::ifstream &file, double* a, unsigned int sizeToRead) {
std::string line;
if (!file.is_open())
{
return 0;
}
unsigned int loop = 0;
double currentValue = 0;
while (!file.eof() && loop < sizeToRead)
{
std::getline(file, line);
currentValue = std::stod(line) / 100;
a[loop] = currentValue;
loop++;
}
return loop;
}
int saveArrayToFile(double* c, string fileName, unsigned int sizeToWrite)
{
ofstream file(fileName, std::ios_base::app);
if (!file.is_open())
{
return -1;
}
for (unsigned int count = 0; count < sizeToWrite; count++) {
std::ostringstream ss;
ss << c[count];
ss << "\n";
file << ss.str();
}
file.close();
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t arrayCalculations(double* c, const double* a, const double* b, unsigned int size)
{
double* dev_a = 0;
double* dev_b = 0;
double* dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output).
cudaStatus = cudaMalloc((void**)& dev_c, size * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& dev_a, size * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& dev_b, size * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
startCUDAWorkWithoutAllocation = clock();
// Launch a kernel on the GPU with one thread for each element.
	int numBlocks = (size + MAX_THREADS_X_IN_BLOCK_SIZE - 1) / MAX_THREADS_X_IN_BLOCK_SIZE;
int numThreads = MAX_THREADS_X_IN_BLOCK_SIZE;
if (size < MAX_THREADS_X_IN_BLOCK_SIZE)
{
numBlocks = 1;
numThreads = size;
}
calculateKernel <<<numBlocks, numThreads>>> (dev_c, dev_a, dev_b, size);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "arrayCalculations launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching arrayCalculations!\n", cudaStatus);
goto Error;
}
endCUDAWorkWithoutAllocation = clock();
timeUsedCUDAWorkWithoutAllocation += ((double)(endCUDAWorkWithoutAllocation - startCUDAWorkWithoutAllocation)) / CLOCKS_PER_SEC;
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
24ea9064b228a07d4d4337b2543b0d594fa1e490.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __FINDMIN_KERNEL__
#define __FINDMIN_KERNEL__
#define INF 1073741824
__global__ void findMin1_kernel(int *costArray, char *commit, int *bufferBlock, int *minValue, int n)
{
__shared__ int sdata[1024];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; //(65533*1024)
int threshold = *minValue;
if ( i>=n || commit[i]==1 )
sdata[tid] = INF;
else
sdata[tid] = costArray[i];
if ( sdata[tid]<threshold )
sdata[tid] = INF;
__syncthreads();
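	// tree reduction in shared memory: each step halves the number of active threads and keeps
	// the smaller value, so sdata[0] ends up holding the minimum cost of this block.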
for (unsigned int s=blockDim.x/2; s>0; s>>=1){
if (tid<s){
if ( sdata[tid]>sdata[tid+s] )
sdata[tid] = sdata[tid+s];
}
__syncthreads();
}
if (tid == 0)
bufferBlock[blockIdx.x] = sdata[0];
}
__global__ void findMin2_kernel(int *bufferBlock, int *costArray, int n)
{
__shared__ int sdata[1024];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; //(less than 65533)
if (i<n){
sdata[tid] = costArray[i];
}
else {
sdata[tid] = INF;
}
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1){
if (tid<s){
if ( sdata[tid]>sdata[tid+s] )
sdata[tid] = sdata[tid+s];
}
__syncthreads();
}
if (tid == 0)
bufferBlock[blockIdx.x] = sdata[0];
}
__global__ void findMin3_kernel( int *bufferBlock, int n, int *minValue )
{
__shared__ int sdata[1024];
unsigned int tid = threadIdx.x; // node number is less than 1024
if (tid<n){
sdata[tid] = bufferBlock[tid];
}
else {
sdata[tid] = INF;
}
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1){
if (tid<s){
if ( sdata[tid]>sdata[tid+s] )
sdata[tid] = sdata[tid+s];
}
__syncthreads();
}
if (tid == 0)
*minValue = sdata[tid];
}
#endif
| 24ea9064b228a07d4d4337b2543b0d594fa1e490.cu | #ifndef __FINDMIN_KERNEL__
#define __FINDMIN_KERNEL__
#define INF 1073741824
__global__ void findMin1_kernel(int *costArray, char *commit, int *bufferBlock, int *minValue, int n)
{
__shared__ int sdata[1024];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; //(65533*1024)
int threshold = *minValue;
if ( i>=n || commit[i]==1 )
sdata[tid] = INF;
else
sdata[tid] = costArray[i];
if ( sdata[tid]<threshold )
sdata[tid] = INF;
__syncthreads();
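	// tree reduction in shared memory: each step halves the number of active threads and keeps
	// the smaller value, so sdata[0] ends up holding the minimum cost of this block.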
for (unsigned int s=blockDim.x/2; s>0; s>>=1){
if (tid<s){
if ( sdata[tid]>sdata[tid+s] )
sdata[tid] = sdata[tid+s];
}
__syncthreads();
}
if (tid == 0)
bufferBlock[blockIdx.x] = sdata[0];
}
__global__ void findMin2_kernel(int *bufferBlock, int *costArray, int n)
{
__shared__ int sdata[1024];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; //(less than 65533)
if (i<n){
sdata[tid] = costArray[i];
}
else {
sdata[tid] = INF;
}
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1){
if (tid<s){
if ( sdata[tid]>sdata[tid+s] )
sdata[tid] = sdata[tid+s];
}
__syncthreads();
}
if (tid == 0)
bufferBlock[blockIdx.x] = sdata[0];
}
__global__ void findMin3_kernel( int *bufferBlock, int n, int *minValue )
{
__shared__ int sdata[1024];
unsigned int tid = threadIdx.x; // node number is less than 1024
if (tid<n){
sdata[tid] = bufferBlock[tid];
}
else {
sdata[tid] = INF;
}
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1){
if (tid<s){
if ( sdata[tid]>sdata[tid+s] )
sdata[tid] = sdata[tid+s];
}
__syncthreads();
}
if (tid == 0)
*minValue = sdata[tid];
}
#endif
|
sobel_kernel.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*Zachary Job
*sorbelGPU.cu
*
*needs binning
*/
#ifndef _SOBEL_KERNEL_
#define _SOBEL_KERNEL_
#include "defs.h"
/*
SOOOOOORRy.. It got a little intense. I realized I had a bug and I
hot fixed it last minute
*
*Applies the filter using shared memory->output buffer
*
*/
__global__ void sorbelCU(int *imgIn, int *resOut,
const int imgWidth, const int imgHeight,
const int imgWidthOffs, const int imgHeightOffs,
const int imgSZ, const size_t thrs)
{
__shared__ int imgSh[FILTER_SZ];
int offsSh, o_l, o_u, loc, shloc,
x, y, x_gr = 0, y_gr = 0;
//Block/Thread indexes
o_l = THREADS_PER_DIM * blockIdx.x;
x = o_l + threadIdx.x;
o_u = THREADS_PER_DIM * blockIdx.y;
y = o_u + threadIdx.y;
shloc = threadIdx.x + threadIdx.y * THREADS_PER_DIM; //reused to free up registers
//Block offset in shared
offsSh = FILTER_BLOCK_OFFS + shloc + (threadIdx.y << 1);
//Img to shared offset
shloc = shloc << 1;
loc = o_l + o_u * imgWidth - imgWidth - 1 // shared start location offset in Img
+ (shloc % FILTER_DIM) // x offset modulo
+ imgWidth * (shloc / FILTER_DIM); // y offset
	//Ensure a valid result; each thread loads two indexes
	//Preferred an if statement over computation in order to
	//keep the shared data size at a multiple that optimizes
	//its use
if(shloc < FILTER_SZ - 1 && loc >= 0 && loc < imgSZ)
{
imgSh[shloc] = imgIn[loc];
imgSh[shloc + 1] = imgIn[loc + 1];
}
__syncthreads();
//Apply filter to valid indexes, reuse shloc to save registers
shloc = ((int)(x && y && x < imgWidthOffs && y < imgHeightOffs)) << sizeof(int);
o_u = offsSh - FILTER_DIM;
o_l = offsSh + FILTER_DIM;
x_gr = //Granularity x -> 3 above, 3 below, 3x3 grid, xy center
/************+1**********************+2*******************+1*******************/
/**/ (imgSh[o_u - 1] + /**/ (imgSh[o_u] << 1) +/**/ imgSh[o_u + 1] - /**/
/******************************************************************************/
/**/ /*0*/ /**/ /*ELEM*/ /**/ /*0*/ /**/
/***********-1***********************-2*******************-1*******************/
/**/ imgSh[o_l - 1] - /**/ (imgSh[o_l] << 1) -/**/ imgSh[o_l + 1]) /**/
/******************************************************************************/
& shloc;
y_gr = (0 //Granularity y -> 3 left, 3 right, 3x3 grid, xy center
/***********-1**********************************************+1*****************/
/**/ - imgSh[o_u - 1] /**/ /*0*/ /**/ + imgSh[o_u + 1] /**/
/***********-2**********************************************+2*****************/
	/**/ - (imgSh[offsSh - 1] << 1) /**/ /*OFST*/ /**/ + (imgSh[offsSh + 1] << 1) /**/
/***********-1**********************************************+1*****************/
/**/ - imgSh[o_l - 1] /**/ /*0*/ /**/ + imgSh[o_l + 1]) /**/
/******************************************************************************/
& shloc;
//Update result according to magnitude, or if border blacken
resOut[x + y * imgWidth] = 255 * (int)((x_gr * x_gr + y_gr * y_gr) > thrs);
}
#endif | sobel_kernel.cu | /*
*Zachary Job
*sorbelGPU.cu
*
*needs binning
*/
#ifndef _SOBEL_KERNEL_
#define _SOBEL_KERNEL_
#include "defs.h"
/*
SOOOOOORRy.. It got a little intense. I realized I had a bug and I
hot fixed it last minute
*
*Applies the filter using shared memory->output buffer
*
*/
__global__ void sorbelCU(int *imgIn, int *resOut,
const int imgWidth, const int imgHeight,
const int imgWidthOffs, const int imgHeightOffs,
const int imgSZ, const size_t thrs)
{
__shared__ int imgSh[FILTER_SZ];
int offsSh, o_l, o_u, loc, shloc,
x, y, x_gr = 0, y_gr = 0;
//Block/Thread indexes
o_l = THREADS_PER_DIM * blockIdx.x;
x = o_l + threadIdx.x;
o_u = THREADS_PER_DIM * blockIdx.y;
y = o_u + threadIdx.y;
shloc = threadIdx.x + threadIdx.y * THREADS_PER_DIM; //reused to free up registers
//Block offset in shared
offsSh = FILTER_BLOCK_OFFS + shloc + (threadIdx.y << 1);
//Img to shared offset
shloc = shloc << 1;
loc = o_l + o_u * imgWidth - imgWidth - 1 // shared start location offset in Img
+ (shloc % FILTER_DIM) // x offset modulo
+ imgWidth * (shloc / FILTER_DIM); // y offset
	//Ensure a valid result; each thread loads two indexes
	//Preferred an if statement over computation in order to
	//keep the shared data size at a multiple that optimizes
	//its use
if(shloc < FILTER_SZ - 1 && loc >= 0 && loc < imgSZ)
{
imgSh[shloc] = imgIn[loc];
imgSh[shloc + 1] = imgIn[loc + 1];
}
__syncthreads();
//Apply filter to valid indexes, reuse shloc to save registers
shloc = ((int)(x && y && x < imgWidthOffs && y < imgHeightOffs)) << sizeof(int);
o_u = offsSh - FILTER_DIM;
o_l = offsSh + FILTER_DIM;
x_gr = //Granularity x -> 3 above, 3 below, 3x3 grid, xy center
/************+1**********************+2*******************+1*******************/
/**/ (imgSh[o_u - 1] + /**/ (imgSh[o_u] << 1) +/**/ imgSh[o_u + 1] - /**/
/******************************************************************************/
/**/ /*0*/ /**/ /*ELEM*/ /**/ /*0*/ /**/
/***********-1***********************-2*******************-1*******************/
/**/ imgSh[o_l - 1] - /**/ (imgSh[o_l] << 1) -/**/ imgSh[o_l + 1]) /**/
/******************************************************************************/
& shloc;
y_gr = (0 //Granularity y -> 3 left, 3 right, 3x3 grid, xy center
/***********-1**********************************************+1*****************/
/**/ - imgSh[o_u - 1] /**/ /*0*/ /**/ + imgSh[o_u + 1] /**/
/***********-2**********************************************+2*****************/
	/**/ - (imgSh[offsSh - 1] << 1) /**/ /*OFST*/ /**/ + (imgSh[offsSh + 1] << 1) /**/
/***********-1**********************************************+1*****************/
/**/ - imgSh[o_l - 1] /**/ /*0*/ /**/ + imgSh[o_l + 1]) /**/
/******************************************************************************/
& shloc;
//Update result according to magnitude, or if border blacken
resOut[x + y * imgWidth] = 255 * (int)((x_gr * x_gr + y_gr * y_gr) > thrs);
}
#endif |
0aedaa1c331f127152a6fadd1c2c6d3e76ab6090.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <map>
#include <math.h>
#include "xyzio.h"
#include "dcdio.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <float.h>
#include "vec_utils.h"
#include "timer.h"
#define R_ACT (0.1f)
#define E_ACT (5000.0f) //J/mol
#define DH (5000.0f) //J/mol
#define DT (2.4e-6) //nsec
#define LIMIT (100.0f) //nm
#define PAIRSCUTOFF (16.0f)
#define MAXPAIRS (1000)
#define HARMCUTOFF (1.0f)
#define HARMSCALE (5000.0f)
#define TYPENONE (0)
#define TYPEA (1)
#define TYPEB (2)
#define TYPEC (3)
#define BLOCK_SIZE (256)
template <typename T>
class hd_vector {
public:
void resize(size_t len_) {
h.resize(len_);
d.resize(len_);
}
void h2d() { d = h; }
void d2h() { h = d; }
T* d_ptr() { return thrust::raw_pointer_cast(d.data()); }
T* h_ptr() { return thrust::raw_pointer_cast(h.data()); }
thrust::host_vector<T> h;
thrust::device_vector<T> d;
};
typedef struct
{
hd_vector<float3> c;
hd_vector<float3> v;
hd_vector<float> m;
hd_vector<int> t;
hd_vector<int> nearest;
hd_vector<float> nearest_dist;
} Particles;
typedef struct
{
float3 *c;
float3 *v;
float *m;
int *t;
int *nearest;
float *nearest_dist;
float3 *f;
int *pairsCount;
int *pairs;
} d_Particles;
void checkCUDAError(){
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
printf("CUDA error: %s \n", hipGetErrorString(error));
exit(0);
}
}
__global__ void find_nearest(d_Particles parts, int atom_count)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < atom_count)
{
parts.nearest_dist[i] = FLT_MAX;
parts.nearest[i] = -1;
float3 pi = parts.c[i];
for(unsigned int p = 0; p < parts.pairsCount[i]; p++)
{
int j = parts.pairs[p*atom_count+i];
if( (parts.t[i] != parts.t[j]) && (parts.t[j] != TYPENONE) && (i != j))
{
// if(parts.nearest[i] >= 0)
{
float3 pj = parts.c[j];
float dist = getDistance(pi,pj,LIMIT);
if(parts.nearest_dist[i] > dist)
{
parts.nearest_dist[i] = dist;
parts.nearest[i] = j;
}
}
}
}
}
}
__global__ void pairlistGPU(d_Particles parts, int atom_count)
{
int i, j;
i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < atom_count)
{
float3 ri = parts.c[i];
int pairsCount = 0;
for(j = 0; j < atom_count; j++)
{
if ( (j != i) && (parts.t[j] != TYPENONE) )
{
float3 rj = parts.c[j];
if( (getDistance(ri, rj, LIMIT) < PAIRSCUTOFF) && pairsCount < MAXPAIRS)
{
parts.pairs[pairsCount*atom_count + i] = j;
pairsCount ++;
}
}
}
parts.pairsCount[i] = pairsCount;
}
}
__global__ void perform_collisions(d_Particles parts, hiprandState_t *state, int atom_count)
{
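	// A + B -> C reaction: when particle i (type A) and its mutual nearest neighbour (type B) lie
	// within R_ACT and their combined kinetic energy exceeds E_ACT, the B particle becomes a C
	// particle with a new randomly oriented velocity (energy budget ei + ej - E_ACT + DH), and the
	// A particle is removed (type, position and velocity zeroed).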
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < atom_count)
{
// if( (parts.nearest_dist[i] < R_ACT) && (parts.nearest[parts.nearest[i]] == i) )
if(parts.nearest[i] >= 0)
{
if( (parts.nearest_dist[i] < R_ACT) && (parts.nearest[parts.nearest[i]] == i) && (parts.t[i] == TYPEA) && (parts.t[parts.nearest[i]] == TYPEB) && (parts.t[i] != TYPENONE) )
{
int j = parts.nearest[i];
float3 vi = parts.v[i];
float ei = parts.m[i]*(vi.x*vi.x+vi.y*vi.y+vi.z*vi.z)/2;
float3 vj = parts.v[j];
float ej = parts.m[j]*(vj.x*vj.x+vj.y*vj.y+vj.z*vj.z)/2;
if(ei + ej > E_ACT)
{
float scale = sqrt(2*(ei+ej-E_ACT+DH)/(parts.m[i]+parts.m[j]));
float phi = 2*M_PI*(float)hiprand_uniform(&state[i]);
float teta = M_PI*((float)hiprand_uniform(&state[i]) - 0.5);
parts.v[j].x = sinf(teta)*cosf(phi)*scale;
parts.v[j].y = sinf(teta)*sin(phi)*scale;
parts.v[j].z = cosf(teta)*scale;
parts.t[j] = 3;
//destroy A
parts.c[i].x = 0;
parts.c[i].y = 0;
parts.c[i].z = 0;
parts.v[i].x = 0;
parts.v[i].y = 0;
parts.v[i].z = 0;
parts.t[i] = 0;
}
}
}
}
}
__global__ void integrate(d_Particles parts, int atom_count)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < atom_count)
{
if(parts.t[i] != TYPENONE)
{
parts.c[i].x += parts.v[i].x*DT;
parts.c[i].y += parts.v[i].y*DT;
parts.c[i].z += parts.v[i].z*DT;
parts.c[i] = transferPBC(parts.c[i], LIMIT);
parts.v[i].x += parts.f[i].x*DT/parts.m[i];
parts.v[i].y += parts.f[i].y*DT/parts.m[i];
parts.v[i].z += parts.f[i].z*DT/parts.m[i];
parts.f[i].x = 0;
parts.f[i].y = 0;
parts.f[i].z = 0;
if( (!isfinite(parts.c[i].x)) || (!isfinite(parts.c[i].y)) || (!isfinite(parts.c[i].z)))
{
parts.t[i] = TYPENONE;
}
}
}
}
__global__ void initCurand(hiprandState_t *state, unsigned long seed)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
hiprand_init(seed, idx, idx, &state[idx]);
}
__global__ void computeLJPairlist(float3* r, float3* f, int* type, int N, int* d_pairsCount, int* d_pairs)
{
int i, j, p;
i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N)
{
float3 fi = f[i];
float3 ri = r[i];
for(p = 0; p < d_pairsCount[i]; p++)
{
j = d_pairs[p*N + i];
if(type[j] != TYPENONE)
{
float3 rj = r[j];
float Bij = 1.0f;
float epsilon = 3e-2f;
float sigma = 2e-1f;
float3 rij = getVector(ri, rj, LIMIT);
float rijmod2 = rij.x*rij.x + rij.y*rij.y + rij.z*rij.z;
float srij2 = sigma*sigma/rijmod2;
float srij6 = srij2*srij2*srij2;
float df = -6.0f*epsilon*srij6*Bij/rijmod2;
if(!isfinite(df))
{
df = 0.0f;
}
fi.x += df*rij.x;
fi.y += df*rij.y;
fi.z += df*rij.z;
}
}
f[i] = fi;
if(!isfinite(f[i].x))
f[i].x = 0.0f;
if(!isfinite(f[i].y))
f[i].y = 0.0f;
if(!isfinite(f[i].z))
f[i].z = 0.0f;
}
}
__global__ void computeHarmPairlist(d_Particles parts, int atom_count)
{
int i, j, p;
i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < atom_count)
{
float3 fi = parts.f[i];
float3 ri = parts.c[i];
for(p = 0; p < parts.pairsCount[i]; p++)
{
j = parts.pairs[p*atom_count + i];
if(parts.t[j] != TYPENONE)
{
float3 rj = parts.c[j];
float3 rij = getVector(ri, rj, LIMIT);
float rijmod2 = rij.x*rij.x + rij.y*rij.y + rij.z*rij.z;
float rijmod = sqrtf(rijmod2);
float df = 0.0f;
if(rijmod <= HARMCUTOFF)
{
df += HARMSCALE*(rijmod - HARMCUTOFF)*(rijmod - HARMCUTOFF);
}
if(!isfinite(df))
{
df = 0.0f;
}
fi.x += df*rij.x;
fi.y += df*rij.y;
fi.z += df*rij.z;
}
}
parts.f[i] = fi;
if(!isfinite(parts.f[i].x))
parts.f[i].x = 0.0f;
if(!isfinite(parts.f[i].y))
parts.f[i].y = 0.0f;
if(!isfinite(parts.f[i].z))
parts.f[i].z = 0.0f;
}
}
int main(int argc, char **argv)
{
hipSetDevice(1);
std::map<char, int> name2ind;
name2ind['H'] = TYPEA;
name2ind['O'] = TYPEB;
name2ind['N'] = TYPEC;
std::map<int, char> ind2name;
ind2name[TYPEA] = 'H';
ind2name[TYPEB] = 'O';
ind2name[TYPEC] = 'N';
std::map<char, float> name2mass;
name2mass['H'] = 0.012;
name2mass['O'] = 0.016;
name2mass['N'] = 0.028;
std::map<int, float> ind2mass;
ind2mass[TYPEA] = 0.012;
ind2mass[TYPEB] = 0.016;
ind2mass[TYPEC] = 0.028;
XYZ in_xyz;
readXYZ(argv[1], &in_xyz);
Particles part;
std::cout<<"Total particles "<<in_xyz.atomCount<<std::endl;
part.c.resize(in_xyz.atomCount);
part.v.resize(in_xyz.atomCount);
part.m.resize(in_xyz.atomCount);
part.t.resize(in_xyz.atomCount);
part.nearest.resize(in_xyz.atomCount);
part.nearest_dist.resize(in_xyz.atomCount);
for(unsigned int i = 0; i < in_xyz.atomCount; i++)
{
part.t.h[i] = name2ind[in_xyz.atoms[i].name];
part.m.h[i] = name2mass[in_xyz.atoms[i].name];
part.c.h[i].x = in_xyz.atoms[i].x;
part.c.h[i].y = in_xyz.atoms[i].y;
part.c.h[i].z = in_xyz.atoms[i].z;
part.nearest.h[i] = -1;
}
readXYZ(argv[2], &in_xyz);
for(unsigned int i = 0; i < in_xyz.atomCount; i++)
{
part.v.h[i].x = in_xyz.atoms[i].x;
part.v.h[i].y = in_xyz.atoms[i].y;
part.v.h[i].z = in_xyz.atoms[i].z;
}
part.c.h2d();
part.v.h2d();
part.m.h2d();
part.t.h2d();
part.nearest.h2d();
part.nearest_dist.h2d();
d_Particles d_part;
d_part.c = part.c.d_ptr();
d_part.v = part.v.d_ptr();
d_part.m = part.m.d_ptr();
d_part.t = part.t.d_ptr();
d_part.nearest = part.nearest.d_ptr();
d_part.nearest_dist = part.nearest_dist.d_ptr();
hipMalloc((void**)&(d_part.f), in_xyz.atomCount * sizeof(float3));
hipMalloc((void**)&(d_part.pairsCount), in_xyz.atomCount * sizeof(int));
hipMalloc((void**)&(d_part.pairs), in_xyz.atomCount* MAXPAIRS * sizeof(int));
checkCUDAError();
hiprandState_t *devState;
hipMalloc((void**)&devState, in_xyz.atomCount * sizeof(hiprandState_t));
hipLaunchKernelGGL(( initCurand), dim3((in_xyz.atomCount)/BLOCK_SIZE+1), dim3(BLOCK_SIZE), 0, 0, devState, 100500);
hipDeviceSynchronize();
DCD dcd;
DCD vels;
int Nsteps = 25000000;
int pairsfreq = 100;
int dcdfreq = 10;
dcd.N = 3*in_xyz.atomCount; vels.N = 3*in_xyz.atomCount;
dcd.NFILE = Nsteps/dcdfreq; vels.NFILE = Nsteps/dcdfreq;
dcd.NPRIV = 1; vels.NPRIV = 1;
dcd.NSAVC = dcdfreq; vels.NSAVC = dcdfreq;
dcd.DELTA = DT; vels.DELTA = DT;
dcd.open_write("out.dcd"); vels.open_write("out.dcd");
dcd.write_header(); vels.write_header();
dcd.allocate(); vels.allocate();
std::cout<<"Starting simulation with "<<part.c.d.size()<<" particles."<<std::endl;
initTimer();
for(unsigned int step = 0; step < Nsteps; step++)
{
if(step%pairsfreq == 0)
{
hipLaunchKernelGGL(( pairlistGPU), dim3((in_xyz.atomCount)/BLOCK_SIZE+1), dim3(BLOCK_SIZE), 0, 0, d_part, in_xyz.atomCount);
}
hipLaunchKernelGGL(( computeHarmPairlist), dim3((in_xyz.atomCount)/BLOCK_SIZE+1), dim3(BLOCK_SIZE), 0, 0, d_part, in_xyz.atomCount);
hipLaunchKernelGGL(( find_nearest), dim3((in_xyz.atomCount)/BLOCK_SIZE+1), dim3(BLOCK_SIZE), 0, 0, d_part, in_xyz.atomCount);
checkCUDAError();
// part.nearest.d2h();
// for(unsigned int i = 0; i < in_xyz.atomCount; i++)
// {
// std::cout<<i<<" "<<part.nearest.h[i]<<" "<<part.nearest.h[part.nearest.h[i]]<<std::endl;
// }
// exit(0);
hipLaunchKernelGGL(( perform_collisions), dim3((in_xyz.atomCount)/BLOCK_SIZE+1), dim3(BLOCK_SIZE), 0, 0, d_part, devState, in_xyz.atomCount);
checkCUDAError();
hipLaunchKernelGGL(( integrate), dim3((in_xyz.atomCount)/BLOCK_SIZE+1), dim3(BLOCK_SIZE), 0, 0, d_part, in_xyz.atomCount);
if(step%dcdfreq == 0)
{
for(unsigned int i = 0; i < 3*in_xyz.atomCount; i++)
{
dcd.X[i] = 0; vels.X[i] = 0;
dcd.Y[i] = 0; vels.Y[i] = 0;
dcd.Z[i] = 0; vels.Z[i] = 0;
}
int c_a = 0;
int c_b = in_xyz.atomCount;
int c_c = 2*in_xyz.atomCount;
part.c.d2h();
part.v.d2h();
part.t.d2h();
double E = 0;
for(unsigned int i = 0; i < in_xyz.atomCount; i++)
{
int offset;
if(part.t.h[i] == TYPEA)
{
offset = c_a;
c_a += 1;
}
if(part.t.h[i] == TYPEB)
{
offset = c_b;
c_b += 1;
}
if(part.t.h[i] == TYPEC)
{
offset = c_c;
c_c += 1;
}
dcd.X[offset] = part.c.h[i].x; vels.X[offset] = part.v.h[i].x;
dcd.Y[offset] = part.c.h[i].y; vels.Y[offset] = part.v.h[i].y;
dcd.Z[offset] = part.c.h[i].z; vels.Z[offset] = part.v.h[i].z;
double v2 = 0;
if (part.t.h[i] != TYPENONE)
{
float m = ind2mass[part.t.h[i]];
v2 = vels.X[offset]*vels.X[offset];
v2 += vels.Y[offset]*vels.Y[offset];
v2 += vels.Z[offset]*vels.Z[offset];
E += m*v2/2;
}
}
E = E/(c_a+c_b+c_c - 3*in_xyz.atomCount);
std::cout<<step<<" "<<c_a<<" "<<c_b-in_xyz.atomCount<<" "<<c_c-in_xyz.atomCount*2<<" "<<2.0*E/3.0/8.314<<std::endl;
dcd.write_frame(); vels.write_frame();
printTime(step);
}
}
dcd.close();
vels.close();
return 0;
}
| 0aedaa1c331f127152a6fadd1c2c6d3e76ab6090.cu | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <map>
#include <math.h>
#include "xyzio.h"
#include "dcdio.h"
#include <curand.h>
#include <curand_kernel.h>
#include <float.h>
#include "vec_utils.h"
#include "timer.h"
#define R_ACT (0.1f)
#define E_ACT (5000.0f) //J/mol
#define DH (5000.0f) //J/mol
#define DT (2.4e-6) //nsec
#define LIMIT (100.0f) //nm
#define PAIRSCUTOFF (16.0f)
#define MAXPAIRS (1000)
#define HARMCUTOFF (1.0f)
#define HARMSCALE (5000.0f)
#define TYPENONE (0)
#define TYPEA (1)
#define TYPEB (2)
#define TYPEC (3)
#define BLOCK_SIZE (256)
template <typename T>
class hd_vector {
public:
void resize(size_t len_) {
h.resize(len_);
d.resize(len_);
}
void h2d() { d = h; }
void d2h() { h = d; }
T* d_ptr() { return thrust::raw_pointer_cast(d.data()); }
T* h_ptr() { return thrust::raw_pointer_cast(h.data()); }
thrust::host_vector<T> h;
thrust::device_vector<T> d;
};
typedef struct
{
hd_vector<float3> c;
hd_vector<float3> v;
hd_vector<float> m;
hd_vector<int> t;
hd_vector<int> nearest;
hd_vector<float> nearest_dist;
} Particles;
typedef struct
{
float3 *c;
float3 *v;
float *m;
int *t;
int *nearest;
float *nearest_dist;
float3 *f;
int *pairsCount;
int *pairs;
} d_Particles;
void checkCUDAError(){
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("CUDA error: %s \n", cudaGetErrorString(error));
exit(0);
}
}
__global__ void find_nearest(d_Particles parts, int atom_count)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < atom_count)
{
parts.nearest_dist[i] = FLT_MAX;
parts.nearest[i] = -1;
float3 pi = parts.c[i];
for(unsigned int p = 0; p < parts.pairsCount[i]; p++)
{
int j = parts.pairs[p*atom_count+i];
if( (parts.t[i] != parts.t[j]) && (parts.t[j] != TYPENONE) && (i != j))
{
// if(parts.nearest[i] >= 0)
{
float3 pj = parts.c[j];
float dist = getDistance(pi,pj,LIMIT);
if(parts.nearest_dist[i] > dist)
{
parts.nearest_dist[i] = dist;
parts.nearest[i] = j;
}
}
}
}
}
}
__global__ void pairlistGPU(d_Particles parts, int atom_count)
{
int i, j;
i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < atom_count)
{
float3 ri = parts.c[i];
int pairsCount = 0;
for(j = 0; j < atom_count; j++)
{
if ( (j != i) && (parts.t[j] != TYPENONE) )
{
float3 rj = parts.c[j];
if( (getDistance(ri, rj, LIMIT) < PAIRSCUTOFF) && pairsCount < MAXPAIRS)
{
parts.pairs[pairsCount*atom_count + i] = j;
pairsCount ++;
}
}
}
parts.pairsCount[i] = pairsCount;
}
}
__global__ void perform_collisions(d_Particles parts, curandState *state, int atom_count)
{
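	// A + B -> C reaction: when particle i (type A) and its mutual nearest neighbour (type B) lie
	// within R_ACT and their combined kinetic energy exceeds E_ACT, the B particle becomes a C
	// particle with a new randomly oriented velocity (energy budget ei + ej - E_ACT + DH), and the
	// A particle is removed (type, position and velocity zeroed).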
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < atom_count)
{
// if( (parts.nearest_dist[i] < R_ACT) && (parts.nearest[parts.nearest[i]] == i) )
if(parts.nearest[i] >= 0)
{
if( (parts.nearest_dist[i] < R_ACT) && (parts.nearest[parts.nearest[i]] == i) && (parts.t[i] == TYPEA) && (parts.t[parts.nearest[i]] == TYPEB) && (parts.t[i] != TYPENONE) )
{
int j = parts.nearest[i];
float3 vi = parts.v[i];
float ei = parts.m[i]*(vi.x*vi.x+vi.y*vi.y+vi.z*vi.z)/2;
float3 vj = parts.v[j];
float ej = parts.m[j]*(vj.x*vj.x+vj.y*vj.y+vj.z*vj.z)/2;
if(ei + ej > E_ACT)
{
float scale = sqrt(2*(ei+ej-E_ACT+DH)/(parts.m[i]+parts.m[j]));
float phi = 2*M_PI*(float)curand_uniform(&state[i]);
float teta = M_PI*((float)curand_uniform(&state[i]) - 0.5);
parts.v[j].x = sinf(teta)*cosf(phi)*scale;
parts.v[j].y = sinf(teta)*sin(phi)*scale;
parts.v[j].z = cosf(teta)*scale;
parts.t[j] = 3;
//destroy A
parts.c[i].x = 0;
parts.c[i].y = 0;
parts.c[i].z = 0;
parts.v[i].x = 0;
parts.v[i].y = 0;
parts.v[i].z = 0;
parts.t[i] = 0;
}
}
}
}
}
__global__ void integrate(d_Particles parts, int atom_count)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < atom_count)
{
if(parts.t[i] != TYPENONE)
{
parts.c[i].x += parts.v[i].x*DT;
parts.c[i].y += parts.v[i].y*DT;
parts.c[i].z += parts.v[i].z*DT;
parts.c[i] = transferPBC(parts.c[i], LIMIT);
parts.v[i].x += parts.f[i].x*DT/parts.m[i];
parts.v[i].y += parts.f[i].y*DT/parts.m[i];
parts.v[i].z += parts.f[i].z*DT/parts.m[i];
parts.f[i].x = 0;
parts.f[i].y = 0;
parts.f[i].z = 0;
if( (!isfinite(parts.c[i].x)) || (!isfinite(parts.c[i].y)) || (!isfinite(parts.c[i].z)))
{
parts.t[i] = TYPENONE;
}
}
}
}
__global__ void initCurand(curandState *state, unsigned long seed)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(seed, idx, idx, &state[idx]);
}
__global__ void computeLJPairlist(float3* r, float3* f, int* type, int N, int* d_pairsCount, int* d_pairs)
{
int i, j, p;
i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N)
{
float3 fi = f[i];
float3 ri = r[i];
for(p = 0; p < d_pairsCount[i]; p++)
{
j = d_pairs[p*N + i];
if(type[j] != TYPENONE)
{
float3 rj = r[j];
float Bij = 1.0f;
float epsilon = 3e-2f;
float sigma = 2e-1f;
float3 rij = getVector(ri, rj, LIMIT);
float rijmod2 = rij.x*rij.x + rij.y*rij.y + rij.z*rij.z;
float srij2 = sigma*sigma/rijmod2;
float srij6 = srij2*srij2*srij2;
float df = -6.0f*epsilon*srij6*Bij/rijmod2;
if(!isfinite(df))
{
df = 0.0f;
}
fi.x += df*rij.x;
fi.y += df*rij.y;
fi.z += df*rij.z;
}
}
f[i] = fi;
if(!isfinite(f[i].x))
f[i].x = 0.0f;
if(!isfinite(f[i].y))
f[i].y = 0.0f;
if(!isfinite(f[i].z))
f[i].z = 0.0f;
}
}
__global__ void computeHarmPairlist(d_Particles parts, int atom_count)
{
int i, j, p;
i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < atom_count)
{
float3 fi = parts.f[i];
float3 ri = parts.c[i];
for(p = 0; p < parts.pairsCount[i]; p++)
{
j = parts.pairs[p*atom_count + i];
if(parts.t[j] != TYPENONE)
{
float3 rj = parts.c[j];
float3 rij = getVector(ri, rj, LIMIT);
float rijmod2 = rij.x*rij.x + rij.y*rij.y + rij.z*rij.z;
float rijmod = sqrtf(rijmod2);
float df = 0.0f;
if(rijmod <= HARMCUTOFF)
{
df += HARMSCALE*(rijmod - HARMCUTOFF)*(rijmod - HARMCUTOFF);
}
if(!isfinite(df))
{
df = 0.0f;
}
fi.x += df*rij.x;
fi.y += df*rij.y;
fi.z += df*rij.z;
}
}
parts.f[i] = fi;
if(!isfinite(parts.f[i].x))
parts.f[i].x = 0.0f;
if(!isfinite(parts.f[i].y))
parts.f[i].y = 0.0f;
if(!isfinite(parts.f[i].z))
parts.f[i].z = 0.0f;
}
}
int main(int argc, char **argv)
{
cudaSetDevice(1);
std::map<char, int> name2ind;
name2ind['H'] = TYPEA;
name2ind['O'] = TYPEB;
name2ind['N'] = TYPEC;
std::map<int, char> ind2name;
ind2name[TYPEA] = 'H';
ind2name[TYPEB] = 'O';
ind2name[TYPEC] = 'N';
std::map<char, float> name2mass;
name2mass['H'] = 0.012;
name2mass['O'] = 0.016;
name2mass['N'] = 0.028;
std::map<int, float> ind2mass;
ind2mass[TYPEA] = 0.012;
ind2mass[TYPEB] = 0.016;
ind2mass[TYPEC] = 0.028;
XYZ in_xyz;
readXYZ(argv[1], &in_xyz);
Particles part;
std::cout<<"Total particles "<<in_xyz.atomCount<<std::endl;
part.c.resize(in_xyz.atomCount);
part.v.resize(in_xyz.atomCount);
part.m.resize(in_xyz.atomCount);
part.t.resize(in_xyz.atomCount);
part.nearest.resize(in_xyz.atomCount);
part.nearest_dist.resize(in_xyz.atomCount);
for(unsigned int i = 0; i < in_xyz.atomCount; i++)
{
part.t.h[i] = name2ind[in_xyz.atoms[i].name];
part.m.h[i] = name2mass[in_xyz.atoms[i].name];
part.c.h[i].x = in_xyz.atoms[i].x;
part.c.h[i].y = in_xyz.atoms[i].y;
part.c.h[i].z = in_xyz.atoms[i].z;
part.nearest.h[i] = -1;
}
readXYZ(argv[2], &in_xyz);
for(unsigned int i = 0; i < in_xyz.atomCount; i++)
{
part.v.h[i].x = in_xyz.atoms[i].x;
part.v.h[i].y = in_xyz.atoms[i].y;
part.v.h[i].z = in_xyz.atoms[i].z;
}
part.c.h2d();
part.v.h2d();
part.m.h2d();
part.t.h2d();
part.nearest.h2d();
part.nearest_dist.h2d();
d_Particles d_part;
d_part.c = part.c.d_ptr();
d_part.v = part.v.d_ptr();
d_part.m = part.m.d_ptr();
d_part.t = part.t.d_ptr();
d_part.nearest = part.nearest.d_ptr();
d_part.nearest_dist = part.nearest_dist.d_ptr();
cudaMalloc((void**)&(d_part.f), in_xyz.atomCount * sizeof(float3));
cudaMalloc((void**)&(d_part.pairsCount), in_xyz.atomCount * sizeof(int));
cudaMalloc((void**)&(d_part.pairs), in_xyz.atomCount* MAXPAIRS * sizeof(int));
checkCUDAError();
curandState *devState;
cudaMalloc((void**)&devState, in_xyz.atomCount * sizeof(curandState));
initCurand<<<(in_xyz.atomCount)/BLOCK_SIZE+1, BLOCK_SIZE>>>(devState, 100500);
cudaDeviceSynchronize();
DCD dcd;
DCD vels;
int Nsteps = 25000000;
int pairsfreq = 100;
int dcdfreq = 10;
dcd.N = 3*in_xyz.atomCount; vels.N = 3*in_xyz.atomCount;
dcd.NFILE = Nsteps/dcdfreq; vels.NFILE = Nsteps/dcdfreq;
dcd.NPRIV = 1; vels.NPRIV = 1;
dcd.NSAVC = dcdfreq; vels.NSAVC = dcdfreq;
dcd.DELTA = DT; vels.DELTA = DT;
dcd.open_write("out.dcd"); vels.open_write("out.dcd");
dcd.write_header(); vels.write_header();
dcd.allocate(); vels.allocate();
std::cout<<"Starting simulation with "<<part.c.d.size()<<" particles."<<std::endl;
initTimer();
for(unsigned int step = 0; step < Nsteps; step++)
{
if(step%pairsfreq == 0)
{
pairlistGPU<<<(in_xyz.atomCount)/BLOCK_SIZE+1, BLOCK_SIZE>>>(d_part, in_xyz.atomCount);
}
computeHarmPairlist<<<(in_xyz.atomCount)/BLOCK_SIZE+1, BLOCK_SIZE>>>(d_part, in_xyz.atomCount);
find_nearest<<<(in_xyz.atomCount)/BLOCK_SIZE+1, BLOCK_SIZE>>>(d_part, in_xyz.atomCount);
checkCUDAError();
// part.nearest.d2h();
// for(unsigned int i = 0; i < in_xyz.atomCount; i++)
// {
// std::cout<<i<<" "<<part.nearest.h[i]<<" "<<part.nearest.h[part.nearest.h[i]]<<std::endl;
// }
// exit(0);
perform_collisions<<<(in_xyz.atomCount)/BLOCK_SIZE+1, BLOCK_SIZE>>>(d_part, devState, in_xyz.atomCount);
checkCUDAError();
integrate<<<(in_xyz.atomCount)/BLOCK_SIZE+1, BLOCK_SIZE>>>(d_part, in_xyz.atomCount);
if(step%dcdfreq == 0)
{
for(unsigned int i = 0; i < 3*in_xyz.atomCount; i++)
{
dcd.X[i] = 0; vels.X[i] = 0;
dcd.Y[i] = 0; vels.Y[i] = 0;
dcd.Z[i] = 0; vels.Z[i] = 0;
}
int c_a = 0;
int c_b = in_xyz.atomCount;
int c_c = 2*in_xyz.atomCount;
part.c.d2h();
part.v.d2h();
part.t.d2h();
double E = 0;
for(unsigned int i = 0; i < in_xyz.atomCount; i++)
{
int offset;
if(part.t.h[i] == TYPEA)
{
offset = c_a;
c_a += 1;
}
if(part.t.h[i] == TYPEB)
{
offset = c_b;
c_b += 1;
}
if(part.t.h[i] == TYPEC)
{
offset = c_c;
c_c += 1;
}
dcd.X[offset] = part.c.h[i].x; vels.X[offset] = part.v.h[i].x;
dcd.Y[offset] = part.c.h[i].y; vels.Y[offset] = part.v.h[i].y;
dcd.Z[offset] = part.c.h[i].z; vels.Z[offset] = part.v.h[i].z;
double v2 = 0;
if (part.t.h[i] != TYPENONE)
{
float m = ind2mass[part.t.h[i]];
v2 = vels.X[offset]*vels.X[offset];
v2 += vels.Y[offset]*vels.Y[offset];
v2 += vels.Z[offset]*vels.Z[offset];
E += m*v2/2;
}
}
E = E/(c_a+c_b+c_c - 3*in_xyz.atomCount);
std::cout<<step<<" "<<c_a<<" "<<c_b-in_xyz.atomCount<<" "<<c_c-in_xyz.atomCount*2<<" "<<2.0*E/3.0/8.314<<std::endl;
dcd.write_frame(); vels.write_frame();
printTime(step);
}
}
dcd.close();
vels.close();
return 0;
}
|
cd2673fe37cb9cc97fb704f34e3ff8a96afdc63a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
int main() {
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Number of SM: %d\n", prop.multiProcessorCount);
printf(" Shared memory of each Thread Block: %f KB\n", prop.sharedMemPerBlock / 1024.0);
printf(" Maximum Threads of each Thread Block: %d\n", prop.maxThreadsPerBlock);
printf(" Maximum Threads Per Multi-Processor: %d\n", prop.maxThreadsPerMultiProcessor);
printf(" Memory Clock Rate (KHz): %d\n",prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
| cd2673fe37cb9cc97fb704f34e3ff8a96afdc63a.cu | #include <stdio.h>
int main() {
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Number of SM: %d\n", prop.multiProcessorCount);
printf(" Shared memory of each Thread Block: %f KB\n", prop.sharedMemPerBlock / 1024.0);
printf(" Maximum Threads of each Thread Block: %d\n", prop.maxThreadsPerBlock);
printf(" Maximum Threads Per Multi-Processor: %d\n", prop.maxThreadsPerMultiProcessor);
printf(" Memory Clock Rate (KHz): %d\n",prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
|
c90ddf5257b72a6b2f49dd5ecdcba36634887bfe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled.
///////////For P100, L2 cache prefetch exists. L1 cache prefetch does not.
///////////The data found in cache depends on the data size. So the prefetch is caused by the memcpy which goes through the L2 as well.
///////////Since 8mb (largest data size used) is far less to saturate the tlbs, if tlbs show misses using it, it means that just L1 or L1 and L2 tlbs does not prefetch.
//typedef unsigned char byte;
void init_cpu_data(int* A, int size, int stride, int mod){
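	// Build a circular pointer-chasing pattern: starting at index 0, repeatedly reading j = A[j]
	// advances by `stride` ints and wraps around every `mod` elements.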
for (int i = 0; i < size; ++i){
A[i]=(i + stride) % mod;
}
}
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
//long long int start_time = 0;//////clock
//long long int end_time = 0;//////clock
//start_time = clock64();//////clock
for (int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//long long int total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ int s_index[1024 * 4];
//__shared__ int s_index[1];
int j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
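	// The inline PTX below declares scratch registers and converts the shared-memory base address;
	// inside the loop each iteration reads %clock64, performs one dependent global load j = A[j],
	// stores the loaded index into s_index, and reads %clock64 again, so s_tvalue[it] records the
	// per-access pointer-chasing latency.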
asm(".reg .u32 t1;\n\t"
".reg .u64 t2;\n\t"
".reg .u32 t3;\n\t"
".reg .u32 t4;\n\t"
".reg .u64 t5;\n\t"
".reg .u32 t6;\n\t"
".reg .u64 t7;\n\t"
"cvta.to.shared.u64 t5, %0;\n\t"
"cvt.u32.u64 t6, t5;\n\t"
:: "l"(s_index));////////////////////////////////////cvta.to.global.u64 %rd4, %rd25; needed??
for (int it = 0; it < iterations; it++){//////////it here is limited by the size of the shared memory
asm("shl.b32 t1, %3, 2;\n\t"
"cvt.u64.u32 t7, t1;\n\t"
"add.s64 t2, t7, %4;\n\t"
"shl.b32 t3, %6, 2;\n\t"
"add.s32 t4, t3, t6;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"st.shared.u32 [t4], %2;\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "l"(s_index), "r"(it));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(int *A, int iterations, int *B, int *C, long long int *D, float clock_rate, int mod, int data_stride){
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, iterations, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
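//Note that the kernel is launched with a single thread in a single block (see main below), so
//the loads of the chase are fully serialized and data-dependent; each recorded interval is
//therefore a per-load latency rather than a throughput figure.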
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out;
checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(int) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
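//The sweep below repeats the same 64-access chase (data_stride = 32, mod = 2 * 1024, an 8 KB
//window) over allocations of 8 MB, 4 MB, 2 MB, 1.5 MB, 1 MB, 32 KB, 16 KB and 8 KB. Only the
//allocation size changes, so latency differences between runs reflect what the host-to-device
//copy and the allocation left behind in the caches and TLBs, not a different access pattern.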
for(int data_stride = 32; data_stride <= 32; data_stride = data_stride + 1){/////////stride shall be L1 cache line size.
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////pascal L2 4m /////pascal L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256 * 8;/////8mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################8mb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////pascal L2 4m /////pascal L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256 * 4;/////4mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################4mb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256 * 2;/////2mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################2mb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256 * 1.5;/////1.5mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################1.5mb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256;/////1mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################1mb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 8;/////32kb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################32kb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 4;/////16kb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################16kb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 2;/////8kb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################8kb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(hipFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
| c90ddf5257b72a6b2f49dd5ecdcba36634887bfe.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled.
///////////For P100, L2 cache prefetch exists. L1 cache prefetch does not.
///////////The data found in cache depends on the data size. So the prefetch is caused by the memcpy which goes through the L2 as well.
///////////Since 8 MB (the largest data size used) is far too small to saturate the TLBs, any TLB misses observed with it mean that the L1 TLB alone, or the L1 and L2 TLBs together, do not prefetch.
//typedef unsigned char byte;
void init_cpu_data(int* A, int size, int stride, int mod){
for (int i = 0; i < size; ++i){
A[i]=(i + stride) % mod;
}
}
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
//long long int start_time = 0;//////clock
//long long int end_time = 0;//////clock
//start_time = clock64();//////clock
for (int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//long long int total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ int s_index[1024 * 4];
//__shared__ int s_index[1];
int j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u32 t1;\n\t"
".reg .u64 t2;\n\t"
".reg .u32 t3;\n\t"
".reg .u32 t4;\n\t"
".reg .u64 t5;\n\t"
".reg .u32 t6;\n\t"
".reg .u64 t7;\n\t"
"cvta.to.shared.u64 t5, %0;\n\t"
"cvt.u32.u64 t6, t5;\n\t"
:: "l"(s_index));////////////////////////////////////cvta.to.global.u64 %rd4, %rd25; needed??
for (int it = 0; it < iterations; it++){//////////it here is limited by the size of the shared memory
asm("shl.b32 t1, %3, 2;\n\t"
"cvt.u64.u32 t7, t1;\n\t"
"add.s64 t2, t7, %4;\n\t"
"shl.b32 t3, %6, 2;\n\t"
"add.s32 t4, t3, t6;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"st.shared.u32 [t4], %2;\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "l"(s_index), "r"(it));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(int *A, int iterations, int *B, int *C, long long int *D, float clock_rate, int mod, int data_stride){
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, iterations, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out;
checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(int) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
for(int data_stride = 32; data_stride <= 32; data_stride = data_stride + 1){/////////stride shall be L1 cache line size.
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////pascal L2 4m /////pascal L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256 * 8;/////8mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################8mb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////pascal L2 4m /////pascal L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256 * 4;/////4mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################4mb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256 * 2;/////2mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################2mb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256 * 1.5;/////1.5mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################1.5mb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 256;/////1mb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################1mb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 8;/////32kb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################32kb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 4;/////16kb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################16kb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(int mod = 1024 * 2; mod <= 1024 * 2; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 1024 * 2;/////8kb.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256 ////////we only need to see the first time if it is prefetched or not.
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "############data_size%d#########################8kb\n", data_size);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < iterations; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(cudaFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
|
1844ee7195c16148424c8aad0cedffddcd94d457.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void getSumSquares(float* X, float* XSqr, int dim, int dimpow2) {
extern __shared__ float x[];
int i, k;
int offset = blockIdx.x*dim;
int jump = dimpow2;
int sumjump = jump >> 1;
//Step 0: Figure out K (number of batches per block)
int K = dimpow2 >> 9;
if (K == 0) {
K = 1;
}
if (jump > 512) {
jump = 512;
}
//Step 1: Copy over each row to shared memory
//and square in the process
for (k = 0; k < K; k++) {
i = k*jump + threadIdx.x;
if (i < dim) {
x[i] = X[offset + i]*X[offset + i];
}
else if (i < dimpow2) {
x[i] = 0.0;
}
}
__syncthreads();
//Step 2: Perform sums
while (sumjump > 0) {
if (threadIdx.x < sumjump) {
K = sumjump >> 9;
if (K == 0) {
K = 1;
}
jump = sumjump;
if (jump > 512) {
jump = 512;
}
for (k = 0; k < K; k++) {
i = k*jump + threadIdx.x;
x[i] += x[i + sumjump];
}
}
sumjump = sumjump >> 1;
__syncthreads();
}
//Step 3: Copy back results
XSqr[blockIdx.x] = x[0];
}
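//Taken together, the two kernels appear to implement a Euclidean cross-distance matrix:
//getSumSquares writes the squared norm of each row of its input, and finishCSM below combines
//those norms with the dot products already stored in CSM as sqrt(YSqr[i] + XSqr[j] - 2 * CSM[i][j]),
//clamping at zero to guard against rounding error before the square root.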
//CSM is N x M
__global__ void finishCSM(float* CSM, float* XSqr, float* YSqr, int N, int M, int MPow2) {
int offset = blockIdx.x*M;
int K = MPow2 >> 9;
int i;
int k, jump = MPow2;
float val = 0.0;
if (K == 0) {
K = 1;
}
if (jump > 512) {
jump = 512;
}
for (k = 0; k < K; k++) {
i = k*jump + threadIdx.x;
if (i < M) {
val = XSqr[i] + YSqr[blockIdx.x];
val = val - 2*CSM[offset + i];
if (val < 0) {
val = 0;
}
CSM[offset + i] = sqrt(val);
}
}
}
| 1844ee7195c16148424c8aad0cedffddcd94d457.cu | __global__ void getSumSquares(float* X, float* XSqr, int dim, int dimpow2) {
extern __shared__ float x[];
int i, k;
int offset = blockIdx.x*dim;
int jump = dimpow2;
int sumjump = jump >> 1;
//Step 0: Figure out K (number of batches per block)
int K = dimpow2 >> 9;
if (K == 0) {
K = 1;
}
if (jump > 512) {
jump = 512;
}
//Step 1: Copy over each row to shared memory
//and square in the process
for (k = 0; k < K; k++) {
i = k*jump + threadIdx.x;
if (i < dim) {
x[i] = X[offset + i]*X[offset + i];
}
else if (i < dimpow2) {
x[i] = 0.0;
}
}
__syncthreads();
//Step 2: Perform sums
while (sumjump > 0) {
if (threadIdx.x < sumjump) {
K = sumjump >> 9;
if (K == 0) {
K = 1;
}
jump = sumjump;
if (jump > 512) {
jump = 512;
}
for (k = 0; k < K; k++) {
i = k*jump + threadIdx.x;
x[i] += x[i + sumjump];
}
}
sumjump = sumjump >> 1;
__syncthreads();
}
//Step 3: Copy back results
XSqr[blockIdx.x] = x[0];
}
//CSM is N x M
__global__ void finishCSM(float* CSM, float* XSqr, float* YSqr, int N, int M, int MPow2) {
int offset = blockIdx.x*M;
int K = MPow2 >> 9;
int i;
int k, jump = MPow2;
float val = 0.0;
if (K == 0) {
K = 1;
}
if (jump > 512) {
jump = 512;
}
for (k = 0; k < K; k++) {
i = k*jump + threadIdx.x;
if (i < M) {
val = XSqr[i] + YSqr[blockIdx.x];
val = val - 2*CSM[offset + i];
if (val < 0) {
val = 0;
}
CSM[offset + i] = sqrt(val);
}
}
}
|
108ad98a5a3e7caf21e603dc9bd1506671692e10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
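// Layout note: data_col is written as a (channels * kernel_h * kernel_w) x (height_col * width_col)
// matrix, with each column holding the unrolled receptive field of one output position; this is
// what allows the convolution to be computed as a single matrix multiplication downstream.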
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
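// The first num_axes (+1) threads stage the kernel shape, pad, stride, dilation and the im/col
// shape vectors in shared memory so the main loop can read them without repeated global-memory
// traffic; the __syncthreads() below publishes them to the whole block before any index
// arithmetic starts.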
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int channel_in = index;
int channel_out = 1;
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % shared_col_shape[i + 1];
channel_in /= shared_col_shape[i + 1];
channel_out *= shared_kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
channel_out *= shared_col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
channel_in *= shared_im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= shared_col_shape[i + 1];
d_iter[i] = 0;
}
Dtype* data_col_ptr = data_col + channel_out;
const Dtype* data_im_ptr = data_im + channel_in;
bool incremented;
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
if (!in_range) { break; }
}
if (in_range) {
int data_im_offset = d_iter[0] * shared_dilation[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= shared_im_shape[i + 1];
data_im_offset += d_iter[i] * shared_dilation[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = shared_kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
const int num_kernels, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 1>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 2:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 2>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 3:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 3>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 4:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 4>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 5:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 5>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 6:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 6>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 7:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 7>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 8:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 8>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 9:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 9>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 10:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 10>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
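// Illustrative sketch (not part of the original Caffe source): col2im_gpu is
// the accumulation counterpart of im2col_gpu. Every column entry is summed
// back into the image pixel it was copied from, so overlapping patches add up
// and the result is the adjoint of im2col, not its inverse. A hedged example
// with the same hypothetical 3x227x227 / 11x11 / stride-4 setup as above;
// the function name and buffers are assumptions.
template <typename Dtype>
void col2im_gpu_usage_sketch(const Dtype* d_col, Dtype* d_im) {
  col2im_gpu<Dtype>(d_col, /*channels=*/3, /*height=*/227, /*width=*/227,
      /*kernel_h=*/11, /*kernel_w=*/11, /*pad_h=*/0, /*pad_w=*/0,
      /*stride_h=*/4, /*stride_w=*/4, /*dilation_h=*/1, /*dilation_w=*/1,
      d_im);
}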
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] =
(d_im[i] < kernel_extent) ? 0 :
(d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
Dtype val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
const int im_size, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 1>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 2:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 2>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 3:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 3>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 4:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 4>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 5:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 5>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 6:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 6>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 7:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 7>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 8:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 8>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 9:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 9>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 10:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 10>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_nd_gpu<float>(const float* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_im);
} // namespace caffe
| 108ad98a5a3e7caf21e603dc9bd1506671692e10.cu | #include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int channel_in = index;
int channel_out = 1;
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % shared_col_shape[i + 1];
channel_in /= shared_col_shape[i + 1];
channel_out *= shared_kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
channel_out *= shared_col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
channel_in *= shared_im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= shared_col_shape[i + 1];
d_iter[i] = 0;
}
Dtype* data_col_ptr = data_col + channel_out;
const Dtype* data_im_ptr = data_im + channel_in;
bool incremented;
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
if (!in_range) { break; }
}
if (in_range) {
int data_im_offset = d_iter[0] * shared_dilation[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= shared_im_shape[i + 1];
data_im_offset += d_iter[i] * shared_dilation[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = shared_kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
const int num_kernels, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
im2col_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 2:
im2col_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 3:
im2col_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 4:
im2col_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 5:
im2col_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 6:
im2col_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 7:
im2col_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 8:
im2col_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 9:
im2col_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 10:
im2col_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] =
(d_im[i] < kernel_extent) ? 0 :
(d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
Dtype val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
const int im_size, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
col2im_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 2:
col2im_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 3:
col2im_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 4:
col2im_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 5:
col2im_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 6:
col2im_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 7:
col2im_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 8:
col2im_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 9:
col2im_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 10:
col2im_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_nd_gpu<float>(const float* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_im);
} // namespace caffe
|
5b9ebd19828e415c6fd8194921e558dc35cffc1a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <algorithm>
#include <vector>
#include <cusp/complex.h>
#include <cusp/blas.h>
#include <hipcub/hipcub.hpp>
#include <thrust/reduce.h>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include "hip/hip_runtime.h"
#include "polargrid.h"
void error_handle(hipError_t status = hipErrorLaunchFailure);
void error_handle(hipError_t status){
if(status != hipSuccess){
hipError_t s= hipGetLastError();
if(s != hipSuccess){
// printf("%s\n",hipGetErrorString(s));
exit(1);
}
}
}
__device__ float kb_weight(float2 grid_pos, float2 point_pos,
int kb_table_size, float kb_table_scale,hipTextureObject_t texRef){
float dist_x = fabsf(grid_pos.x-point_pos.x)*kb_table_scale;
float dist_y = fabsf(grid_pos.y-point_pos.y)*kb_table_scale;
//float w=tex1D<float>(texRef,0.0f);
// return w;//tex1D<float>(texRef,dist_y);// *tex1D<float>(texRef,dist_y);
// return 1.0f;
int ix = (int)dist_x;
float fx = dist_x-ix;
int iy = (int)dist_y;
float fy = dist_y-iy;
if(ix+1 < kb_table_size && iy+1 < kb_table_size){
return (tex1Dfetch<float>(texRef,ix)*(1.0f-fx) + tex1Dfetch<float>(texRef,ix+1)*(fx)) *
(tex1Dfetch<float>(texRef,iy)*(1.0f-fy) + tex1Dfetch<float>(texRef,iy+1)*(fy));
}
return 0.0f;
/* */
}
__device__ float kb_weight(float grid_x, float grid_y, float point_pos_x,
float point_pos_y,
int kb_table_size,
float kb_table_scale,hipTextureObject_t texRef){
float dist_x = fabsf(grid_x-point_pos_x)*kb_table_scale;
float dist_y = fabsf(grid_y-point_pos_y)*kb_table_scale;
// return tex1D<float>(texRef,dist_x) *tex1D<float>(texRef,dist_y);
int ix = (int)dist_x;
float fx = dist_x-ix;
int iy = (int)dist_y;
float fy = dist_y-iy;
if(ix+1 < kb_table_size && iy+1 < kb_table_size){
return (tex1Dfetch<float>(texRef,ix)*(1.0f-fx) + tex1Dfetch<float>(texRef,ix+1)*(fx)) *
(tex1Dfetch<float>(texRef,iy)*(1.0f-fy) + tex1Dfetch<float>(texRef,iy+1)*(fy));
}
return 0.0f;
}
__device__ float kb_weight(float2 grid_pos, float2 point_pos,
int kb_table_size,
float kb_table_scale,int tid,hipTextureObject_t texRef){
float dist_x = fabsf(grid_pos.x-point_pos.x)*kb_table_scale;
float dist_y = fabsf(grid_pos.y-point_pos.y)*kb_table_scale;
// return 0.0f;
return tex1D<float>(texRef,dist_x) *tex1D<float>(texRef,dist_y);
// return tex1D<float>(texRef,dist_x) *tex1D<float>(texRef,dist_y);
/*
float ix = rintf(dist_x);
float fx = dist_x-ix;
float iy = rintf(dist_y);
float fy = dist_y-iy;
if(ix+1 < kb_table_size && iy+1 < kb_table_size){
return (tex1Dfetch<float>(texRef,tid)*(1.0f-fx) + tex1Dfetch<float>(texRef,tid)*(fx)) *
(tex1Dfetch<float>(texRef,tid)*(1.0f-fy) + tex1Dfetch<float>(texRef,tid)*(fy));
}
return 0.0f;
*/
}
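// Illustrative sketch (not part of the original file): the separable table
// lookup that kb_weight performs, written without textures so the linear
// interpolation is explicit. The name kb_weight_plain and the raw `table`
// pointer are assumptions for this example; the real code reads the same
// Kaiser-Bessel table through a texture object.
__device__ inline float kb_weight_plain(float dist, const float* table,
                                        int table_size, float table_scale) {
  const float d = fabsf(dist) * table_scale;  // distance in table units
  const int i = (int)d;
  const float f = d - i;                      // fractional part for the lerp
  if (i + 1 < table_size) {
    return table[i] * (1.0f - f) + table[i + 1] * f;
  }
  return 0.0f;                                // outside the kernel support
}
// The 2-D weight used above is then the product of the x and y 1-D weights.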
__global__ void sum_points_large(const float * point_x,
const float * point_y,
const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
const int nbins,
const int kb_table_size,
const float kb_table_scale, hipTextureObject_t texRef,
cusp::complex<float> * grid_value, int pbid){
__shared__ cusp::complex<float> value;
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
// int i = blockIdx.x;
int i = pbid;//blockIdx.x;
int tid = threadIdx.x;
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
const int idx = binned_points_idx[i];
const int ppb = points_per_bin[i];
// cusp::complex<float> * value;
//const int bd=BLOCKSIZE;
// const int bd=blockDim.x;
const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
__shared__ float point_pos_cache_x[SHARED_SIZE];
__shared__ float point_pos_cache_y[SHARED_SIZE];
__shared__ cusp::complex<float> point_value_cache[SHARED_SIZE];
__shared__ cusp::complex<float> sum_t[BLOCKSIZE];
// Let's try to load the points into shared memory.
for(int j = tid;j<ppb;j+= blockDim.x){
const int point = binned_points[idx+j];
point_value_cache[j] = point_value[point];
point_pos_cache_x[j] = binned_points_x[idx+j];
point_pos_cache_y[j] = binned_points_y[idx+j];
}
__syncthreads();
int b = 4;
for(int k = tid/b;k<dims.x*dims.y;k+=blockDim.x/b){
const int x = (k%(dims.x))+corner.x;
const int y = (k/dims.x)+corner.y;
sum_t[tid] = 0;
//sum_i[tid] = 0;
for(int j = (tid&(b-1));j<ppb;j+=b){
float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale,texRef);
sum_t[tid] += point_value_cache[j]*w;
}
// Do a reduce in shared memory
for(unsigned int j=1; j < b; j = (j << 1)) {
// modulo arithmetic is slow!
if ((tid & ((j<<1)-1)) == 0) {
sum_t[tid] += sum_t[tid + j];
}
__syncthreads();
}
if((tid&(b-1)) == 0){
grid_value[y*grid_size.x+x] = sum_t[tid];
}
}
}
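// Illustrative sketch (not part of the original file): the in-place
// shared-memory tree reduction used by the gridding kernels, written out for
// a full block of BLOCKSIZE threads. The function name is an assumption; the
// kernels above apply the same pattern, sum_points_large within groups of b
// threads and sum_points_old across the whole block. All threads of the block
// are assumed to call it, and sum_t[0] holds the block-wide sum afterwards.
__device__ inline void block_reduce_sketch(cusp::complex<float>* sum_t,
                                           int tid) {
  for (unsigned int j = 1; j < BLOCKSIZE; j <<= 1) {
    if ((tid & (2 * j - 1)) == 0) {
      sum_t[tid] += sum_t[tid + j];  // fold in the partner lane j steps away
    }
    __syncthreads();
  }
}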
__global__ void sum_points_mid(const float * point_x,
const float * point_y,
const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
const int nbins,
const int kb_table_size,
const float kb_table_scale, hipTextureObject_t texRef,
cusp::complex<float> * grid_value,int pbid){
__shared__ cusp::complex<float> value;
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
// int i = blockIdx.x;
int i = pbid;
int tid = threadIdx.x;
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
const int idx = binned_points_idx[i];
const int ppb = points_per_bin[i];
// cusp::complex<float> * value;
// const int bd=BLOCKSIZE;
// const int bd=blockDim.x;
const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
// small bin or large no of samples
__shared__ float point_pos_cache_x[SHARED_SIZE];
__shared__ float point_pos_cache_y[SHARED_SIZE];
__shared__ cusp::complex<float> point_value_cache[SHARED_SIZE];
// Let's try to load all points into shared memory.
for(int j = tid;j<ppb;j+= blockDim.x){
const int point = binned_points[idx+j];
point_value_cache[j] = point_value[point];
point_pos_cache_x[j] = binned_points_x[idx+j];
point_pos_cache_y[j] = binned_points_y[idx+j];
}
__syncthreads();
// loop through dimensions
for(int k = tid;k<dims.x*dims.y;k+=blockDim.x){
const int x = (k%(dims.x))+corner.x;
const int y = (k/dims.x)+corner.y;
cusp::complex<float> my_sum = 0;
for(int j = 0;j<ppb;j++){ //loop through all the points
float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale,texRef);
my_sum += point_value_cache[j]*w;
}
grid_value[y*grid_size.x+x] += my_sum;
}
}
//------------------------------
__global__ void sum_points_old(const float * point_x,
const float * point_y,
const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
const int nbins,
const int kb_table_size,
const float kb_table_scale, hipTextureObject_t texRef,
cusp::complex<float> * grid_value,int pbid){
__shared__ cusp::complex<float> value;
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
// int i = blockIdx.x;
int i = pbid;
int tid = threadIdx.x;
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
const int idx = binned_points_idx[i];
const int ppb = points_per_bin[i];
// cusp::complex<float> * value;
const int bd=BLOCKSIZE;
// const int bd=blockDim.x;
//const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
__shared__ cusp::complex<float> sum_t[bd];
// loop through grid
for(int y = corner.y;y<corner.y+bin_dimension_x[i];y+=1){
for(int x = corner.x;x<corner.x+bin_dimension_y[i];x+=1){
sum_t[tid] = 0;
for(int j = tid;j<ppb;j+=bd){
sum_t[tid] += point_value[binned_points[idx+j]]*
kb_weight(make_float2(x,y),
make_float2(binned_points_x[idx+j],binned_points_y[idx+j]),
kb_table_size,kb_table_scale,texRef);
}
// __syncthreads();
for(unsigned int j=1; j < bd; j *= 2) {
// modulo arithmetic is slow!
if ((tid & (2*j-1)) == 0) { sum_t[tid] += sum_t[tid + j]; }
__syncthreads();
}
if(tid == 0){grid_value[y*grid_size.x+x]+=sum_t[0]; }
}
}
}
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__global__ void sum_points(const float * point_x,
const float * point_y,
const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
const int nbins,
const int kb_table_size,
const float kb_table_scale, hipTextureObject_t texRef,
cusp::complex<float> * grid_value,int pbid){
__shared__ cusp::complex<float> value;
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
// int i = blockIdx.x;
int i = pbid;
int tid = threadIdx.x;
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
const int idx = binned_points_idx[i];
const int ppb = points_per_bin[i];
// cusp::complex<float> * value;
const int bd=BLOCKSIZE;
// const int bd=blockDim.x;
//const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
__shared__ cusp::complex<float> sum_t[bd];
int j=tid+bd*blockIdx.x;
// loop through grid
for(int y = corner.y;y<corner.y+bin_dimension_x[i];y+=1){
for(int x = corner.x;x<corner.x+bin_dimension_y[i];x+=1){
sum_t[tid] = 0;
// for(int j = tid;j<ppb;j+=bd){
if(j<ppb){
sum_t[tid] += point_value[binned_points[idx+j]]*
kb_weight(make_float2(x,y),
make_float2(binned_points_x[idx+j],binned_points_y[idx+j]),
kb_table_size,kb_table_scale,texRef);
}//else{ sum_t[tid] = 0; }
// __syncthreads();
for(unsigned int j=1; j < bd; j *= 2) {
// modulo arithmetic is slow!
if ((tid & (2*j-1)) == 0) { sum_t[tid] += sum_t[tid + j]; }
__syncthreads();
}
if(tid == 0){grid_value[y*grid_size.x+x]+=sum_t[0]; }
}
}
}
//------------------------------
__global__ void grid_points_cuda_mex_interleaved_kernel(const float * point_x,
const float * point_y,
const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
const int nbins,
const int kb_table_size,
const float kb_table_scale, hipTextureObject_t texRef,
cusp::complex<float> * grid_value){
__shared__ cusp::complex<float> value;
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
int i = blockIdx.x;
// int tid = threadIdx.x;
// small bin or large no of samples
if(bin_dimension_x[i]*bin_dimension_y[i] < 64 || points_per_bin[i] > SHARED_SIZE){
// sum_points<<<points_per_bin,BLOCKSIZE>>>
// sum_points<<<points_per_bin[i]/BLOCKSIZE,BLOCKSIZE>>>
hipLaunchKernelGGL(( sum_points_old), dim3(1),dim3(BLOCKSIZE), 0, 0,
point_x, point_y,
point_value, npoints, grid_size, points_per_bin,
bin_dimension_x, bin_dimension_y, binned_points,
binned_points_idx, bin_location,
binned_points_x, binned_points_y,nbins,
kb_table_size,
kb_table_scale,texRef,
grid_value, blockIdx.x);
}else if(bin_dimension_x[i]*bin_dimension_y[i] >BLOCKSIZE/2-1) {
hipLaunchKernelGGL(( sum_points_mid), dim3(1),dim3(BLOCKSIZE), 0, 0, point_x, point_y,
point_value, npoints, grid_size, points_per_bin,
bin_dimension_x, bin_dimension_y, binned_points,
binned_points_idx, bin_location,
binned_points_x, binned_points_y,nbins,
kb_table_size,
kb_table_scale,texRef,
grid_value, blockIdx.x);
}else{ //small dimension and few points
hipLaunchKernelGGL(( sum_points_large), dim3(1),dim3(BLOCKSIZE), 0, 0, point_x, point_y,
point_value, npoints, grid_size, points_per_bin,
bin_dimension_x, bin_dimension_y, binned_points,
binned_points_idx, bin_location,
binned_points_x, binned_points_y,nbins,
kb_table_size,
kb_table_scale,texRef,
grid_value, blockIdx.x);
}
}
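// Illustrative sketch (not part of the original file): the three-way choice
// made by the dispatcher above, restated as a plain host helper so the
// thresholds are easy to read. The function name and return coding are
// assumptions for this example only.
static inline int pick_sum_points_path(int bin_dim_x, int bin_dim_y,
                                       int points_in_bin) {
  if (bin_dim_x * bin_dim_y < 64 || points_in_bin > SHARED_SIZE) {
    return 0;  // sum_points_old: tiny bin, or too many points to cache
  }
  if (bin_dim_x * bin_dim_y > BLOCKSIZE / 2 - 1) {
    return 1;  // sum_points_mid: wide bin, one thread per grid cell
  }
  return 2;    // sum_points_large: small bin, a group of threads per cell
}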
//--------------------------------
void grid_points_cuda_interleaved_mex(const float * point_pos_x, const float * point_pos_y,
const cusp::complex<float> * point_value, int npoints,
uint2 grid_size, const int * points_per_bin, const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points, const int * binned_points_idx, const int * bin_location,
const float * binned_points_x, const float * binned_points_y,
int nbins,
const float * kb_table,
const int kb_table_size, const float kb_table_scale, cusp::complex<float> * grid_value){
hipMemset(grid_value,0,sizeof(float2)*grid_size.x*grid_size.y);
int grid = nbins;
// int block_size = BLOCKSIZE;
clock_t t_i = clock();
// create texture object
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
if(0){
hipChannelFormatDesc channelDesc=hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat );
hipArray * cuArray;
hipMallocArray(&cuArray, &channelDesc, kb_table_size, 1);
hipMemcpyToArray ( cuArray, 0,0,kb_table,kb_table_size*sizeof(float) , hipMemcpyDeviceToDevice);
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = cuArray;
}
else{
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = (void *)kb_table;
resDesc.res.linear.desc.f = hipChannelFormatKindFloat;
resDesc.res.linear.desc.x = 32; // bits per channel
resDesc.res.linear.sizeInBytes = kb_table_size*sizeof(float);
}
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeWrap;
texDesc.addressMode[1] = hipAddressModeWrap;
texDesc.readMode = hipReadModeElementType;
texDesc.filterMode = hipFilterModeLinear;
texDesc.normalizedCoords = 1;
// create texture object: we only have to do this once!
hipTextureObject_t texRef=0;
hipCreateTextureObject(&texRef, &resDesc, &texDesc, NULL);
//--------------------------------------------
hipLaunchKernelGGL(( grid_points_cuda_mex_interleaved_kernel), dim3(grid),dim3(1), 0, 0, point_pos_x, point_pos_y,
point_value, npoints, grid_size, points_per_bin,
bin_dimension_x, bin_dimension_y, binned_points,
binned_points_idx, bin_location,
binned_points_x, binned_points_y,nbins,
kb_table_size,
kb_table_scale,texRef,
grid_value);
hipDeviceSynchronize();
// hipFreeArray(cuArray);
clock_t t_e = clock();
error_handle();
// printf("%d iter in %5.1f ms\n",iter,(t_e-t_i)*1000.0/CLOCKS_PER_SEC);
}
#define SX prhs[0]
#define SY prhs[1]
#define SV prhs[2]
#define GRID_DIM prhs[3]
#define SPB prhs[4]
#define BIN_DIM_X prhs[5]
#define BIN_DIM_Y prhs[6]
#define SIB prhs[7]
#define BSO prhs[8]
#define BL prhs[9]
#define BPX prhs[10]
#define BPY prhs[11]
#define KLUT prhs[12]
#define KLUTS prhs[13]
void mexFunction(int nlhs, mxArray * plhs[], int nrhs,const mxArray * prhs[]){
/* Initialize the MathWorks GPU API. */
mxInitGPU();
mxGPUArray const *samples_x;
mxGPUArray const *samples_y;
mxGPUArray const *samples_values;
mxGPUArray const *samples_per_bin;
mxGPUArray const *bin_dimensions_x;
mxGPUArray const *bin_dimensions_y;
mxGPUArray const *samples_in_bin;
mxGPUArray const *bin_start_offset;
mxGPUArray const *bin_location;
mxGPUArray const *bin_points_x;
mxGPUArray const *bin_points_y;
mxGPUArray const *kernel_lookup_table;
//int *grid_dim =(int *) mxGetPr(GRID_DIM);
float kernel_lookup_table_scale = mxGetScalar(KLUTS);
int *grid_dim0=( int *) (mxGetData(GRID_DIM));
mwSize *grid_dim=(mwSize *)grid_dim0;
//mexPrintf("Grid Dimensions %d x %d\n",(grid_dim[0]),(grid_dim[1]));
//mexPrintf("Grid Dimensions %d x %d\n",(grid_dim1[0]),(grid_dim1[1]));
// OUTPUT
mxGPUArray *grid_values;
samples_x = mxGPUCreateFromMxArray(SX);
samples_y = mxGPUCreateFromMxArray(SY);
samples_values = mxGPUCreateFromMxArray(SV);
samples_per_bin = mxGPUCreateFromMxArray(SPB);
bin_dimensions_x = mxGPUCreateFromMxArray(BIN_DIM_X);
bin_dimensions_y = mxGPUCreateFromMxArray(BIN_DIM_Y);
samples_in_bin = mxGPUCreateFromMxArray(SIB);
bin_start_offset = mxGPUCreateFromMxArray(BSO);
bin_location = mxGPUCreateFromMxArray(BL);
bin_points_x = mxGPUCreateFromMxArray(BPX);
bin_points_y = mxGPUCreateFromMxArray(BPY);
kernel_lookup_table= mxGPUCreateFromMxArray(KLUT);
int nbins = (int) (mxGPUGetNumberOfElements(bin_dimensions_x));
int npoints = (int)(mxGPUGetNumberOfElements(samples_x));
int kernel_lookup_table_size = ( int)(mxGPUGetNumberOfElements(kernel_lookup_table));
mwSize ndim= 2;
// output:
// float2 * grid_values;
// float2 * gold_grid_values;
// plhs[0] = jkt_new( grid_dim[0], grid_dim[1], mxSINGLE_CLASS, mxREAL,);
//grid_values= mxGPUCreateGPUArray(ndim,grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
grid_values= mxGPUCreateGPUArray(ndim, grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
// now get the pointer or whatever it is
const float *d_samples_x = (const float *)(mxGPUGetDataReadOnly(samples_x));
const float *d_samples_y = (const float *)(mxGPUGetDataReadOnly(samples_y));
// float2 *d_samples_values = (float2 *)(const float2 *)(mxGPUGetDataReadOnly(samples_values));
const cusp::complex<float> *d_samples_values = (const cusp::complex<float> *)(mxGPUGetDataReadOnly(samples_values));
const int * d_samples_per_bin = (const int *)(mxGPUGetDataReadOnly(samples_per_bin));
const int * d_bin_dimensions_x = (const int *)(mxGPUGetDataReadOnly(bin_dimensions_x));
const int * d_bin_dimensions_y = (const int *)(mxGPUGetDataReadOnly(bin_dimensions_y));
const int * d_samples_in_bin = (const int *)(mxGPUGetDataReadOnly(samples_in_bin));
const int * d_bin_start_offset =(const int *)(mxGPUGetDataReadOnly(bin_start_offset));
const int * d_bin_location = (const int *)(mxGPUGetDataReadOnly(bin_location));
const float * d_bin_points_x = (const float *)(mxGPUGetDataReadOnly(bin_points_x));
const float * d_bin_points_y = (const float *)(mxGPUGetDataReadOnly(bin_points_y));
float * d_kernel_lookup_table = ( float *)(mxGPUGetDataReadOnly(kernel_lookup_table));
const uint2 grid_size = {grid_dim[0],grid_dim[1]};
//float2 * d_grid_values = (float2 *)(mxGPUGetData(grid_values));
cusp::complex<float> * d_grid_values = (cusp::complex<float> *)(mxGPUGetData(grid_values));
//--------------------------------------------
/*
// create texture object
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = d_kernel_lookup_table;
resDesc.res.linear.desc.f = hipChannelFormatKindFloat;
resDesc.res.linear.desc.x = 32; // bits per channel
resDesc.res.linear.sizeInBytes = kernel_lookup_table_size*sizeof(float);
//resDesc.resType = hipResourceTypeArray;
// resDesc.res.array.array = (hipArray* ) d_kernel_lookup_table;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeWrap;
texDesc.readMode = hipReadModeElementType;
texDesc.filterMode = hipFilterModeLinear;
texDesc.normalizedCoords = 1;
// create texture object: we only have to do this once!
hipTextureObject_t texRef=0;
hipCreateTextureObject(&texRef, &resDesc, &texDesc, NULL);
//--------------------------------------------
*/
//
grid_points_cuda_interleaved_mex( d_samples_x, d_samples_y,
d_samples_values, npoints,
grid_size, d_samples_per_bin, d_bin_dimensions_x, d_bin_dimensions_y,
d_samples_in_bin, d_bin_start_offset, d_bin_location,
d_bin_points_x, d_bin_points_y,
nbins, d_kernel_lookup_table,
kernel_lookup_table_size,
kernel_lookup_table_scale, d_grid_values);
//mexErrMsgTxt("gpuArray 2");
plhs[0] = mxGPUCreateMxArrayOnGPU(grid_values);
/*
*/
mxGPUDestroyGPUArray( samples_x);
mxGPUDestroyGPUArray( samples_y);
mxGPUDestroyGPUArray( samples_values);
mxGPUDestroyGPUArray( samples_per_bin);
mxGPUDestroyGPUArray( bin_dimensions_x);
mxGPUDestroyGPUArray( bin_dimensions_y);
mxGPUDestroyGPUArray( samples_in_bin);
mxGPUDestroyGPUArray( kernel_lookup_table);
mxGPUDestroyGPUArray( bin_start_offset);
mxGPUDestroyGPUArray( bin_location);
mxGPUDestroyGPUArray( bin_points_x);
mxGPUDestroyGPUArray( bin_points_y);
mxGPUDestroyGPUArray( grid_values);
}
| 5b9ebd19828e415c6fd8194921e558dc35cffc1a.cu | #include <stdlib.h>
#include <stdio.h>
#include <algorithm>
#include <vector>
#include <cusp/complex.h>
#include <cusp/blas.h>
#include <cub/cub.cuh>
#include <thrust/reduce.h>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include "cuda.h"
#include "polargrid.h"
void error_handle(cudaError_t status = cudaErrorLaunchFailure);
void error_handle(cudaError_t status){
if(status != cudaSuccess){
cudaError_t s= cudaGetLastError();
if(s != cudaSuccess){
// printf("%s\n",cudaGetErrorString(s));
exit(1);
}
}
}
__device__ float kb_weight(float2 grid_pos, float2 point_pos,
int kb_table_size, float kb_table_scale,cudaTextureObject_t texRef){
float dist_x = fabsf(grid_pos.x-point_pos.x)*kb_table_scale;
float dist_y = fabsf(grid_pos.y-point_pos.y)*kb_table_scale;
//float w=tex1D<float>(texRef,0.0f);
// return w;//tex1D<float>(texRef,dist_y);// *tex1D<float>(texRef,dist_y);
// return 1.0f;
int ix = (int)dist_x;
float fx = dist_x-ix;
int iy = (int)dist_y;
float fy = dist_y-iy;
if(ix+1 < kb_table_size && iy+1 < kb_table_size){
return (tex1Dfetch<float>(texRef,ix)*(1.0f-fx) + tex1Dfetch<float>(texRef,ix+1)*(fx)) *
(tex1Dfetch<float>(texRef,iy)*(1.0f-fy) + tex1Dfetch<float>(texRef,iy+1)*(fy));
}
return 0.0f;
/* */
}
__device__ float kb_weight(float grid_x, float grid_y, float point_pos_x,
float point_pos_y,
int kb_table_size,
float kb_table_scale,cudaTextureObject_t texRef){
float dist_x = fabsf(grid_x-point_pos_x)*kb_table_scale;
float dist_y = fabsf(grid_y-point_pos_y)*kb_table_scale;
// return tex1D<float>(texRef,dist_x) *tex1D<float>(texRef,dist_y);
int ix = (int)dist_x;
float fx = dist_x-ix;
int iy = (int)dist_y;
float fy = dist_y-iy;
if(ix+1 < kb_table_size && iy+1 < kb_table_size){
return (tex1Dfetch<float>(texRef,ix)*(1.0f-fx) + tex1Dfetch<float>(texRef,ix+1)*(fx)) *
(tex1Dfetch<float>(texRef,iy)*(1.0f-fy) + tex1Dfetch<float>(texRef,iy+1)*(fy));
}
return 0.0f;
}
__device__ float kb_weight(float2 grid_pos, float2 point_pos,
int kb_table_size,
float kb_table_scale,int tid,cudaTextureObject_t texRef){
float dist_x = fabsf(grid_pos.x-point_pos.x)*kb_table_scale;
float dist_y = fabsf(grid_pos.y-point_pos.y)*kb_table_scale;
// return 0.0f;
return tex1D<float>(texRef,dist_x) *tex1D<float>(texRef,dist_y);
// return tex1D<float>(texRef,dist_x) *tex1D<float>(texRef,dist_y);
/*
float ix = rintf(dist_x);
float fx = dist_x-ix;
float iy = rintf(dist_y);
float fy = dist_y-iy;
if(ix+1 < kb_table_size && iy+1 < kb_table_size){
return (tex1Dfetch<float>(texRef,tid)*(1.0f-fx) + tex1Dfetch<float>(texRef,tid)*(fx)) *
(tex1Dfetch<float>(texRef,tid)*(1.0f-fy) + tex1Dfetch<float>(texRef,tid)*(fy));
}
return 0.0f;
*/
}
__global__ void sum_points_large(const float * point_x,
const float * point_y,
const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
const int nbins,
const int kb_table_size,
const float kb_table_scale, cudaTextureObject_t texRef,
cusp::complex<float> * grid_value, int pbid){
__shared__ cusp::complex<float> value;
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
// int i = blockIdx.x;
int i = pbid;//blockIdx.x;
int tid = threadIdx.x;
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
const int idx = binned_points_idx[i];
const int ppb = points_per_bin[i];
// cusp::complex<float> * value;
//const int bd=BLOCKSIZE;
// const int bd=blockDim.x;
const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
__shared__ float point_pos_cache_x[SHARED_SIZE];
__shared__ float point_pos_cache_y[SHARED_SIZE];
__shared__ cusp::complex<float> point_value_cache[SHARED_SIZE];
__shared__ cusp::complex<float> sum_t[BLOCKSIZE];
  // Let's try to load things into shared memory
for(int j = tid;j<ppb;j+= blockDim.x){
const int point = binned_points[idx+j];
point_value_cache[j] = point_value[point];
point_pos_cache_x[j] = binned_points_x[idx+j];
point_pos_cache_y[j] = binned_points_y[idx+j];
}
__syncthreads();
int b = 4;
for(int k = tid/b;k<dims.x*dims.y;k+=blockDim.x/b){
const int x = (k%(dims.x))+corner.x;
const int y = (k/dims.x)+corner.y;
sum_t[tid] = 0;
//sum_i[tid] = 0;
for(int j = (tid&(b-1));j<ppb;j+=b){
float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale,texRef);
sum_t[tid] += point_value_cache[j]*w;
}
// Do a reduce in shared memory
for(unsigned int j=1; j < b; j = (j << 1)) {
// modulo arithmetic is slow!
if ((tid & ((j<<1)-1)) == 0) {
sum_t[tid] += sum_t[tid + j];
}
__syncthreads();
}
if((tid&(b-1)) == 0){
grid_value[y*grid_size.x+x] = sum_t[tid];
}
}
}
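/*
 * Explanatory note (not from the original sources): in sum_points_large each group
 * of b = 4 consecutive threads cooperates on one output cell of the bin. The
 * threads of a group stride over the bin's cached points with step b, accumulate
 * partial sums in sum_t[], and combine them with a log2(b)-step shared-memory tree
 * reduction; the first thread of each group ((tid & (b-1)) == 0) then writes the
 * cell's total to grid_value.
 */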
__global__ void sum_points_mid(const float * point_x,
const float * point_y,
const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
const int nbins,
const int kb_table_size,
const float kb_table_scale, cudaTextureObject_t texRef,
cusp::complex<float> * grid_value,int pbid){
__shared__ cusp::complex<float> value;
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
// int i = blockIdx.x;
int i = pbid;
int tid = threadIdx.x;
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
const int idx = binned_points_idx[i];
const int ppb = points_per_bin[i];
// cusp::complex<float> * value;
// const int bd=BLOCKSIZE;
// const int bd=blockDim.x;
const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
// small bin or large no of samples
__shared__ float point_pos_cache_x[SHARED_SIZE];
__shared__ float point_pos_cache_y[SHARED_SIZE];
__shared__ cusp::complex<float> point_value_cache[SHARED_SIZE];
  // Let's try to load all points into shared memory
for(int j = tid;j<ppb;j+= blockDim.x){
const int point = binned_points[idx+j];
point_value_cache[j] = point_value[point];
point_pos_cache_x[j] = binned_points_x[idx+j];
point_pos_cache_y[j] = binned_points_y[idx+j];
}
__syncthreads();
// loop through dimensions
for(int k = tid;k<dims.x*dims.y;k+=blockDim.x){
const int x = (k%(dims.x))+corner.x;
const int y = (k/dims.x)+corner.y;
cusp::complex<float> my_sum = 0;
for(int j = 0;j<ppb;j++){ //loop through all the points
float w= kb_weight(x,y,point_pos_cache_x[j],point_pos_cache_y[j],kb_table_size,kb_table_scale,texRef);
my_sum += point_value_cache[j]*w;
}
grid_value[y*grid_size.x+x] += my_sum;
}
}
//------------------------------
__global__ void sum_points_old(const float * point_x,
const float * point_y,
const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
const int nbins,
const int kb_table_size,
const float kb_table_scale, cudaTextureObject_t texRef,
cusp::complex<float> * grid_value,int pbid){
__shared__ cusp::complex<float> value;
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
// int i = blockIdx.x;
int i = pbid;
int tid = threadIdx.x;
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
const int idx = binned_points_idx[i];
const int ppb = points_per_bin[i];
// cusp::complex<float> * value;
const int bd=BLOCKSIZE;
// const int bd=blockDim.x;
//const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
__shared__ cusp::complex<float> sum_t[bd];
// loop through grid
for(int y = corner.y;y<corner.y+bin_dimension_x[i];y+=1){
for(int x = corner.x;x<corner.x+bin_dimension_y[i];x+=1){
sum_t[tid] = 0;
for(int j = tid;j<ppb;j+=bd){
sum_t[tid] += point_value[binned_points[idx+j]]*
kb_weight(make_float2(x,y),
make_float2(binned_points_x[idx+j],binned_points_y[idx+j]),
kb_table_size,kb_table_scale,texRef);
}
// __syncthreads();
for(unsigned int j=1; j < bd; j *= 2) {
// modulo arithmetic is slow!
if ((tid & (2*j-1)) == 0) { sum_t[tid] += sum_t[tid + j]; }
__syncthreads();
}
if(tid == 0){grid_value[y*grid_size.x+x]+=sum_t[0]; }
}
}
}
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__global__ void sum_points(const float * point_x,
const float * point_y,
const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
const int nbins,
const int kb_table_size,
const float kb_table_scale, cudaTextureObject_t texRef,
cusp::complex<float> * grid_value,int pbid){
__shared__ cusp::complex<float> value;
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
// int i = blockIdx.x;
int i = pbid;
int tid = threadIdx.x;
uint2 corner;
corner.x = bin_location[i]%grid_size.x;
corner.y = bin_location[i]/grid_size.x;
const int idx = binned_points_idx[i];
const int ppb = points_per_bin[i];
// cusp::complex<float> * value;
const int bd=BLOCKSIZE;
// const int bd=blockDim.x;
//const uint2 dims = {bin_dimension_x[i],bin_dimension_y[i]};
__shared__ cusp::complex<float> sum_t[bd];
int j=tid+bd*blockIdx.x;
// loop through grid
for(int y = corner.y;y<corner.y+bin_dimension_x[i];y+=1){
for(int x = corner.x;x<corner.x+bin_dimension_y[i];x+=1){
sum_t[tid] = 0;
// for(int j = tid;j<ppb;j+=bd){
if(j<ppb){
sum_t[tid] += point_value[binned_points[idx+j]]*
kb_weight(make_float2(x,y),
make_float2(binned_points_x[idx+j],binned_points_y[idx+j]),
kb_table_size,kb_table_scale,texRef);
}//else{ sum_t[tid] = 0; }
// __syncthreads();
for(unsigned int j=1; j < bd; j *= 2) {
// modulo arithmetic is slow!
if ((tid & (2*j-1)) == 0) { sum_t[tid] += sum_t[tid + j]; }
__syncthreads();
}
if(tid == 0){grid_value[y*grid_size.x+x]+=sum_t[0]; }
}
}
}
//------------------------------
__global__ void grid_points_cuda_mex_interleaved_kernel(const float * point_x,
const float * point_y,
const cusp::complex<float> * point_value,
int npoints, uint2 grid_size,
const int * points_per_bin,
const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points,
const int * binned_points_idx,
const int * bin_location,
const float * binned_points_x,
const float * binned_points_y,
const int nbins,
const int kb_table_size,
const float kb_table_scale, cudaTextureObject_t texRef,
cusp::complex<float> * grid_value){
__shared__ cusp::complex<float> value;
// Specialize BlockReduce for a 1D block of 128 threads on type cusp::complex<float>
int i = blockIdx.x;
// int tid = threadIdx.x;
// small bin or large no of samples
if(bin_dimension_x[i]*bin_dimension_y[i] < 64 || points_per_bin[i] > SHARED_SIZE){
// sum_points<<<points_per_bin,BLOCKSIZE>>>
// sum_points<<<points_per_bin[i]/BLOCKSIZE,BLOCKSIZE>>>
sum_points_old<<<1,BLOCKSIZE>>>
( point_x, point_y,
point_value, npoints, grid_size, points_per_bin,
bin_dimension_x, bin_dimension_y, binned_points,
binned_points_idx, bin_location,
binned_points_x, binned_points_y,nbins,
kb_table_size,
kb_table_scale,texRef,
grid_value, blockIdx.x);
}else if(bin_dimension_x[i]*bin_dimension_y[i] >BLOCKSIZE/2-1) {
sum_points_mid<<<1,BLOCKSIZE>>>( point_x, point_y,
point_value, npoints, grid_size, points_per_bin,
bin_dimension_x, bin_dimension_y, binned_points,
binned_points_idx, bin_location,
binned_points_x, binned_points_y,nbins,
kb_table_size,
kb_table_scale,texRef,
grid_value, blockIdx.x);
}else{ //small dimension and few points
sum_points_large<<<1,BLOCKSIZE>>>( point_x, point_y,
point_value, npoints, grid_size, points_per_bin,
bin_dimension_x, bin_dimension_y, binned_points,
binned_points_idx, bin_location,
binned_points_x, binned_points_y,nbins,
kb_table_size,
kb_table_scale,texRef,
grid_value, blockIdx.x);
}
}
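/*
 * Explanatory note (not from the original sources): this parent kernel is launched
 * with one thread per bin (<<<nbins,1>>>, see grid_points_cuda_interleaved_mex) and
 * uses CUDA dynamic parallelism to pick a child kernel per bin: sum_points_old for
 * very small bins or bins whose point count exceeds SHARED_SIZE, sum_points_mid for
 * bins with a larger pixel area, and sum_points_large otherwise. Device-side
 * launches like these are assumed to be built for compute capability >= 3.5 with
 * relocatable device code (-rdc=true) and linked against cudadevrt.
 */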
//--------------------------------
void grid_points_cuda_interleaved_mex(const float * point_pos_x, const float * point_pos_y,
const cusp::complex<float> * point_value, int npoints,
uint2 grid_size, const int * points_per_bin, const int * bin_dimension_x,
const int * bin_dimension_y,
const int * binned_points, const int * binned_points_idx, const int * bin_location,
const float * binned_points_x, const float * binned_points_y,
int nbins,
const float * kb_table,
const int kb_table_size, const float kb_table_scale, cusp::complex<float> * grid_value){
cudaMemset(grid_value,0,sizeof(float2)*grid_size.x*grid_size.y);
int grid = nbins;
// int block_size = BLOCKSIZE;
clock_t t_i = clock();
// create texture object
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
if(0){
cudaChannelFormatDesc channelDesc=cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat );
cudaArray * cuArray;
cudaMallocArray(&cuArray, &channelDesc, kb_table_size, 1);
cudaMemcpyToArray ( cuArray, 0,0,kb_table,kb_table_size*sizeof(float) , cudaMemcpyDeviceToDevice);
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = cuArray;
}
else{
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = (void *)kb_table;
resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
resDesc.res.linear.desc.x = 32; // bits per channel
resDesc.res.linear.sizeInBytes = kb_table_size*sizeof(float);
}
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeWrap;
texDesc.addressMode[1] = cudaAddressModeWrap;
texDesc.readMode = cudaReadModeElementType;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.normalizedCoords = 1;
// create texture object: we only have to do this once!
cudaTextureObject_t texRef=0;
cudaCreateTextureObject(&texRef, &resDesc, &texDesc, NULL);
//--------------------------------------------
grid_points_cuda_mex_interleaved_kernel<<<grid,1>>>( point_pos_x, point_pos_y,
point_value, npoints, grid_size, points_per_bin,
bin_dimension_x, bin_dimension_y, binned_points,
binned_points_idx, bin_location,
binned_points_x, binned_points_y,nbins,
kb_table_size,
kb_table_scale,texRef,
grid_value);
cudaThreadSynchronize();
// cudaFreeArray(cuArray);
clock_t t_e = clock();
error_handle();
// printf("%d iter in %5.1f ms\n",iter,(t_e-t_i)*1000.0/CLOCKS_PER_SEC);
}
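/*
 * Explanatory note (not from the original sources): the kernel weights are read
 * through a texture object bound to kb_table in linear device memory and sampled
 * with tex1Dfetch plus manual linear interpolation inside kb_weight; the disabled
 * if(0) branch above kept an alternative path that copies the table into a
 * cudaArray, which would let the hardware filter (cudaFilterModeLinear) do the
 * interpolation instead.
 */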
#define SX prhs[0]
#define SY prhs[1]
#define SV prhs[2]
#define GRID_DIM prhs[3]
#define SPB prhs[4]
#define BIN_DIM_X prhs[5]
#define BIN_DIM_Y prhs[6]
#define SIB prhs[7]
#define BSO prhs[8]
#define BL prhs[9]
#define BPX prhs[10]
#define BPY prhs[11]
#define KLUT prhs[12]
#define KLUTS prhs[13]
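/*
 * Assumed MATLAB-side call matching the prhs indices above (illustrative sketch
 * only; the MEX function name and variable names are not given in this file):
 *
 *   grid_values = polargrid_sum_mex(sx, sy, sv, grid_dim, samples_per_bin, ...
 *       bin_dim_x, bin_dim_y, samples_in_bin, bin_start_offset, bin_location, ...
 *       bin_points_x, bin_points_y, kernel_lookup_table, kernel_table_scale);
 *
 * All array inputs are read with mxGPUCreateFromMxArray, so they are expected to
 * be gpuArray (or GPU-transferable) values, and the single output is returned as
 * a complex single-precision gpuArray of size grid_dim.
 */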
void mexFunction(int nlhs, mxArray * plhs[], int nrhs,const mxArray * prhs[]){
/* Initialize the MathWorks GPU API. */
mxInitGPU();
mxGPUArray const *samples_x;
mxGPUArray const *samples_y;
mxGPUArray const *samples_values;
mxGPUArray const *samples_per_bin;
mxGPUArray const *bin_dimensions_x;
mxGPUArray const *bin_dimensions_y;
mxGPUArray const *samples_in_bin;
mxGPUArray const *bin_start_offset;
mxGPUArray const *bin_location;
mxGPUArray const *bin_points_x;
mxGPUArray const *bin_points_y;
mxGPUArray const *kernel_lookup_table;
//int *grid_dim =(int *) mxGetPr(GRID_DIM);
float kernel_lookup_table_scale = mxGetScalar(KLUTS);
int *grid_dim0=( int *) (mxGetData(GRID_DIM));
mwSize *grid_dim=(mwSize *)grid_dim0;
//mexPrintf("Grid Dimensions %d x %d\n",(grid_dim[0]),(grid_dim[1]));
//mexPrintf("Grid Dimensions %d x %d\n",(grid_dim1[0]),(grid_dim1[1]));
// OUTPUT
mxGPUArray *grid_values;
samples_x = mxGPUCreateFromMxArray(SX);
samples_y = mxGPUCreateFromMxArray(SY);
samples_values = mxGPUCreateFromMxArray(SV);
samples_per_bin = mxGPUCreateFromMxArray(SPB);
bin_dimensions_x = mxGPUCreateFromMxArray(BIN_DIM_X);
bin_dimensions_y = mxGPUCreateFromMxArray(BIN_DIM_Y);
samples_in_bin = mxGPUCreateFromMxArray(SIB);
bin_start_offset = mxGPUCreateFromMxArray(BSO);
bin_location = mxGPUCreateFromMxArray(BL);
bin_points_x = mxGPUCreateFromMxArray(BPX);
bin_points_y = mxGPUCreateFromMxArray(BPY);
kernel_lookup_table= mxGPUCreateFromMxArray(KLUT);
int nbins = (int) (mxGPUGetNumberOfElements(bin_dimensions_x));
int npoints = (int)(mxGPUGetNumberOfElements(samples_x));
int kernel_lookup_table_size = ( int)(mxGPUGetNumberOfElements(kernel_lookup_table));
mwSize ndim= 2;
// output:
// float2 * grid_values;
// float2 * gold_grid_values;
// plhs[0] = jkt_new( grid_dim[0], grid_dim[1], mxSINGLE_CLASS, mxREAL,);
//grid_values= mxGPUCreateGPUArray(ndim,grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
grid_values= mxGPUCreateGPUArray(ndim, grid_dim,mxSINGLE_CLASS,mxCOMPLEX, MX_GPU_DO_NOT_INITIALIZE);
// now get the pointer or whatever it is
const float *d_samples_x = (const float *)(mxGPUGetDataReadOnly(samples_x));
const float *d_samples_y = (const float *)(mxGPUGetDataReadOnly(samples_y));
// float2 *d_samples_values = (float2 *)(const float2 *)(mxGPUGetDataReadOnly(samples_values));
const cusp::complex<float> *d_samples_values = (const cusp::complex<float> *)(mxGPUGetDataReadOnly(samples_values));
const int * d_samples_per_bin = (const int *)(mxGPUGetDataReadOnly(samples_per_bin));
const int * d_bin_dimensions_x = (const int *)(mxGPUGetDataReadOnly(bin_dimensions_x));
const int * d_bin_dimensions_y = (const int *)(mxGPUGetDataReadOnly(bin_dimensions_y));
const int * d_samples_in_bin = (const int *)(mxGPUGetDataReadOnly(samples_in_bin));
const int * d_bin_start_offset =(const int *)(mxGPUGetDataReadOnly(bin_start_offset));
const int * d_bin_location = (const int *)(mxGPUGetDataReadOnly(bin_location));
const float * d_bin_points_x = (const float *)(mxGPUGetDataReadOnly(bin_points_x));
const float * d_bin_points_y = (const float *)(mxGPUGetDataReadOnly(bin_points_y));
float * d_kernel_lookup_table = ( float *)(mxGPUGetDataReadOnly(kernel_lookup_table));
const uint2 grid_size = {grid_dim[0],grid_dim[1]};
//float2 * d_grid_values = (float2 *)(mxGPUGetData(grid_values));
cusp::complex<float> * d_grid_values = (cusp::complex<float> *)(mxGPUGetData(grid_values));
//--------------------------------------------
/*
// create texture object
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = d_kernel_lookup_table;
resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
resDesc.res.linear.desc.x = 32; // bits per channel
resDesc.res.linear.sizeInBytes = kernel_lookup_table_size*sizeof(float);
//resDesc.resType = cudaResourceTypeArray;
// resDesc.res.array.array = (cudaArray* ) d_kernel_lookup_table;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeWrap;
texDesc.readMode = cudaReadModeElementType;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.normalizedCoords = 1;
// create texture object: we only have to do this once!
cudaTextureObject_t texRef=0;
cudaCreateTextureObject(&texRef, &resDesc, &texDesc, NULL);
//--------------------------------------------
*/
//
grid_points_cuda_interleaved_mex( d_samples_x, d_samples_y,
d_samples_values, npoints,
grid_size, d_samples_per_bin, d_bin_dimensions_x, d_bin_dimensions_y,
d_samples_in_bin, d_bin_start_offset, d_bin_location,
d_bin_points_x, d_bin_points_y,
nbins, d_kernel_lookup_table,
kernel_lookup_table_size,
kernel_lookup_table_scale, d_grid_values);
//mexErrMsgTxt("gpuArray 2");
plhs[0] = mxGPUCreateMxArrayOnGPU(grid_values);
/*
*/
mxGPUDestroyGPUArray( samples_x);
mxGPUDestroyGPUArray( samples_y);
mxGPUDestroyGPUArray( samples_values);
mxGPUDestroyGPUArray( samples_per_bin);
mxGPUDestroyGPUArray( bin_dimensions_x);
mxGPUDestroyGPUArray( bin_dimensions_y);
mxGPUDestroyGPUArray( samples_in_bin);
mxGPUDestroyGPUArray( kernel_lookup_table);
mxGPUDestroyGPUArray( bin_start_offset);
mxGPUDestroyGPUArray( bin_location);
mxGPUDestroyGPUArray( bin_points_x);
mxGPUDestroyGPUArray( bin_points_y);
mxGPUDestroyGPUArray( grid_values);
}
|
835fb55f9bf611acf9b86fbf576282d15cf3a0d9.hip | // !!! This is a file automatically generated by hipify!!!
#define NO_QT
#include <utility/math.h>
#include <render/quadRender/quadRender.h>
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <iostream>
#include <sstream>
#include <texture_types.h>
#include <vector_functions.h>
#include <hip/hip_vector_types.h>
//#define M_PI 3.1415926535897932384626422832795028841971f
#define TWO_PI 6.2831853071795864769252867665590057683943f
#define NUDGE_FACTOR 1e-3f // epsilon
#define samps 1 // samples
#define BVH_STACK_SIZE 32
__device__ __constant__ SceneInformation cScene;
surface<void, cudaSurfaceType2D> surfaceWriteOut;
struct Ray {
float3 orig; // ray origin
float3 dir; // ray direction
__device__ Ray(float3 o_, float3 d_) : orig(o_), dir(d_) {}
};
enum Refl_t { DIFF, METAL, SPEC, REFR, COAT }; // material types
struct Sphere {
float rad;
float3 pos, emi, col;
Refl_t refl;
__device__ float intersect(const Ray &r) const {
float3 op = pos - r.orig; //
float t, epsilon = 0.01f;
float b = math::dot(op, r.dir);
float disc = b * b - math::dot(op, op) + rad * rad; // discriminant
if (disc < 0)
return 0;
else
disc = sqrtf(disc);
return (t = b - disc) > epsilon ? t : ((t = b + disc) > epsilon ? t : 0);
}
};
__device__ Sphere spheres[] = {
{16, {128.0f, 128, 128}, {6, 4, 2}, {0.f, 0.f, 0.f}, DIFF},
//{2, {0.f, 24.f, 0}, {6, 4, 2}, {0.f, 0.f, 0.f}, DIFF},
//{1, {0.f, 0.f, 24.f}, {6, 4, 2}, {0.f, 0.f, 0.f}, DIFF},
{10000, {50.0f, 40.8f, -1060}, {0.55, 0.55, 0.55}, {0.175f, 0.175f, 0.175f}, DIFF},
{100000, {0.0f, 0, -100000.}, {0, 0, 0}, {0.5f, 0.0f, 0.0f}, COAT},
{100000, {0.0f, 0, -100000.1}, {0, 0, 0}, {0.3f, 0.3f, 0.3f}, DIFF},
//{1.1, {1.6, 1.0, 0}, {0, 0.0, 0}, {0.9f, .9f, 0.9f}, SPEC},
//{0.3, {0.0f, -0.4, 4}, {.0, 0., .0}, {0.9f, 0.9f, 0.9f}, DIFF},
};
__device__ bool RayIntersectsBox(const gpuBVH& bvh, const float3 &originInWorldSpace, const float3 &rayInWorldSpace, int boxIdx) {
float Tnear, Tfar;
Tnear = -FLT_MAX;
Tfar = FLT_MAX;
float2 limits;
#define CHECK_NEAR_AND_FAR_INTERSECTION(c) \
if (rayInWorldSpace.##c == 0.f) { \
if (originInWorldSpace.##c < limits.x) \
return false; \
if (originInWorldSpace.##c > limits.y) \
return false; \
} else { \
float T1 = (limits.x - originInWorldSpace.##c) / rayInWorldSpace.##c; \
float T2 = (limits.y - originInWorldSpace.##c) / rayInWorldSpace.##c; \
if (T1 > T2) { \
float tmp = T1; \
T1 = T2; \
T2 = tmp; \
} \
if (T1 > Tnear) \
Tnear = T1; \
if (T2 < Tfar) \
Tfar = T2; \
if (Tnear > Tfar) \
return false; \
if (Tfar < 0.f) \
return false; \
}
auto lim = bvh.cudaBVHlimits[boxIdx];
limits = float2{ lim.bottom.x, lim.top.x };
CHECK_NEAR_AND_FAR_INTERSECTION(x)
limits = float2{ lim.bottom.y, lim.top.y };
CHECK_NEAR_AND_FAR_INTERSECTION(y)
limits = float2{ lim.bottom.z, lim.top.z };
CHECK_NEAR_AND_FAR_INTERSECTION(z)
return true;
}
__device__ bool BVH_IntersectTriangles(gpuBVH& bvh, const float3 &origin, const float3 &ray,
unsigned avoidSelf, int &pBestTriIdx, float3 &pointHitInWorldSpace, float &kAB,
float &kBC, float &kCA, float &hitdist, float3 &boxnormal) {
pBestTriIdx = -1;
float bestTriDist;
bestTriDist = FLT_MAX;
int32_t stack[BVH_STACK_SIZE];
int32_t stackIdx = 0;
stack[stackIdx++] = 0;
while (stackIdx) {
int32_t boxIdx = stack[stackIdx - 1];
stackIdx--;
uint4 data = bvh.cudaBVHindexesOrTrilists[boxIdx];
if (!(data.x & 0x80000000)) { // INNER NODE
if (RayIntersectsBox(bvh, origin, ray, boxIdx)) {
stack[stackIdx++] = data.y;
stack[stackIdx++] = data.z;
if (stackIdx > BVH_STACK_SIZE) {
return false;
}
}
} else {
for (uint32_t i = data.w; i < data.w + (data.x & 0x7fffffff); i++) {
int32_t idx = bvh.cudaTriIdxList[i];
if (avoidSelf == idx)
continue;
float4 normal = bvh.cudaTriangleIntersectionData[idx].normal;
float d = math::sqlength3(normal);
float k = math::dot3(normal, ray);
if (k == 0.0f)
continue;
float s = (normal.w - math::dot3(normal, origin)) / k;
if (s <= 0.0f)
continue;
if (s <= NUDGE_FACTOR)
continue;
float3 hit = ray * s;
hit += origin;
float4 ee1 = bvh.cudaTriangleIntersectionData[idx].e1d1;
float kt1 = math::dot3(ee1, hit) - ee1.w;
if (kt1 < 0.0f)
continue;
float4 ee2 = bvh.cudaTriangleIntersectionData[idx].e2d2;
float kt2 = math::dot3(ee2, hit) - ee2.w;
if (kt2 < 0.0f)
continue;
float4 ee3 = bvh.cudaTriangleIntersectionData[idx].e3d3;
float kt3 = math::dot3(ee3, hit) - ee3.w;
if (kt3 < 0.0f)
continue;
{
float hitZ = math::sqdistance(origin, hit);
if (hitZ < bestTriDist) {
bestTriDist = hitZ;
hitdist = sqrtf(bestTriDist);
pBestTriIdx = idx;
pointHitInWorldSpace = hit;
kAB = kt1;
kBC = kt2;
kCA = kt3;
}
}
}
}
}
return pBestTriIdx != -1;
}
__device__ float3 path_trace(hiprandState_t *randstate, float3 originInWorldSpace, float3 rayInWorldSpace, int avoidSelf,
int32_t numBVHs, gpuBVH* sceneBVH) {
float3 mask = float3{1.0f, 1.0f, 1.0f};
float3 accucolor = float3{0.0f, 0.0f, 0.0f};
for (int32_t bounces = 0; bounces < 5; bounces++) {
int32_t sphere_id = -1;
int32_t triangle_id = -1;
int32_t pBestTriIdx = -1;
int32_t geomtype = -1;
const Triangle *pBestTri = NULL;
float3 pointHitInWorldSpace;
float d = 1e20f;
float scene_t = 1e20f;
float3 f = float3{0, 0, 0};
float3 emit = float3{0, 0, 0};
float3 x; // intersection point
float3 n; // normal
float3 nl; // oriented normal
float3 dw; // ray direction of next path segment
Refl_t refltype;
float3 rayorig = float3{originInWorldSpace.x, originInWorldSpace.y, originInWorldSpace.z};
float3 raydir = float3{rayInWorldSpace.x, rayInWorldSpace.y, rayInWorldSpace.z};
float numspheres = sizeof(spheres) / sizeof(Sphere);
for (int32_t i = int32_t(numspheres); i--;) {
if ((d = spheres[i].intersect(Ray(rayorig, raydir))) && d < scene_t) {
scene_t = d;
sphere_id = i;
geomtype = 1;
}
}
float kAB = 0.f, kBC = 0.f, kCA = 0.f;
int32_t bvh_idx = -1;
for (int32_t i = 0; i < numBVHs; ++i) {
if (!sceneBVH[i].active)
continue;
float ktAB = 0.f, ktBC = 0.f, ktCA = 0.f;
float hitdistance = 1e20f;
float3 boxnormal = float3{ 0, 0, 0 };
float3 point;
BVH_IntersectTriangles(sceneBVH[i], originInWorldSpace, rayInWorldSpace, avoidSelf, pBestTriIdx, point,
ktAB, ktBC, ktCA, hitdistance, boxnormal);
if (hitdistance < scene_t && hitdistance > 0.002f) // EPSILON
{
pointHitInWorldSpace = point;
scene_t = hitdistance;
triangle_id = pBestTriIdx;
avoidSelf = pBestTriIdx;
geomtype = 2;
bvh_idx = i;
kAB = ktAB;
kBC = ktBC;
kCA = ktCA;
}
}
    if (scene_t >= 1e20f) // nothing was hit: scene_t still holds its 1e20f sentinel
return float3{0, 0, 0};
if (geomtype == 1) {
Sphere &sphere = spheres[sphere_id];
x = originInWorldSpace + rayInWorldSpace * scene_t;
n = math::normalize(float3{x.x - sphere.pos.x, x.y - sphere.pos.y, x.z - sphere.pos.z});
nl = math::dot(n, rayInWorldSpace) < 0 ? n : n * -1;
f = float3{sphere.col.x, sphere.col.y, sphere.col.z};
refltype = sphere.refl;
emit = float3{sphere.emi.x, sphere.emi.y, sphere.emi.z};
accucolor += (mask * emit);
}
if (geomtype == 2) {
pBestTri = &sceneBVH[bvh_idx].pTriangles[triangle_id];
x = pointHitInWorldSpace;
n = math::normalize(math::castTo<float3>(pBestTri->normal));
auto i0 = pBestTri->i0;
auto i1 = pBestTri->i1;
auto i2 = pBestTri->i2;
auto v0 = math::castTo<float3>(sceneBVH[bvh_idx].vertices[i0].position);
auto v1 = math::castTo<float3>(sceneBVH[bvh_idx].vertices[i1].position);
auto v2 = math::castTo<float3>(sceneBVH[bvh_idx].vertices[i2].position);
auto n0 = math::castTo<float3>(sceneBVH[bvh_idx].vertices[i0].normal);
auto n1 = math::castTo<float3>(sceneBVH[bvh_idx].vertices[i1].normal);
auto n2 = math::castTo<float3>(sceneBVH[bvh_idx].vertices[i2].normal);
auto ab = v1 - v0;
auto bc = v2 - v1;
auto cross_ab_bc = math::cross(ab, bc);
auto area = math::length(cross_ab_bc);
auto ABx = kAB * math::distance(v0, v1);
auto BCx = kBC * math::distance(v1, v2);
auto CAx = kCA * math::distance(v2, v0);
n0 *= BCx / area;
n1 *= CAx / area;
n2 *= ABx / area;
n = math::normalize(n0 + n1 + n2);
//return n;
// n = math::normalize(math::castTo<float3>(kBC * n0 + kCA * n1 + kAB * n2));
nl = math::dot(n, rayInWorldSpace) < 0 ? n : n * -1;
float3 colour = float3{0.9f, 0.3f, 0.0f};
if (bvh_idx == 0) {
colour = float3{ 0.05098f, 0.23137f, 0.494177f };
refltype = DIFF;
//refltype = COAT;
}
if (bvh_idx == 1) {
colour = float3{ 0.9f, 0.9f, 0.9f };
refltype = DIFF;
}
f = colour;
emit = float3{0, 0, 0};
accucolor += (mask * emit);
}
if (refltype == DIFF) {
float phi = 2 * CUDART_PI_F * hiprand_uniform(randstate);
float r2 = hiprand_uniform(randstate);
float r2s = sqrtf(r2);
float3 w = math::normalize(nl);
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{0, 1, 0} : float3{1, 0, 0}), w));
float3 v = math::cross(w, u);
dw = math::normalize(u * cosf(phi) * r2s + v * sinf(phi) * r2s + w * sqrtf(1 - r2));
pointHitInWorldSpace = x + w * 0.01f;
mask *= f;
}
if (refltype == METAL) {
float phi = 2.f * CUDART_PI_F * hiprand_uniform(randstate);
float r2 = hiprand_uniform(randstate);
float phongexponent = 20;
float cosTheta = powf(1 - r2, 1.0f / (phongexponent + 1));
float sinTheta = sqrtf(1 - cosTheta * cosTheta);
float3 w = math::normalize(rayInWorldSpace - n * 2.0f * math::dot(n, rayInWorldSpace));
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{0, 1, 0} : float3{1, 0, 0}), w));
float3 v = math::cross(w, u);
dw = math::normalize(u * cosf(phi) * sinTheta + v * sinf(phi) * sinTheta + w * cosTheta);
pointHitInWorldSpace = x + w * 0.01f;
mask *= f;
}
if (refltype == SPEC) {
dw = rayInWorldSpace - n * 2.0f * math::dot(n, rayInWorldSpace);
pointHitInWorldSpace = x + nl * 0.01f;
mask *= f;
}
if (refltype == COAT) {
float rouletteRandomFloat = hiprand_uniform(randstate);
float threshold = 0.05f;
float3 specularColor = float3{1, 1, 1}; // hard-coded
bool reflectFromSurface = (rouletteRandomFloat < threshold);
if (reflectFromSurface) {
mask *= specularColor;
dw = rayInWorldSpace - n * 2.0f * math::dot(n, rayInWorldSpace);
pointHitInWorldSpace = x + nl * 0.01f;
} else {
float r1 = 2.f * CUDART_PI_F * hiprand_uniform(randstate);
float r2 = hiprand_uniform(randstate);
float r2s = sqrtf(r2);
float3 w = math::normalize(nl);
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{0, 1, 0} : float3{1, 0, 0}), w));
float3 v = math::cross(w, u);
dw = math::normalize(u * cosf(r1) * r2s + v * sinf(r1) * r2s + w * sqrtf(1 - r2));
pointHitInWorldSpace = x + nl * 0.01f;
mask *= f;
}
}
if (refltype == REFR) {
bool into = math::dot(n, nl) > 0;
float nc = 1.0f;
float nt = 1.5f;
float nnt = into ? nc / nt : nt / nc;
float ddn = math::dot(rayInWorldSpace, nl);
float cos2t = 1.0f - nnt * nnt * (1.f - ddn * ddn);
if (cos2t < 0.0f) {
dw = rayInWorldSpace;
dw -= n * 2.0f * math::dot(n, rayInWorldSpace);
pointHitInWorldSpace = x + nl * 0.01f;
} else {
float3 tdir = rayInWorldSpace * nnt;
tdir -= n * ((into ? 1 : -1) * (ddn * nnt + sqrtf(cos2t)));
tdir = math::normalize(tdir);
        float R0 = (nt - nc) * (nt - nc) / ((nt + nc) * (nt + nc)); // Schlick base reflectance ((nt-nc)/(nt+nc))^2
        float c = 1.f - (into ? -ddn : math::dot(tdir, n));
        float Re = R0 + (1.f - R0) * c * c * c * c * c;
        float Tr = 1 - Re; // Transmission
        float P = .25f + .5f * Re;
        float RP = Re / P;
        float TP = Tr / (1.f - P);
        if (hiprand_uniform(randstate) < P) { // Russian roulette: reflect with weight RP, refract with weight TP
mask *= RP;
dw = rayInWorldSpace;
dw -= n * 2.0f * math::dot(n, rayInWorldSpace);
pointHitInWorldSpace = x + nl * 0.01f;
} else {
mask *= TP;
dw = tdir;
pointHitInWorldSpace = x + nl * 0.001f;
}
}
}
originInWorldSpace = pointHitInWorldSpace;
rayInWorldSpace = dw;
}
return float3{accucolor.x, accucolor.y, accucolor.z};
}
__global__ void CoreLoopPathTracingKernel(float3 *accumbuffer, int32_t numBVHs, gpuBVH* sceneBVH, unsigned int framenumber,
unsigned int hashedframenumber) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t threadId =
(blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
hiprandState_t randState;
hiprand_init(hashedframenumber + threadId, 0, 0, &randState);
float3 rendercampos = float3{cScene.m_camera.position.x, cScene.m_camera.position.y, cScene.m_camera.position.z};
int32_t i = (cScene.height - y - 1) * cScene.width + x;
int32_t pixelx = x;
int32_t pixely = cScene.height - y - 1;
float3 finalcol = float3{0.0f, 0.0f, 0.0f};
for (int32_t s = 0; s < 1; s++) {
float3 rendercamview =
math::normalize(float3{cScene.m_camera.view.x, cScene.m_camera.view.y, cScene.m_camera.view.z});
float3 rendercamup = math::normalize(float3{cScene.m_camera.up.x, cScene.m_camera.up.y, cScene.m_camera.up.z});
float3 horizontalAxis = math::normalize(math::cross(rendercamview, rendercamup));
float3 verticalAxis = math::normalize(math::cross(horizontalAxis, rendercamview));
float3 middle = rendercampos + rendercamview;
float3 horizontal = horizontalAxis * tanf(cScene.m_camera.fov.x * 0.5f * (CUDART_PI_F / 180));
float3 vertical = -verticalAxis * tanf(-cScene.m_camera.fov.y * 0.5f * (CUDART_PI_F / 180));
float jitterValueX = hiprand_uniform(&randState) - 0.5f;
float jitterValueY = hiprand_uniform(&randState) - 0.5f;
float sx = (jitterValueX + pixelx) / (cScene.width - 1);
float sy = (jitterValueY + pixely) / (cScene.height - 1);
// compute pixel on screen
float3 pointOnPlaneOneUnitAwayFromEye = middle + (horizontal * ((2 * sx) - 1)) + (vertical * ((2 * sy) - 1));
float3 pointOnImagePlane =
rendercampos + ((pointOnPlaneOneUnitAwayFromEye - rendercampos) * cScene.m_camera.focalDistance);
float3 aperturePoint;
if (cScene.m_camera.apertureRadius > 0.00001f) {
float random1 = hiprand_uniform(&randState);
float random2 = hiprand_uniform(&randState);
float angle = TWO_PI * random1;
float distance = cScene.m_camera.apertureRadius * sqrtf(random2);
float apertureX = cos(angle) * distance;
float apertureY = sin(angle) * distance;
aperturePoint = rendercampos + (horizontalAxis * apertureX) + (verticalAxis * apertureY);
} else {
aperturePoint = rendercampos;
}
float3 apertureToImagePlane = pointOnImagePlane - aperturePoint;
apertureToImagePlane = math::normalize(apertureToImagePlane);
float3 rayInWorldSpace = math::normalize(apertureToImagePlane);
float3 originInWorldSpace = aperturePoint;
finalcol += path_trace(&randState, originInWorldSpace, rayInWorldSpace, -1, numBVHs, sceneBVH) * (1.0f / samps);
}
accumbuffer[i] += finalcol;
float3 tempcol = accumbuffer[i] / framenumber;
float3 colour = float3{math::clamp(tempcol.x, 0.0f, 1.0f), math::clamp(tempcol.y, 0.0f, 1.0f),
math::clamp(tempcol.z, 0.0f, 1.0f)};
float4 out{(powf(colour.x, 1 / 2.2f)), (powf(colour.y, 1 / 2.2f)), (powf(colour.z, 1 / 2.2f)), 1.f};
surf2Dwrite(out, surfaceWriteOut, x * sizeof(float4), y, hipBoundaryModeClamp);
}
void cudaRender(SceneInformation scene, cudaGraphicsResource_t resource, objectLoader &sceneMeshes, objectLoader &fluidMeshes, float3 *acc,
unsigned framenumber, unsigned hashedframes) {
static bool once = true;
static gpuBVH* bvhs = nullptr;
if (once) {
hipArray_t color_arr;
hipGraphicsMapResources(1, &resource, 0);
hipGraphicsSubResourceGetMappedArray(&color_arr, resource, 0, 0);
hipBindSurfaceToArray(surfaceWriteOut, color_arr);
hipMalloc(&bvhs, sizeof(gpuBVH) * 2);
once = false;
}
hipMemcpyToSymbol(cScene, &scene, sizeof(SceneInformation));
dim3 texturedim((uint32_t)scene.width, (uint32_t)scene.height, 1);
dim3 blockdim(16, 16, 1);
dim3 griddim(texturedim.x / blockdim.x, texturedim.y / blockdim.y, 1);
if (texturedim.x % blockdim.x != 0)
griddim.x += 1;
if (texturedim.y % blockdim.y != 0)
griddim.y += 1;
gpuBVH bvhs_host[] = { fluidMeshes.getGPUArrays(), sceneMeshes.getGPUArrays() };
hipMemcpy(bvhs, bvhs_host, sizeof(gpuBVH) * 2, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( CoreLoopPathTracingKernel), dim3(griddim), dim3(blockdim), 0, 0, (float3 *)acc, 2, bvhs, framenumber,
hashedframes);
hipDeviceSynchronize();
} | 835fb55f9bf611acf9b86fbf576282d15cf3a0d9.cu | #define NO_QT
#include <utility/math.h>
#include <render/quadRender/quadRender.h>
#include "device_launch_parameters.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
#include <sstream>
#include <texture_types.h>
#include <vector_functions.h>
#include <vector_types.h>
//#define M_PI 3.1415926535897932384626422832795028841971f
#define TWO_PI 6.2831853071795864769252867665590057683943f
#define NUDGE_FACTOR 1e-3f // epsilon
#define samps 1 // samples
#define BVH_STACK_SIZE 32
__device__ __constant__ SceneInformation cScene;
surface<void, cudaSurfaceType2D> surfaceWriteOut;
struct Ray {
float3 orig; // ray origin
float3 dir; // ray direction
__device__ Ray(float3 o_, float3 d_) : orig(o_), dir(d_) {}
};
enum Refl_t { DIFF, METAL, SPEC, REFR, COAT }; // material types
struct Sphere {
float rad;
float3 pos, emi, col;
Refl_t refl;
__device__ float intersect(const Ray &r) const {
float3 op = pos - r.orig; //
float t, epsilon = 0.01f;
float b = math::dot(op, r.dir);
float disc = b * b - math::dot(op, op) + rad * rad; // discriminant
if (disc < 0)
return 0;
else
disc = sqrtf(disc);
return (t = b - disc) > epsilon ? t : ((t = b + disc) > epsilon ? t : 0);
}
};
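/*
 * Explanatory note (not part of the original file): intersect() solves
 * |o + t*d - pos|^2 = rad^2 assuming the ray direction d is normalized. With
 * b = d.(pos - o), the roots are t = b -/+ sqrt(b*b - |pos - o|^2 + rad*rad);
 * the code returns the smallest root larger than a small epsilon, or 0 when the
 * ray misses the sphere.
 */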
__device__ Sphere spheres[] = {
{16, {128.0f, 128, 128}, {6, 4, 2}, {0.f, 0.f, 0.f}, DIFF},
//{2, {0.f, 24.f, 0}, {6, 4, 2}, {0.f, 0.f, 0.f}, DIFF},
//{1, {0.f, 0.f, 24.f}, {6, 4, 2}, {0.f, 0.f, 0.f}, DIFF},
{10000, {50.0f, 40.8f, -1060}, {0.55, 0.55, 0.55}, {0.175f, 0.175f, 0.175f}, DIFF},
{100000, {0.0f, 0, -100000.}, {0, 0, 0}, {0.5f, 0.0f, 0.0f}, COAT},
{100000, {0.0f, 0, -100000.1}, {0, 0, 0}, {0.3f, 0.3f, 0.3f}, DIFF},
//{1.1, {1.6, 1.0, 0}, {0, 0.0, 0}, {0.9f, .9f, 0.9f}, SPEC},
//{0.3, {0.0f, -0.4, 4}, {.0, 0., .0}, {0.9f, 0.9f, 0.9f}, DIFF},
};
__device__ bool RayIntersectsBox(const gpuBVH& bvh, const float3 &originInWorldSpace, const float3 &rayInWorldSpace, int boxIdx) {
float Tnear, Tfar;
Tnear = -FLT_MAX;
Tfar = FLT_MAX;
float2 limits;
#define CHECK_NEAR_AND_FAR_INTERSECTION(c) \
if (rayInWorldSpace.##c == 0.f) { \
if (originInWorldSpace.##c < limits.x) \
return false; \
if (originInWorldSpace.##c > limits.y) \
return false; \
} else { \
float T1 = (limits.x - originInWorldSpace.##c) / rayInWorldSpace.##c; \
float T2 = (limits.y - originInWorldSpace.##c) / rayInWorldSpace.##c; \
if (T1 > T2) { \
float tmp = T1; \
T1 = T2; \
T2 = tmp; \
} \
if (T1 > Tnear) \
Tnear = T1; \
if (T2 < Tfar) \
Tfar = T2; \
if (Tnear > Tfar) \
return false; \
if (Tfar < 0.f) \
return false; \
}
auto lim = bvh.cudaBVHlimits[boxIdx];
limits = float2{ lim.bottom.x, lim.top.x };
CHECK_NEAR_AND_FAR_INTERSECTION(x)
limits = float2{ lim.bottom.y, lim.top.y };
CHECK_NEAR_AND_FAR_INTERSECTION(y)
limits = float2{ lim.bottom.z, lim.top.z };
CHECK_NEAR_AND_FAR_INTERSECTION(z)
return true;
}
__device__ bool BVH_IntersectTriangles(gpuBVH& bvh, const float3 &origin, const float3 &ray,
unsigned avoidSelf, int &pBestTriIdx, float3 &pointHitInWorldSpace, float &kAB,
float &kBC, float &kCA, float &hitdist, float3 &boxnormal) {
pBestTriIdx = -1;
float bestTriDist;
bestTriDist = FLT_MAX;
int32_t stack[BVH_STACK_SIZE];
int32_t stackIdx = 0;
stack[stackIdx++] = 0;
while (stackIdx) {
int32_t boxIdx = stack[stackIdx - 1];
stackIdx--;
uint4 data = bvh.cudaBVHindexesOrTrilists[boxIdx];
if (!(data.x & 0x80000000)) { // INNER NODE
if (RayIntersectsBox(bvh, origin, ray, boxIdx)) {
stack[stackIdx++] = data.y;
stack[stackIdx++] = data.z;
if (stackIdx > BVH_STACK_SIZE) {
return false;
}
}
} else {
for (uint32_t i = data.w; i < data.w + (data.x & 0x7fffffff); i++) {
int32_t idx = bvh.cudaTriIdxList[i];
if (avoidSelf == idx)
continue;
float4 normal = bvh.cudaTriangleIntersectionData[idx].normal;
float d = math::sqlength3(normal);
float k = math::dot3(normal, ray);
if (k == 0.0f)
continue;
float s = (normal.w - math::dot3(normal, origin)) / k;
if (s <= 0.0f)
continue;
if (s <= NUDGE_FACTOR)
continue;
float3 hit = ray * s;
hit += origin;
float4 ee1 = bvh.cudaTriangleIntersectionData[idx].e1d1;
float kt1 = math::dot3(ee1, hit) - ee1.w;
if (kt1 < 0.0f)
continue;
float4 ee2 = bvh.cudaTriangleIntersectionData[idx].e2d2;
float kt2 = math::dot3(ee2, hit) - ee2.w;
if (kt2 < 0.0f)
continue;
float4 ee3 = bvh.cudaTriangleIntersectionData[idx].e3d3;
float kt3 = math::dot3(ee3, hit) - ee3.w;
if (kt3 < 0.0f)
continue;
{
float hitZ = math::sqdistance(origin, hit);
if (hitZ < bestTriDist) {
bestTriDist = hitZ;
hitdist = sqrtf(bestTriDist);
pBestTriIdx = idx;
pointHitInWorldSpace = hit;
kAB = kt1;
kBC = kt2;
kCA = kt3;
}
}
}
}
}
return pBestTriIdx != -1;
}
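/*
 * Explanatory note (not part of the original file): cudaBVHindexesOrTrilists packs
 * the node layout into a uint4. If the high bit of data.x is clear the node is an
 * inner node and data.y / data.z are the indices of its two children (pushed onto
 * the explicit traversal stack, limited to BVH_STACK_SIZE entries); otherwise the
 * node is a leaf, data.x & 0x7fffffff is its triangle count and data.w is the start
 * offset into cudaTriIdxList. Each candidate triangle is tested against a
 * precomputed plane (normal, w) followed by three edge-plane tests (e1d1, e2d2, e3d3).
 */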
__device__ float3 path_trace(curandState *randstate, float3 originInWorldSpace, float3 rayInWorldSpace, int avoidSelf,
int32_t numBVHs, gpuBVH* sceneBVH) {
float3 mask = float3{1.0f, 1.0f, 1.0f};
float3 accucolor = float3{0.0f, 0.0f, 0.0f};
for (int32_t bounces = 0; bounces < 5; bounces++) {
int32_t sphere_id = -1;
int32_t triangle_id = -1;
int32_t pBestTriIdx = -1;
int32_t geomtype = -1;
const Triangle *pBestTri = NULL;
float3 pointHitInWorldSpace;
float d = 1e20f;
float scene_t = 1e20f;
float3 f = float3{0, 0, 0};
float3 emit = float3{0, 0, 0};
float3 x; // intersection point
float3 n; // normal
float3 nl; // oriented normal
float3 dw; // ray direction of next path segment
Refl_t refltype;
float3 rayorig = float3{originInWorldSpace.x, originInWorldSpace.y, originInWorldSpace.z};
float3 raydir = float3{rayInWorldSpace.x, rayInWorldSpace.y, rayInWorldSpace.z};
float numspheres = sizeof(spheres) / sizeof(Sphere);
for (int32_t i = int32_t(numspheres); i--;) {
if ((d = spheres[i].intersect(Ray(rayorig, raydir))) && d < scene_t) {
scene_t = d;
sphere_id = i;
geomtype = 1;
}
}
float kAB = 0.f, kBC = 0.f, kCA = 0.f;
int32_t bvh_idx = -1;
for (int32_t i = 0; i < numBVHs; ++i) {
if (!sceneBVH[i].active)
continue;
float ktAB = 0.f, ktBC = 0.f, ktCA = 0.f;
float hitdistance = 1e20f;
float3 boxnormal = float3{ 0, 0, 0 };
float3 point;
BVH_IntersectTriangles(sceneBVH[i], originInWorldSpace, rayInWorldSpace, avoidSelf, pBestTriIdx, point,
ktAB, ktBC, ktCA, hitdistance, boxnormal);
if (hitdistance < scene_t && hitdistance > 0.002f) // EPSILON
{
pointHitInWorldSpace = point;
scene_t = hitdistance;
triangle_id = pBestTriIdx;
avoidSelf = pBestTriIdx;
geomtype = 2;
bvh_idx = i;
kAB = ktAB;
kBC = ktBC;
kCA = ktCA;
}
}
    if (scene_t >= 1e20f) // nothing was hit: scene_t still holds its 1e20f sentinel
return float3{0, 0, 0};
if (geomtype == 1) {
Sphere &sphere = spheres[sphere_id];
x = originInWorldSpace + rayInWorldSpace * scene_t;
n = math::normalize(float3{x.x - sphere.pos.x, x.y - sphere.pos.y, x.z - sphere.pos.z});
nl = math::dot(n, rayInWorldSpace) < 0 ? n : n * -1;
f = float3{sphere.col.x, sphere.col.y, sphere.col.z};
refltype = sphere.refl;
emit = float3{sphere.emi.x, sphere.emi.y, sphere.emi.z};
accucolor += (mask * emit);
}
if (geomtype == 2) {
pBestTri = &sceneBVH[bvh_idx].pTriangles[triangle_id];
x = pointHitInWorldSpace;
n = math::normalize(math::castTo<float3>(pBestTri->normal));
auto i0 = pBestTri->i0;
auto i1 = pBestTri->i1;
auto i2 = pBestTri->i2;
auto v0 = math::castTo<float3>(sceneBVH[bvh_idx].vertices[i0].position);
auto v1 = math::castTo<float3>(sceneBVH[bvh_idx].vertices[i1].position);
auto v2 = math::castTo<float3>(sceneBVH[bvh_idx].vertices[i2].position);
auto n0 = math::castTo<float3>(sceneBVH[bvh_idx].vertices[i0].normal);
auto n1 = math::castTo<float3>(sceneBVH[bvh_idx].vertices[i1].normal);
auto n2 = math::castTo<float3>(sceneBVH[bvh_idx].vertices[i2].normal);
auto ab = v1 - v0;
auto bc = v2 - v1;
auto cross_ab_bc = math::cross(ab, bc);
auto area = math::length(cross_ab_bc);
auto ABx = kAB * math::distance(v0, v1);
auto BCx = kBC * math::distance(v1, v2);
auto CAx = kCA * math::distance(v2, v0);
n0 *= BCx / area;
n1 *= CAx / area;
n2 *= ABx / area;
n = math::normalize(n0 + n1 + n2);
//return n;
// n = math::normalize(math::castTo<float3>(kBC * n0 + kCA * n1 + kAB * n2));
nl = math::dot(n, rayInWorldSpace) < 0 ? n : n * -1;
float3 colour = float3{0.9f, 0.3f, 0.0f};
if (bvh_idx == 0) {
colour = float3{ 0.05098f, 0.23137f, 0.494177f };
refltype = DIFF;
//refltype = COAT;
}
if (bvh_idx == 1) {
colour = float3{ 0.9f, 0.9f, 0.9f };
refltype = DIFF;
}
f = colour;
emit = float3{0, 0, 0};
accucolor += (mask * emit);
}
if (refltype == DIFF) {
float phi = 2 * CUDART_PI_F * curand_uniform(randstate);
float r2 = curand_uniform(randstate);
float r2s = sqrtf(r2);
float3 w = math::normalize(nl);
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{0, 1, 0} : float3{1, 0, 0}), w));
float3 v = math::cross(w, u);
dw = math::normalize(u * cosf(phi) * r2s + v * sinf(phi) * r2s + w * sqrtf(1 - r2));
pointHitInWorldSpace = x + w * 0.01f;
mask *= f;
}
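    // Explanatory note (not in the original): the DIFF branch above draws a
    // cosine-weighted direction around the oriented normal w (sin(theta) = sqrt(r2),
    // cos(theta) = sqrt(1 - r2)). The cos(theta)/pi sampling density cancels both the
    // Lambertian 1/pi factor and the geometric cosine term, so only the albedo f is
    // multiplied into the path throughput (mask).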
if (refltype == METAL) {
float phi = 2.f * CUDART_PI_F * curand_uniform(randstate);
float r2 = curand_uniform(randstate);
float phongexponent = 20;
float cosTheta = powf(1 - r2, 1.0f / (phongexponent + 1));
float sinTheta = sqrtf(1 - cosTheta * cosTheta);
float3 w = math::normalize(rayInWorldSpace - n * 2.0f * math::dot(n, rayInWorldSpace));
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{0, 1, 0} : float3{1, 0, 0}), w));
float3 v = math::cross(w, u);
dw = math::normalize(u * cosf(phi) * sinTheta + v * sinf(phi) * sinTheta + w * cosTheta);
pointHitInWorldSpace = x + w * 0.01f;
mask *= f;
}
if (refltype == SPEC) {
dw = rayInWorldSpace - n * 2.0f * math::dot(n, rayInWorldSpace);
pointHitInWorldSpace = x + nl * 0.01f;
mask *= f;
}
if (refltype == COAT) {
float rouletteRandomFloat = curand_uniform(randstate);
float threshold = 0.05f;
float3 specularColor = float3{1, 1, 1}; // hard-coded
bool reflectFromSurface = (rouletteRandomFloat < threshold);
if (reflectFromSurface) {
mask *= specularColor;
dw = rayInWorldSpace - n * 2.0f * math::dot(n, rayInWorldSpace);
pointHitInWorldSpace = x + nl * 0.01f;
} else {
float r1 = 2.f * CUDART_PI_F * curand_uniform(randstate);
float r2 = curand_uniform(randstate);
float r2s = sqrtf(r2);
float3 w = math::normalize(nl);
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{0, 1, 0} : float3{1, 0, 0}), w));
float3 v = math::cross(w, u);
dw = math::normalize(u * cosf(r1) * r2s + v * sinf(r1) * r2s + w * sqrtf(1 - r2));
pointHitInWorldSpace = x + nl * 0.01f;
mask *= f;
}
}
if (refltype == REFR) {
bool into = math::dot(n, nl) > 0;
float nc = 1.0f;
float nt = 1.5f;
float nnt = into ? nc / nt : nt / nc;
float ddn = math::dot(rayInWorldSpace, nl);
float cos2t = 1.0f - nnt * nnt * (1.f - ddn * ddn);
if (cos2t < 0.0f) {
dw = rayInWorldSpace;
dw -= n * 2.0f * math::dot(n, rayInWorldSpace);
pointHitInWorldSpace = x + nl * 0.01f;
} else {
float3 tdir = rayInWorldSpace * nnt;
tdir -= n * ((into ? 1 : -1) * (ddn * nnt + sqrtf(cos2t)));
tdir = math::normalize(tdir);
        float R0 = (nt - nc) * (nt - nc) / ((nt + nc) * (nt + nc)); // Schlick base reflectance ((nt-nc)/(nt+nc))^2
        float c = 1.f - (into ? -ddn : math::dot(tdir, n));
        float Re = R0 + (1.f - R0) * c * c * c * c * c;
        float Tr = 1 - Re; // Transmission
        float P = .25f + .5f * Re;
        float RP = Re / P;
        float TP = Tr / (1.f - P);
        if (curand_uniform(randstate) < P) { // Russian roulette: reflect with weight RP, refract with weight TP
mask *= RP;
dw = rayInWorldSpace;
dw -= n * 2.0f * math::dot(n, rayInWorldSpace);
pointHitInWorldSpace = x + nl * 0.01f;
} else {
mask *= TP;
dw = tdir;
pointHitInWorldSpace = x + nl * 0.001f;
}
}
}
originInWorldSpace = pointHitInWorldSpace;
rayInWorldSpace = dw;
}
return float3{accucolor.x, accucolor.y, accucolor.z};
}
__global__ void CoreLoopPathTracingKernel(float3 *accumbuffer, int32_t numBVHs, gpuBVH* sceneBVH, unsigned int framenumber,
unsigned int hashedframenumber) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t threadId =
(blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
curandState randState;
curand_init(hashedframenumber + threadId, 0, 0, &randState);
float3 rendercampos = float3{cScene.m_camera.position.x, cScene.m_camera.position.y, cScene.m_camera.position.z};
int32_t i = (cScene.height - y - 1) * cScene.width + x;
int32_t pixelx = x;
int32_t pixely = cScene.height - y - 1;
float3 finalcol = float3{0.0f, 0.0f, 0.0f};
for (int32_t s = 0; s < 1; s++) {
float3 rendercamview =
math::normalize(float3{cScene.m_camera.view.x, cScene.m_camera.view.y, cScene.m_camera.view.z});
float3 rendercamup = math::normalize(float3{cScene.m_camera.up.x, cScene.m_camera.up.y, cScene.m_camera.up.z});
float3 horizontalAxis = math::normalize(math::cross(rendercamview, rendercamup));
float3 verticalAxis = math::normalize(math::cross(horizontalAxis, rendercamview));
float3 middle = rendercampos + rendercamview;
float3 horizontal = horizontalAxis * tanf(cScene.m_camera.fov.x * 0.5f * (CUDART_PI_F / 180));
float3 vertical = -verticalAxis * tanf(-cScene.m_camera.fov.y * 0.5f * (CUDART_PI_F / 180));
float jitterValueX = curand_uniform(&randState) - 0.5f;
float jitterValueY = curand_uniform(&randState) - 0.5f;
float sx = (jitterValueX + pixelx) / (cScene.width - 1);
float sy = (jitterValueY + pixely) / (cScene.height - 1);
// compute pixel on screen
float3 pointOnPlaneOneUnitAwayFromEye = middle + (horizontal * ((2 * sx) - 1)) + (vertical * ((2 * sy) - 1));
float3 pointOnImagePlane =
rendercampos + ((pointOnPlaneOneUnitAwayFromEye - rendercampos) * cScene.m_camera.focalDistance);
float3 aperturePoint;
if (cScene.m_camera.apertureRadius > 0.00001f) {
float random1 = curand_uniform(&randState);
float random2 = curand_uniform(&randState);
float angle = TWO_PI * random1;
float distance = cScene.m_camera.apertureRadius * sqrtf(random2);
float apertureX = cos(angle) * distance;
float apertureY = sin(angle) * distance;
aperturePoint = rendercampos + (horizontalAxis * apertureX) + (verticalAxis * apertureY);
} else {
aperturePoint = rendercampos;
}
float3 apertureToImagePlane = pointOnImagePlane - aperturePoint;
apertureToImagePlane = math::normalize(apertureToImagePlane);
float3 rayInWorldSpace = math::normalize(apertureToImagePlane);
float3 originInWorldSpace = aperturePoint;
finalcol += path_trace(&randState, originInWorldSpace, rayInWorldSpace, -1, numBVHs, sceneBVH) * (1.0f / samps);
}
accumbuffer[i] += finalcol;
float3 tempcol = accumbuffer[i] / framenumber;
float3 colour = float3{math::clamp(tempcol.x, 0.0f, 1.0f), math::clamp(tempcol.y, 0.0f, 1.0f),
math::clamp(tempcol.z, 0.0f, 1.0f)};
float4 out{(powf(colour.x, 1 / 2.2f)), (powf(colour.y, 1 / 2.2f)), (powf(colour.z, 1 / 2.2f)), 1.f};
surf2Dwrite(out, surfaceWriteOut, x * sizeof(float4), y, cudaBoundaryModeClamp);
}
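/*
 * Explanatory note (not from the original sources): the kernel adds one new sample
 * per pixel per frame into accumbuffer, so after `framenumber` frames the displayed
 * value is the running average accumbuffer[i] / framenumber, clamped to [0,1] and
 * gamma-corrected with exponent 1/2.2 before being written to the output surface.
 */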
void cudaRender(SceneInformation scene, cudaGraphicsResource_t resource, objectLoader &sceneMeshes, objectLoader &fluidMeshes, float3 *acc,
unsigned framenumber, unsigned hashedframes) {
static bool once = true;
static gpuBVH* bvhs = nullptr;
if (once) {
cudaArray_t color_arr;
cudaGraphicsMapResources(1, &resource, 0);
cudaGraphicsSubResourceGetMappedArray(&color_arr, resource, 0, 0);
cudaBindSurfaceToArray(surfaceWriteOut, color_arr);
cudaMalloc(&bvhs, sizeof(gpuBVH) * 2);
once = false;
}
cudaMemcpyToSymbol(cScene, &scene, sizeof(SceneInformation));
dim3 texturedim((uint32_t)scene.width, (uint32_t)scene.height, 1);
dim3 blockdim(16, 16, 1);
dim3 griddim(texturedim.x / blockdim.x, texturedim.y / blockdim.y, 1);
if (texturedim.x % blockdim.x != 0)
griddim.x += 1;
if (texturedim.y % blockdim.y != 0)
griddim.y += 1;
gpuBVH bvhs_host[] = { fluidMeshes.getGPUArrays(), sceneMeshes.getGPUArrays() };
cudaMemcpy(bvhs, bvhs_host, sizeof(gpuBVH) * 2, cudaMemcpyHostToDevice);
CoreLoopPathTracingKernel<<<griddim, blockdim>>>((float3 *)acc, 2, bvhs, framenumber,
hashedframes);
cudaDeviceSynchronize();
} |
8cf144f2226de29264b5d96245f235f64fbf3140.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/temporal_shift_kernel.h"
namespace phi {
template <typename T>
__global__ void KeTemporalShiftFwNCHW(const T* input,
T* output,
const int ntchw,
const int tchw,
const int chw,
const int hw,
const int t,
const int c1,
const int c2) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int src_it = 0;
for (; tid < ntchw; tid += stride) {
int it = (tid % tchw) / chw;
int ic = (tid % chw) / hw;
if (ic < c1) {
src_it = it - 1;
} else if (ic < c2) {
src_it = it + 1;
} else {
src_it = it;
}
if (src_it < 0 || src_it >= t) {
output[tid] = 0;
} else {
output[tid] = input[tid + (src_it - it) * chw];
}
}
}
template <typename T>
__global__ void KeTemporalShiftFwNHWC(const T* input,
T* output,
const int nthwc,
const int thwc,
const int hwc,
const int t,
const int c,
const int c1,
const int c2) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int src_it = 0;
for (; tid < nthwc; tid += stride) {
int it = (tid % thwc) / hwc;
int ic = tid % c;
if (ic < c1) {
src_it = it - 1;
} else if (ic < c2) {
src_it = it + 1;
} else {
src_it = it;
}
if (src_it < 0 || src_it >= t) {
output[tid] = 0;
} else {
output[tid] = input[tid + (src_it - it) * hwc];
}
}
}
template <typename T, typename Context>
void TemporalShiftKernel(const Context& dev_ctx,
const DenseTensor& x,
int seg_num,
float shift_ratio,
const std::string& data_format_str,
DenseTensor* out) {
auto* input = &x;
auto* output = out;
int t = seg_num;
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_format_str);
const int nt = input->dims()[0];
const int c =
(data_layout == DataLayout::kNCHW ? input->dims()[1] : input->dims()[3]);
const int h =
(data_layout == DataLayout::kNCHW ? input->dims()[2] : input->dims()[1]);
const int w =
(data_layout == DataLayout::kNCHW ? input->dims()[3] : input->dims()[2]);
const int hw = h * w;
const int chw = c * hw;
const int tchw = t * chw;
const int ntchw = nt * chw;
const int c1 = static_cast<int>(c * shift_ratio);
const int c2 = static_cast<int>(c * 2 * shift_ratio);
DDim out_dims =
(data_layout == DataLayout::kNCHW ? phi::make_ddim({nt, c, h, w})
: phi::make_ddim({nt, h, w, c}));
const T* input_data = input->data<T>();
T* output_data = output->mutable_data<T>(out_dims, dev_ctx.GetPlace());
int pixelNum = nt * chw;
int threads = 1024;
int grid = (pixelNum + threads - 1) / threads;
int blocks_per_sm = dev_ctx.GetMaxPhysicalThreadCount() / threads;
grid = ::min(dev_ctx.GetSMCount() * blocks_per_sm, grid);
if (data_layout == DataLayout::kNCHW) {
hipLaunchKernelGGL(( KeTemporalShiftFwNCHW<T>), dim3(grid), dim3(threads), 0, dev_ctx.stream(),
input_data, output_data, ntchw, tchw, chw, hw, t, c1, c2);
} else {
hipLaunchKernelGGL(( KeTemporalShiftFwNHWC<T>), dim3(grid), dim3(threads), 0, dev_ctx.stream(),
input_data, output_data, ntchw, tchw, chw, t, c, c1, c2);
}
}
} // namespace phi
PD_REGISTER_KERNEL(temporal_shift,
GPU,
ALL_LAYOUT,
phi::TemporalShiftKernel,
float,
double,
phi::dtype::float16) {}
| 8cf144f2226de29264b5d96245f235f64fbf3140.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/temporal_shift_kernel.h"
namespace phi {
template <typename T>
__global__ void KeTemporalShiftFwNCHW(const T* input,
T* output,
const int ntchw,
const int tchw,
const int chw,
const int hw,
const int t,
const int c1,
const int c2) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int src_it = 0;
for (; tid < ntchw; tid += stride) {
int it = (tid % tchw) / chw;
int ic = (tid % chw) / hw;
if (ic < c1) {
src_it = it - 1;
} else if (ic < c2) {
src_it = it + 1;
} else {
src_it = it;
}
if (src_it < 0 || src_it >= t) {
output[tid] = 0;
} else {
output[tid] = input[tid + (src_it - it) * chw];
}
}
}
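/*
 * Explanatory note (not part of the original file): for NCHW data the flat index
 * decomposes as tid = in*tchw + it*chw + ic*hw + ihw. Channels with ic < c1 take
 * their value from frame it-1, channels with c1 <= ic < c2 from frame it+1, and the
 * remaining channels copy frame it; reads that would fall outside [0, t) produce
 * zeros. For example, with t = 4, c = 8 and shift_ratio = 0.25, c1 = 2 and c2 = 4,
 * so channels 0-1 are read from the previous frame, channels 2-3 from the next
 * frame, and channels 4-7 are copied unchanged.
 */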
template <typename T>
__global__ void KeTemporalShiftFwNHWC(const T* input,
T* output,
const int nthwc,
const int thwc,
const int hwc,
const int t,
const int c,
const int c1,
const int c2) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int src_it = 0;
for (; tid < nthwc; tid += stride) {
int it = (tid % thwc) / hwc;
int ic = tid % c;
if (ic < c1) {
src_it = it - 1;
} else if (ic < c2) {
src_it = it + 1;
} else {
src_it = it;
}
if (src_it < 0 || src_it >= t) {
output[tid] = 0;
} else {
output[tid] = input[tid + (src_it - it) * hwc];
}
}
}
template <typename T, typename Context>
void TemporalShiftKernel(const Context& dev_ctx,
const DenseTensor& x,
int seg_num,
float shift_ratio,
const std::string& data_format_str,
DenseTensor* out) {
auto* input = &x;
auto* output = out;
int t = seg_num;
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_format_str);
const int nt = input->dims()[0];
const int c =
(data_layout == DataLayout::kNCHW ? input->dims()[1] : input->dims()[3]);
const int h =
(data_layout == DataLayout::kNCHW ? input->dims()[2] : input->dims()[1]);
const int w =
(data_layout == DataLayout::kNCHW ? input->dims()[3] : input->dims()[2]);
const int hw = h * w;
const int chw = c * hw;
const int tchw = t * chw;
const int ntchw = nt * chw;
const int c1 = static_cast<int>(c * shift_ratio);
const int c2 = static_cast<int>(c * 2 * shift_ratio);
DDim out_dims =
(data_layout == DataLayout::kNCHW ? phi::make_ddim({nt, c, h, w})
: phi::make_ddim({nt, h, w, c}));
const T* input_data = input->data<T>();
T* output_data = output->mutable_data<T>(out_dims, dev_ctx.GetPlace());
int pixelNum = nt * chw;
int threads = 1024;
int grid = (pixelNum + threads - 1) / threads;
int blocks_per_sm = dev_ctx.GetMaxPhysicalThreadCount() / threads;
grid = std::min(dev_ctx.GetSMCount() * blocks_per_sm, grid);
if (data_layout == DataLayout::kNCHW) {
KeTemporalShiftFwNCHW<T><<<grid, threads, 0, dev_ctx.stream()>>>(
input_data, output_data, ntchw, tchw, chw, hw, t, c1, c2);
} else {
KeTemporalShiftFwNHWC<T><<<grid, threads, 0, dev_ctx.stream()>>>(
input_data, output_data, ntchw, tchw, chw, t, c, c1, c2);
}
}
} // namespace phi
PD_REGISTER_KERNEL(temporal_shift,
GPU,
ALL_LAYOUT,
phi::TemporalShiftKernel,
float,
double,
phi::dtype::float16) {}
|
d0f94cf919daffbc0f557af240622a3c2cf3b2d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/**
 * Parallel Computing (2015-16 course)
 *
 * Alberto Gil
 * Guillermo Cebrian
 */
// General includes
// Include for the parallel computing utilities
/**
 * Antenna structure
*/
typedef struct {
int y;
int x;
} Antena;
/**
 * Macro to access the map positions
*/
#define m(y,x) mapa[ (y * cols) + x ]
/**
 * Define the block size
*/
#define TAMBLOCK 128
/* Funcion que inicializa la matriz al valor maximo */
/* Actualiza el mapa despues de colocar una antena*/
/**
* Funcin de ayuda para imprimir el mapa
*/
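/*
 * A minimal host-side launch sketch (assumed usage; `rows' and `cols' stand
 * for the map dimensions, and the max value is illustrative): any grid/block
 * shape whose total thread count covers `size' works, because the kernel
 * flattens the 2D grid and block indices into one linear position, e.g.
 *
 *   dim3 block(TAMBLOCK);
 *   dim3 grid((rows*cols + TAMBLOCK - 1) / TAMBLOCK);
 *   hipLaunchKernelGGL(gpu_init, grid, block, 0, 0, mapad, INT_MAX, rows*cols);
 */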
__global__ void gpu_init(int *mapad, int max, int size){
/* Required identifiers */
	int IDX_Thread = threadIdx.x; /* Thread index in the x dimension */
	int IDY_Thread = threadIdx.y; /* Thread index in the y dimension */
	int IDX_block = blockIdx.x; /* Block index in the x dimension */
	int IDY_block = blockIdx.y; /* Block index in the y dimension */
	int shapeGrid_X = gridDim.x; /* Number of blocks in the x dimension */
	int threads_per_block = blockDim.x * blockDim.y; /* Number of threads per block (1 dimension) */
	/* Formula to compute the position */ // Position in the vector depending on the thread and block
	int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
	// initialize
	if(position<size) mapad[position] = max;
} | d0f94cf919daffbc0f557af240622a3c2cf3b2d2.cu | #include "includes.h"
/**
 * Parallel Computing (course 1516)
*
* Alberto Gil
* Guillermo Cebrian
*/
// General includes
// Include for the parallel-computing utilities
/**
 * Antenna structure
*/
typedef struct {
int y;
int x;
} Antena;
/**
 * Macro to access map positions
*/
#define m(y,x) mapa[ (y * cols) + x ]
/**
 * Define the block size
 */
#define TAMBLOCK 128
/* Function that initializes the matrix to the maximum value */
/* Updates the map after placing an antenna */
/**
 * Helper function to print the map
*/
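/*
 * A minimal host-side launch sketch (assumed usage; `rows' and `cols' stand
 * for the map dimensions, and the max value is illustrative): any grid/block
 * shape whose total thread count covers `size' works, because the kernel
 * flattens the 2D grid and block indices into one linear position, e.g.
 *
 *   dim3 block(TAMBLOCK);
 *   dim3 grid((rows*cols + TAMBLOCK - 1) / TAMBLOCK);
 *   gpu_init<<<grid, block>>>(mapad, INT_MAX, rows*cols);
 */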
__global__ void gpu_init(int *mapad, int max, int size){
/* Required identifiers */
	int IDX_Thread = threadIdx.x; /* Thread index in the x dimension */
	int IDY_Thread = threadIdx.y; /* Thread index in the y dimension */
	int IDX_block = blockIdx.x; /* Block index in the x dimension */
	int IDY_block = blockIdx.y; /* Block index in the y dimension */
	int shapeGrid_X = gridDim.x; /* Number of blocks in the x dimension */
	int threads_per_block = blockDim.x * blockDim.y; /* Number of threads per block (1 dimension) */
	/* Formula to compute the position */ // Position in the vector depending on the thread and block
	int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
	// initialize
	if(position<size) mapad[position] = max;
} |
8e50bf9039892d2ef4d9062efb91f74bcb967370.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
csymv.cu is nearly identical to chemv.cu, just change names and drop MAGMA_C_CONJ.
chemv_kernel_U (upper) in chemv_upper.cu is very similar to
chemv_kernel_L (lower) in chemv.cu; diff the two files to compare.
@generated from magmablas/zhemv.cu, normal z -> c, Sun Nov 20 20:20:29 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define PRECISION_c
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
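// Launch shape (set up in magmablas_chemv_work below): a 1-D grid of
// magma_ceildiv(n, NB_X) blocks, each with NB_X x NB_Y threads; block blk
// writes column blk of the lda-by-blocks work array, which
// chemv_kernel_L_sum then reduces.
// Example: n = 150 gives 3 blocks; the last one has partial = 150 % 64 = 22,
// so threads with tx >= 22 re-read the last valid row of A and their results
// are skipped when storing to work.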
__global__ void
chemv_kernel_L(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_C_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_C_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_C_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end chemv_kernel_L
/***************************************************************************//**
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
              [ (A31*x1 + A32*x2 + A33*x3)         ]
*******************************************************************************/
__global__ void
chemv_kernel_L_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind + blk*lda;
magmaFloatComplex Ax = MAGMA_C_ZERO;
for (int j = blk; j < blocks; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
/***************************************************************************//**
Purpose
-------
magmablas_chemv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
            It is recommended that ldda be a multiple of 16; otherwise
            performance would deteriorate as the memory accesses
            would not be fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) COMPLEX array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements chemv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
    magmablas_chemv_work requires users to provide a workspace, while
    magmablas_chemv is a wrapper routine allocating the workspace inside the
    routine and provides the same interface as cublas.
    If users need to call chemv frequently, we suggest using
    magmablas_chemv_work instead of magmablas_chemv, as the overhead of
    allocating and freeing device memory in magmablas_chemv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_hemv
*******************************************************************************/
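/*
    A minimal calling sketch, sizing the workspace per the LWORK requirement
    above (this mirrors what the magmablas_chemv wrapper further below does
    internally):

        magma_int_t blocks = magma_ceildiv( n, NB_X );
        magmaFloatComplex_ptr dwork;
        magma_cmalloc( &dwork, ldda*blocks );
        magmablas_chemv_work( MagmaLower, n, alpha, dA, ldda, dx, incx,
                              beta, dy, incy, dwork, ldda*blocks, queue );
        magma_free( dwork );
*/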
extern "C"
magma_int_t
magmablas_chemv_work(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magmaFloatComplex_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_chemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
hipLaunchKernelGGL(( chemv_kernel_U), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dx, incx, dwork);
hipLaunchKernelGGL(( chemv_kernel_U_sum), dim3(grid), dim3(threads_sum), 0, queue->cuda_stream() ,
n, alpha, ldda, beta, dy, incy, dwork);
}
else {
hipLaunchKernelGGL(( chemv_kernel_L), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dx, incx, dwork);
hipLaunchKernelGGL(( chemv_kernel_L_sum), dim3(grid), dim3(threads_sum), 0, queue->cuda_stream() ,
n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_chemv_work
/***************************************************************************//**
Purpose
-------
magmablas_chemv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
            It is recommended that ldda be a multiple of 16; otherwise
            performance would deteriorate as the memory accesses
            would not be fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_chemv(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_chemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
magmaFloatComplex_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_cmalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_chemv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, queue );
magma_free( dwork );
return info;
}
// end magmablas_chemv
| 8e50bf9039892d2ef4d9062efb91f74bcb967370.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
csymv.cu is nearly identical to chemv.cu, just change names and drop MAGMA_C_CONJ.
chemv_kernel_U (upper) in chemv_upper.cu is very similar to
chemv_kernel_L (lower) in chemv.cu; diff the two files to compare.
@generated from magmablas/zhemv.cu, normal z -> c, Sun Nov 20 20:20:29 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define PRECISION_c
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
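// Launch shape (set up in magmablas_chemv_work below): a 1-D grid of
// magma_ceildiv(n, NB_X) blocks, each with NB_X x NB_Y threads; block blk
// writes column blk of the lda-by-blocks work array, which
// chemv_kernel_L_sum then reduces.
// Example: n = 150 gives 3 blocks; the last one has partial = 150 % 64 = 22,
// so threads with tx >= 22 re-read the last valid row of A and their results
// are skipped when storing to work.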
__global__ void
chemv_kernel_L(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_C_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_C_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_C_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_C_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end chemv_kernel_L
/***************************************************************************//**
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
              [ (A31*x1 + A32*x2 + A33*x3)         ]
*******************************************************************************/
__global__ void
chemv_kernel_L_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind + blk*lda;
magmaFloatComplex Ax = MAGMA_C_ZERO;
for (int j = blk; j < blocks; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
/***************************************************************************//**
Purpose
-------
magmablas_chemv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
            It is recommended that ldda be a multiple of 16; otherwise
            performance would deteriorate as the memory accesses
            would not be fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) COMPLEX array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements chemv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
    magmablas_chemv_work requires users to provide a workspace, while
    magmablas_chemv is a wrapper routine allocating the workspace inside the
    routine and provides the same interface as cublas.
    If users need to call chemv frequently, we suggest using
    magmablas_chemv_work instead of magmablas_chemv, as the overhead of
    allocating and freeing device memory in magmablas_chemv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_hemv
*******************************************************************************/
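/*
    A minimal calling sketch, sizing the workspace per the LWORK requirement
    above (this mirrors what the magmablas_chemv wrapper further below does
    internally):

        magma_int_t blocks = magma_ceildiv( n, NB_X );
        magmaFloatComplex_ptr dwork;
        magma_cmalloc( &dwork, ldda*blocks );
        magmablas_chemv_work( MagmaLower, n, alpha, dA, ldda, dx, incx,
                              beta, dy, incy, dwork, ldda*blocks, queue );
        magma_free( dwork );
*/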
extern "C"
magma_int_t
magmablas_chemv_work(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magmaFloatComplex_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_chemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
chemv_kernel_U<<< grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dx, incx, dwork);
chemv_kernel_U_sum<<< grid, threads_sum, 0, queue->cuda_stream() >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
else {
chemv_kernel_L<<< grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dx, incx, dwork);
chemv_kernel_L_sum<<< grid, threads_sum, 0, queue->cuda_stream() >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_chemv_work
/***************************************************************************//**
Purpose
-------
magmablas_chemv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
            It is recommended that ldda be a multiple of 16; otherwise
            performance would deteriorate as the memory accesses
            would not be fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_chemv(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_chemv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
magmaFloatComplex_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_cmalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_chemv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, queue );
magma_free( dwork );
return info;
}
// end magmablas_chemv
|
6d8e23ac4fad7582da83a6f983d862717c904950.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// --------------------------------------------------------
// R-FCN
// Written by Yi Li, 2016.
// --------------------------------------------------------
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/psroi_pooling_layer.hpp"
#include "caffe/util/gpu_util.cuh"
using std::max;
using std::min;
namespace caffe {
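// Position-sensitive ROI pooling (R-FCN): output element (n, ctop, ph, pw)
// average-pools one spatial bin of ROI n, reading it from the dedicated
// score-map channel c = (ctop*group_size + gh)*group_size + gw, so each
// (ph, pw) cell of an output channel has its own group of input channels;
// mapping_channel records c for reuse in the backward pass.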
template <typename Dtype>
__global__ void PSROIPoolingForward(
const int nthreads,
const Dtype* bottom_data,
const Dtype spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois,
const int output_dim,
const int group_size,
Dtype* top_data,
int* mapping_channel) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w =
static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
Dtype roi_start_h =
static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
Dtype roi_end_w =
static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
Dtype roi_end_h =
static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph) * bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = pw;
int gh = ph;
int c = (ctop*group_size + gh)*group_size + gw;
bottom_data += (roi_batch_ind * channels + c) * height * width;
Dtype out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
out_sum += bottom_data[bottom_index];
}
}
Dtype bin_area = (hend - hstart)*(wend - wstart);
top_data[index] = is_empty? 0. : out_sum/bin_area;
mapping_channel[index] = c;
}
}
template <typename Dtype>
void PSROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
LOG(INFO) << "forward gpu";
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* mapping_channel_ptr = mapping_channel_.mutable_gpu_data();
int count = top[0]->count();
caffe_set(count, Dtype(0), top_data);
caffe_set(count, -1, mapping_channel_ptr);
// NOLINT_NEXT_LINE(whitespace/operators)
PSROIPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, bottom_data, spatial_scale_,
channels_, height_, width_, pooled_height_,
pooled_width_, bottom_rois, output_dim_, group_size_,
top_data, mapping_channel_ptr);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void PSROIPoolingBackwardAtomic(
const int nthreads,
const Dtype* top_diff,
const int* mapping_channel,
const int num_rois,
const Dtype spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int output_dim,
Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w =
static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
Dtype roi_start_h =
static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
Dtype roi_end_w =
static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
Dtype roi_end_h =
static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph)* bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c = mapping_channel[index];
Dtype* offset_bottom_diff = bottom_diff +
(roi_batch_ind * channels + c) * height * width;
Dtype bin_area = (hend - hstart)*(wend - wstart);
Dtype diff_val = is_empty ? 0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
}
}
}
}
template <typename Dtype>
void PSROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
const int* mapping_channel_ptr = mapping_channel_.gpu_data();
caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff());
caffe_gpu_set(bottom_count, Dtype(0), bottom_diff);
const int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
PSROIPoolingBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, top_diff, mapping_channel_ptr,
top[0]->num(), spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, output_dim_, bottom_diff,
bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PSROIPoolingLayer);
} // namespace caffe
| 6d8e23ac4fad7582da83a6f983d862717c904950.cu | // --------------------------------------------------------
// R-FCN
// Written by Yi Li, 2016.
// --------------------------------------------------------
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/psroi_pooling_layer.hpp"
#include "caffe/util/gpu_util.cuh"
using std::max;
using std::min;
namespace caffe {
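// Position-sensitive ROI pooling (R-FCN): output element (n, ctop, ph, pw)
// average-pools one spatial bin of ROI n, reading it from the dedicated
// score-map channel c = (ctop*group_size + gh)*group_size + gw, so each
// (ph, pw) cell of an output channel has its own group of input channels;
// mapping_channel records c for reuse in the backward pass.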
template <typename Dtype>
__global__ void PSROIPoolingForward(
const int nthreads,
const Dtype* bottom_data,
const Dtype spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const Dtype* bottom_rois,
const int output_dim,
const int group_size,
Dtype* top_data,
int* mapping_channel) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w =
static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
Dtype roi_start_h =
static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
Dtype roi_end_w =
static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
Dtype roi_end_h =
static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph) * bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = pw;
int gh = ph;
int c = (ctop*group_size + gh)*group_size + gw;
bottom_data += (roi_batch_ind * channels + c) * height * width;
Dtype out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
out_sum += bottom_data[bottom_index];
}
}
Dtype bin_area = (hend - hstart)*(wend - wstart);
top_data[index] = is_empty? 0. : out_sum/bin_area;
mapping_channel[index] = c;
}
}
template <typename Dtype>
void PSROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
LOG(INFO) << "forward gpu";
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* mapping_channel_ptr = mapping_channel_.mutable_gpu_data();
int count = top[0]->count();
caffe_set(count, Dtype(0), top_data);
caffe_set(count, -1, mapping_channel_ptr);
// NOLINT_NEXT_LINE(whitespace/operators)
PSROIPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, bottom_data, spatial_scale_,
channels_, height_, width_, pooled_height_,
pooled_width_, bottom_rois, output_dim_, group_size_,
top_data, mapping_channel_ptr);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void PSROIPoolingBackwardAtomic(
const int nthreads,
const Dtype* top_diff,
const int* mapping_channel,
const int num_rois,
const Dtype spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int output_dim,
Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w =
static_cast<Dtype>(round(bottom_rois[1])) * spatial_scale;
Dtype roi_start_h =
static_cast<Dtype>(round(bottom_rois[2])) * spatial_scale;
Dtype roi_end_w =
static_cast<Dtype>(round(bottom_rois[3]) + 1.) * spatial_scale;
Dtype roi_end_h =
static_cast<Dtype>(round(bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
Dtype roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
Dtype roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width);
int hstart = floor(static_cast<Dtype>(ph)* bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<Dtype>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<Dtype>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<Dtype>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c = mapping_channel[index];
Dtype* offset_bottom_diff = bottom_diff +
(roi_batch_ind * channels + c) * height * width;
Dtype bin_area = (hend - hstart)*(wend - wstart);
Dtype diff_val = is_empty ? 0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
}
}
}
}
template <typename Dtype>
void PSROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
const int* mapping_channel_ptr = mapping_channel_.gpu_data();
caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff());
caffe_gpu_set(bottom_count, Dtype(0), bottom_diff);
const int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
PSROIPoolingBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, top_diff, mapping_channel_ptr,
top[0]->num(), spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, output_dim_, bottom_diff,
bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PSROIPoolingLayer);
} // namespace caffe
|
e0c9fbe0c13786205ea1e16f41f5f37da5f64786.hip | // !!! This is a file automatically generated by hipify!!!
// Created by luozhiwang ([email protected])
// Date: 2020/3/13
#include "Int8Calibrator.h"
EntropyCalibratorV2::EntropyCalibratorV2(common::InputParams inputParams, common::TrtParams trtParams): mInputParams(std::move(inputParams)), mTrtParams(std::move(trtParams)){
pFunction = mInputParams.pFunction;
mImageSize = mInputParams.ImgC * mInputParams.ImgH * mInputParams.ImgW;
mInputCount = mInputParams.BatchSize * mImageSize;
CHECK(hipMalloc((void**)&mDevice_ptr, sizeof(float) * mInputCount));
mHost_ptr = new float[mInputCount];
// search file
std::vector<std::pair<std::string, std::string>> file_path;
mFileList = searchDirectory(std::vector<std::string>{mTrtParams.CalibrationImageDir}, std::vector<std::string>{".jpg", ".png"});
mStartPos = 0;
mCurPos = mStartPos + mInputParams.BatchSize;
mEndPos = mFileList.size();
}
bool EntropyCalibratorV2::getBatch(void **bindings, const char **names, int nbBindings) {
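// readIntoBuffer()/update() prepare the next batch in the host buffer; it is then copied to the device and exposed through bindings[0].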
if(readIntoBuffer()&&update()){
CHECK(hipMemcpy(mDevice_ptr, mHost_ptr, sizeof(float) * mInputCount, hipMemcpyHostToDevice));
bindings[0] = mDevice_ptr;
return true;
}
return false;
}
EntropyCalibratorV2::~EntropyCalibratorV2() {
CHECK(hipFree(mDevice_ptr));
delete []mHost_ptr;
}
| e0c9fbe0c13786205ea1e16f41f5f37da5f64786.cu | // Created by luozhiwang ([email protected])
// Date: 2020/3/13
#include "Int8Calibrator.h"
EntropyCalibratorV2::EntropyCalibratorV2(common::InputParams inputParams, common::TrtParams trtParams): mInputParams(std::move(inputParams)), mTrtParams(std::move(trtParams)){
pFunction = mInputParams.pFunction;
mImageSize = mInputParams.ImgC * mInputParams.ImgH * mInputParams.ImgW;
mInputCount = mInputParams.BatchSize * mImageSize;
CHECK(cudaMalloc((void**)&mDevice_ptr, sizeof(float) * mInputCount));
mHost_ptr = new float[mInputCount];
// search file
std::vector<std::pair<std::string, std::string>> file_path;
mFileList = searchDirectory(std::vector<std::string>{mTrtParams.CalibrationImageDir}, std::vector<std::string>{".jpg", ".png"});
mStartPos = 0;
mCurPos = mStartPos + mInputParams.BatchSize;
mEndPos = mFileList.size();
}
bool EntropyCalibratorV2::getBatch(void **bindings, const char **names, int nbBindings) {
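// readIntoBuffer()/update() prepare the next batch in the host buffer; it is then copied to the device and exposed through bindings[0].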
if(readIntoBuffer()&&update()){
CHECK(cudaMemcpy(mDevice_ptr, mHost_ptr, sizeof(float) * mInputCount, cudaMemcpyHostToDevice));
bindings[0] = mDevice_ptr;
return true;
}
return false;
}
EntropyCalibratorV2::~EntropyCalibratorV2() {
CHECK(cudaFree(mDevice_ptr));
delete []mHost_ptr;
}
|
5f5fe56453ca1749df3a137da629b60f0ef8067e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include "ppl/cv/cuda/abs.h"
#include "utility.hpp"
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
__DEVICE__
schar abs_device(const schar& src) {
if (src == -128) {
return 127;
}
else {
return abs((int)src);
}
}
__DEVICE__
float abs_device(const float& src) {
if (src >= 0) {
return src;
}
else {
return (0 - src);
}
}
__global__
void absKernel0(const schar* src, int rows, int cols, int src_stride,
schar* dst, int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
const char4* input = (char4*)(src + element_y * src_stride);
char4 input_value = input[element_x];
char4 output_value;
output_value.x = abs_device(input_value.x);
output_value.y = abs_device(input_value.y);
output_value.z = abs_device(input_value.z);
output_value.w = abs_device(input_value.w);
char4* output = (char4*)(dst + element_y * dst_stride);
output[element_x] = output_value;
}
__global__
void absKernel10(const schar* src, int cols, schar* dst) {
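// Fully contiguous case: the data is treated as one flat array and processed four schar values at a time via char4; the last (possibly partial) vector is written element by element.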
int element_x = (blockIdx.x << 8) + threadIdx.x;
int index_x = element_x << 2;
if (index_x >= cols) {
return;
}
const char4* input = (char4*)src;
char4 input_value, output_value;
input_value = input[element_x];
if (index_x < cols - 4) {
output_value.x = abs_device(input_value.x);
output_value.y = abs_device(input_value.y);
output_value.z = abs_device(input_value.z);
output_value.w = abs_device(input_value.w);
char4* output = (char4*)dst;
output[element_x] = output_value;
}
else {
output_value.x = abs_device(input_value.x);
if (index_x < cols - 1) {
output_value.y = abs_device(input_value.y);
}
if (index_x < cols - 2) {
output_value.z = abs_device(input_value.z);
}
if (index_x < cols - 3) {
output_value.w = abs_device(input_value.w);
}
dst[index_x] = output_value.x;
if (index_x < cols - 1) {
dst[index_x + 1] = output_value.y;
}
if (index_x < cols - 2) {
dst[index_x + 2] = output_value.z;
}
if (index_x < cols - 3) {
dst[index_x + 3] = output_value.w;
}
}
}
__global__
void absKernel11(const schar* src, int rows, int cols, int src_stride,
schar* dst, int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
int index_x = element_x << 2;
if (element_y >= rows || index_x >= cols) {
return;
}
const schar* input = src + element_y * src_stride;
schar* output = dst + element_y * dst_stride;
schar input_value0, input_value1, input_value2, input_value3;
schar output_value0, output_value1, output_value2, output_value3;
if (blockIdx.x < gridDim.x - 1) {
input_value0 = input[index_x];
input_value1 = input[index_x + 1];
input_value2 = input[index_x + 2];
input_value3 = input[index_x + 3];
output_value0 = abs_device(input_value0);
output_value1 = abs_device(input_value1);
output_value2 = abs_device(input_value2);
output_value3 = abs_device(input_value3);
output[index_x] = output_value0;
output[index_x + 1] = output_value1;
output[index_x + 2] = output_value2;
output[index_x + 3] = output_value3;
}
else {
input_value0 = input[index_x];
if (index_x < cols - 1) {
input_value1 = input[index_x + 1];
}
if (index_x < cols - 2) {
input_value2 = input[index_x + 2];
}
if (index_x < cols - 3) {
input_value3 = input[index_x + 3];
}
output_value0 = abs_device(input_value0);
if (index_x < cols - 1) {
output_value1 = abs_device(input_value1);
}
if (index_x < cols - 2) {
output_value2 = abs_device(input_value2);
}
if (index_x < cols - 3) {
output_value3 = abs_device(input_value3);
}
output[index_x] = output_value0;
if (index_x < cols - 1) {
output[index_x + 1] = output_value1;
}
if (index_x < cols - 2) {
output[index_x + 2] = output_value2;
}
if (index_x < cols - 3) {
output[index_x + 3] = output_value3;
}
}
}
__global__
void absKernel0(const float* src, int rows, int cols, int src_stride,
float* dst, int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
const float2* input = (float2*)((uchar*)src + element_y * src_stride);
float2 input_value = input[element_x];
float2 output_value;
output_value.x = abs_device(input_value.x);
output_value.y = abs_device(input_value.y);
float2* output = (float2*)((uchar*)dst + element_y * dst_stride);
output[element_x] = output_value;
}
__global__
void absKernel1(const float* src, int rows, int cols, int src_stride,
float* dst, int dst_stride) {
int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
const float* input = (float*)((uchar*)src + element_y * src_stride);
float* output = (float*)((uchar*)dst + element_y * dst_stride);
float input_value0, input_value1;
float output_value0, output_value1;
if (blockIdx.x < gridDim.x - 1) {
input_value0 = input[element_x];
input_value1 = input[element_x + 1];
output_value0 = abs_device(input_value0);
output_value1 = abs_device(input_value1);
output[element_x] = output_value0;
output[element_x + 1] = output_value1;
}
else {
input_value0 = input[element_x];
if (element_x != cols - 1) {
input_value1 = input[element_x + 1];
}
output_value0 = abs_device(input_value0);
if (element_x != cols - 1) {
output_value1 = abs_device(input_value1);
}
output[element_x] = output_value0;
if (element_x != cols - 1) {
output[element_x + 1] = output_value1;
}
}
}
RetCode abs(const schar* src, int rows, int cols, int channels, int src_stride,
schar* dst, int dst_stride, hipStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(schar));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(schar));
int columns = cols * channels;
cols = divideUp(columns, 4, 2);
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(cols, kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
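// Kernel selection: char4 loads when both strides are 4-byte aligned, a flat 1D pass when the rows are fully contiguous, otherwise the scalar fallback.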
if ((src_stride & 3) == 0 && (dst_stride & 3) == 0) {
hipLaunchKernelGGL(( absKernel0), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, dst,
dst_stride);
}
else if (src_stride == columns && dst_stride == columns) {
columns *= rows;
cols = divideUp(columns, 4, 2);
block.x = 256;
block.y = 1;
grid.x = divideUp(cols, 256, 8);
grid.y = 1;
hipLaunchKernelGGL(( absKernel10), dim3(grid), dim3(block), 0, stream, src, columns, dst);
}
else {
hipLaunchKernelGGL(( absKernel11), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, dst,
dst_stride);
}
hipError_t code = hipGetLastError();
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
RetCode abs(const float* src, int rows, int cols, int channels, int src_stride,
float* dst, int dst_stride, hipStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));
int columns = cols * channels;
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
if ((src_stride & 7) == 0 && (dst_stride & 7) == 0) {
cols = divideUp(columns, 2, 1);
hipLaunchKernelGGL(( absKernel0), dim3(grid), dim3(block), 0, stream, src, rows, cols, src_stride, dst,
dst_stride);
}
else {
hipLaunchKernelGGL(( absKernel1), dim3(grid), dim3(block), 0, stream, src, rows, columns, src_stride, dst,
dst_stride);
}
hipError_t code = hipGetLastError();
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode Abs<schar, 1>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const schar* inData,
int outWidthStride,
schar* outData) {
RetCode code = abs(inData, height, width, 1, inWidthStride, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode Abs<schar, 3>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const schar* inData,
int outWidthStride,
schar* outData) {
RetCode code = abs(inData, height, width, 3, inWidthStride, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode Abs<schar, 4>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const schar* inData,
int outWidthStride,
schar* outData) {
RetCode code = abs(inData, height, width, 4, inWidthStride, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode Abs<float, 1>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = abs(inData, height, width, 1, inWidthStride, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode Abs<float, 3>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = abs(inData, height, width, 3, inWidthStride, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode Abs<float, 4>(hipStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = abs(inData, height, width, 4, inWidthStride, outData,
outWidthStride, stream);
return code;
}
} // namespace cuda
} // namespace cv
} // namespace ppl
| 5f5fe56453ca1749df3a137da629b60f0ef8067e.cu | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include "ppl/cv/cuda/abs.h"
#include "utility.hpp"
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
__DEVICE__
schar abs_device(const schar& src) {
if (src == -128) {
return 127;
}
else {
return abs((int)src);
}
}
__DEVICE__
float abs_device(const float& src) {
if (src >= 0) {
return src;
}
else {
return (0 - src);
}
}
__global__
void absKernel0(const schar* src, int rows, int cols, int src_stride,
schar* dst, int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
const char4* input = (char4*)(src + element_y * src_stride);
char4 input_value = input[element_x];
char4 output_value;
output_value.x = abs_device(input_value.x);
output_value.y = abs_device(input_value.y);
output_value.z = abs_device(input_value.z);
output_value.w = abs_device(input_value.w);
char4* output = (char4*)(dst + element_y * dst_stride);
output[element_x] = output_value;
}
__global__
void absKernel10(const schar* src, int cols, schar* dst) {
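// Fully contiguous case: the data is treated as one flat array and processed four schar values at a time via char4; the last (possibly partial) vector is written element by element.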
int element_x = (blockIdx.x << 8) + threadIdx.x;
int index_x = element_x << 2;
if (index_x >= cols) {
return;
}
const char4* input = (char4*)src;
char4 input_value, output_value;
input_value = input[element_x];
if (index_x < cols - 4) {
output_value.x = abs_device(input_value.x);
output_value.y = abs_device(input_value.y);
output_value.z = abs_device(input_value.z);
output_value.w = abs_device(input_value.w);
char4* output = (char4*)dst;
output[element_x] = output_value;
}
else {
output_value.x = abs_device(input_value.x);
if (index_x < cols - 1) {
output_value.y = abs_device(input_value.y);
}
if (index_x < cols - 2) {
output_value.z = abs_device(input_value.z);
}
if (index_x < cols - 3) {
output_value.w = abs_device(input_value.w);
}
dst[index_x] = output_value.x;
if (index_x < cols - 1) {
dst[index_x + 1] = output_value.y;
}
if (index_x < cols - 2) {
dst[index_x + 2] = output_value.z;
}
if (index_x < cols - 3) {
dst[index_x + 3] = output_value.w;
}
}
}
__global__
void absKernel11(const schar* src, int rows, int cols, int src_stride,
schar* dst, int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
int index_x = element_x << 2;
if (element_y >= rows || index_x >= cols) {
return;
}
const schar* input = src + element_y * src_stride;
schar* output = dst + element_y * dst_stride;
schar input_value0, input_value1, input_value2, input_value3;
schar output_value0, output_value1, output_value2, output_value3;
if (blockIdx.x < gridDim.x - 1) {
input_value0 = input[index_x];
input_value1 = input[index_x + 1];
input_value2 = input[index_x + 2];
input_value3 = input[index_x + 3];
output_value0 = abs_device(input_value0);
output_value1 = abs_device(input_value1);
output_value2 = abs_device(input_value2);
output_value3 = abs_device(input_value3);
output[index_x] = output_value0;
output[index_x + 1] = output_value1;
output[index_x + 2] = output_value2;
output[index_x + 3] = output_value3;
}
else {
input_value0 = input[index_x];
if (index_x < cols - 1) {
input_value1 = input[index_x + 1];
}
if (index_x < cols - 2) {
input_value2 = input[index_x + 2];
}
if (index_x < cols - 3) {
input_value3 = input[index_x + 3];
}
output_value0 = abs_device(input_value0);
if (index_x < cols - 1) {
output_value1 = abs_device(input_value1);
}
if (index_x < cols - 2) {
output_value2 = abs_device(input_value2);
}
if (index_x < cols - 3) {
output_value3 = abs_device(input_value3);
}
output[index_x] = output_value0;
if (index_x < cols - 1) {
output[index_x + 1] = output_value1;
}
if (index_x < cols - 2) {
output[index_x + 2] = output_value2;
}
if (index_x < cols - 3) {
output[index_x + 3] = output_value3;
}
}
}
__global__
void absKernel0(const float* src, int rows, int cols, int src_stride,
float* dst, int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
const float2* input = (float2*)((uchar*)src + element_y * src_stride);
float2 input_value = input[element_x];
float2 output_value;
output_value.x = abs_device(input_value.x);
output_value.y = abs_device(input_value.y);
float2* output = (float2*)((uchar*)dst + element_y * dst_stride);
output[element_x] = output_value;
}
__global__
void absKernel1(const float* src, int rows, int cols, int src_stride,
float* dst, int dst_stride) {
int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
const float* input = (float*)((uchar*)src + element_y * src_stride);
float* output = (float*)((uchar*)dst + element_y * dst_stride);
float input_value0, input_value1;
float output_value0, output_value1;
if (blockIdx.x < gridDim.x - 1) {
input_value0 = input[element_x];
input_value1 = input[element_x + 1];
output_value0 = abs_device(input_value0);
output_value1 = abs_device(input_value1);
output[element_x] = output_value0;
output[element_x + 1] = output_value1;
}
else {
input_value0 = input[element_x];
if (element_x != cols - 1) {
input_value1 = input[element_x + 1];
}
output_value0 = abs_device(input_value0);
if (element_x != cols - 1) {
output_value1 = abs_device(input_value1);
}
output[element_x] = output_value0;
if (element_x != cols - 1) {
output[element_x + 1] = output_value1;
}
}
}
RetCode abs(const schar* src, int rows, int cols, int channels, int src_stride,
schar* dst, int dst_stride, cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(schar));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(schar));
int columns = cols * channels;
cols = divideUp(columns, 4, 2);
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(cols, kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
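// Kernel selection: char4 loads when both strides are 4-byte aligned, a flat 1D pass when the rows are fully contiguous, otherwise the scalar fallback.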
if ((src_stride & 3) == 0 && (dst_stride & 3) == 0) {
absKernel0<<<grid, block, 0, stream>>>(src, rows, cols, src_stride, dst,
dst_stride);
}
else if (src_stride == columns && dst_stride == columns) {
columns *= rows;
cols = divideUp(columns, 4, 2);
block.x = 256;
block.y = 1;
grid.x = divideUp(cols, 256, 8);
grid.y = 1;
absKernel10<<<grid, block, 0, stream>>>(src, columns, dst);
}
else {
absKernel11<<<grid, block, 0, stream>>>(src, rows, cols, src_stride, dst,
dst_stride);
}
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
RetCode abs(const float* src, int rows, int cols, int channels, int src_stride,
float* dst, int dst_stride, cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));
int columns = cols * channels;
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
if ((src_stride & 7) == 0 && (dst_stride & 7) == 0) {
cols = divideUp(columns, 2, 1);
absKernel0<<<grid, block, 0, stream>>>(src, rows, cols, src_stride, dst,
dst_stride);
}
else {
absKernel1<<<grid, block, 0, stream>>>(src, rows, columns, src_stride, dst,
dst_stride);
}
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode Abs<schar, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const schar* inData,
int outWidthStride,
schar* outData) {
RetCode code = abs(inData, height, width, 1, inWidthStride, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode Abs<schar, 3>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const schar* inData,
int outWidthStride,
schar* outData) {
RetCode code = abs(inData, height, width, 3, inWidthStride, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode Abs<schar, 4>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const schar* inData,
int outWidthStride,
schar* outData) {
RetCode code = abs(inData, height, width, 4, inWidthStride, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode Abs<float, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = abs(inData, height, width, 1, inWidthStride, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode Abs<float, 3>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = abs(inData, height, width, 3, inWidthStride, outData,
outWidthStride, stream);
return code;
}
template <>
RetCode Abs<float, 4>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = abs(inData, height, width, 4, inWidthStride, outData,
outWidthStride, stream);
return code;
}
} // namespace cuda
} // namespace cv
} // namespace ppl
|
f3c633440805f5727fff292b54fb9459d30ff00f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//nvcc -o lab5_21 lab5_21.cu
/*Author:Pedro Silva*/
/*2. Implement a CUDA program that computes the sum of all the elements of a vector of
size N. Test it for several values of N.*/
/*2.1. Implement a simple version (without resorting to optimizations).*/
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
__global__ void vectorsum2_1(int * device_buffer, int N){
//THREAD ID
int index = threadIdx.x + blockIdx.x * blockDim.x;
/*We have N elements in the vector. We break the sum of all the elements down into sums of one element with the element to its right.
Repeat until only one element is left (each iteration leaves half of the elements).*/
//Assume that only one block of threads is launched (blockIdx.x = 1 for all threads.)
////int num_of_threads = blockDim.x;
int distance = blockDim.x; //Initial distance between the elements to add
int num_of_blocks = N / blockDim.x + 1;
/*This loop runs while there is more than one thread, and only if the thread is still within the "scope" of the sum.*/
while(num_of_blocks > 1 && blockIdx.x < num_of_blocks && index < N){
int primeiro = index * distance * 2; //on the first iteration: 1st thread, index 0, 2nd thread, index 2, 3rd thread, index 4
int segundo = primeiro + distance; //on the first iteration: 1st thread, index 1, 2nd thread, index 3, 3rd thread, index 5
printf("DEVICE: Thread %i. Adding %i + %i\n", index, device_buffer[primeiro], device_buffer[segundo]);
device_buffer[primeiro] = device_buffer[primeiro] + device_buffer[segundo];
//one iteration done: double the distance between the elements to add and halve the number of active threads
distance *= 2;
num_of_blocks--;
}
int num_of_threads = blockDim.x/2;
if(num_of_blocks == 1 && num_of_threads > 1){
int primeiro = index * 2;
int segundo = primeiro + 1;
device_buffer[primeiro] = device_buffer[primeiro] + device_buffer[segundo];
num_of_threads /=2;
}
}
int main(){
struct timespec start_device, end_device, start_host, end_host;
double initialTime, finalTime;
int result;
for(int N = 8; N <= 1024; N = N*2){
printf("N = %i.\n", N);
int *device_buffer = NULL;
int *host_buffer = NULL;
int err = hipMalloc(&device_buffer, sizeof(int) * N);
if(err != hipSuccess){
fprintf(stderr, "Error allocating memory on device.\n");
return(-1);
}
//Initialize the vector of N elements:
host_buffer = (int*)malloc(N * sizeof(int));
for(int i = 0; i < N; i++)
host_buffer[i] = i;
//DEVICE
//send data to the device
hipMemcpy(device_buffer, host_buffer, N * sizeof(int), hipMemcpyHostToDevice);
//start the computation
clock_gettime(CLOCK_MONOTONIC, &start_device);
hipLaunchKernelGGL(( vectorsum2_1), dim3(N/256 + 1), dim3(256), 0, 0, device_buffer, N);
clock_gettime(CLOCK_MONOTONIC, &end_device);
//measure elapsed time
initialTime = (start_device.tv_sec*1e3) + (start_device.tv_nsec*1e-6);
finalTime = (end_device.tv_sec*1e3) + (end_device.tv_nsec*1e-6);
/*Fetch the result of the sum from the first element of device_buffer*/
hipMemcpy(&result, device_buffer, sizeof(int), hipMemcpyDeviceToHost);
printf("DEVICE: Result of summing a vector of %i elements: %i.\n", N, result);
printf("DEVICE: Execution time (device): \t%fms.\n", (finalTime - initialTime));
//HOST
result = 0;
clock_gettime(CLOCK_MONOTONIC, &start_host);
for(int i = 0; i < N; i++)
result += host_buffer[i];
clock_gettime(CLOCK_MONOTONIC, &end_host);
initialTime = (start_host.tv_sec*1e3) + (start_host.tv_nsec*1e-6);
finalTime = (end_host.tv_sec*1e3) + (end_host.tv_nsec*1e-6);
printf("HOST: Resultado da soma de um vector de %i elementos: %i.\n", N, result);
printf("HOST: Tempo de execuo (device): \t%fms.\n", (finalTime - initialTime));
hipFree(device_buffer);
free(host_buffer);
return 0; //TEMPORARY. Only testing for N = 8 for now
}
return 0;
}
| f3c633440805f5727fff292b54fb9459d30ff00f.cu | //nvcc -o lab5_21 lab5_21.cu
/*Author:Pedro Silva*/
/*2. Implement a CUDA program that computes the sum of all the elements of a vector of
size N. Test it for several values of N.*/
/*2.1. Implement a simple version (without resorting to optimizations).*/
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
__global__ void vectorsum2_1(int * device_buffer, int N){
//THREAD ID
int index = threadIdx.x + blockIdx.x * blockDim.x;
/*We have N elements in the vector. We break the sum of all the elements down into sums of one element with the element to its right.
Repeat until only one element is left (each iteration leaves half of the elements).*/
//Assume that only one block of threads is launched (blockIdx.x = 1 for all threads.)
////int num_of_threads = blockDim.x;
int distance = blockDim.x; //Initial distance between the elements to add
int num_of_blocks = N / blockDim.x + 1;
/*This loop runs while there is more than one thread, and only if the thread is still within the "scope" of the sum.*/
while(num_of_blocks > 1 && blockIdx.x < num_of_blocks && index < N){
int primeiro = index * distance * 2; //on the first iteration: 1st thread, index 0, 2nd thread, index 2, 3rd thread, index 4
int segundo = primeiro + distance; //on the first iteration: 1st thread, index 1, 2nd thread, index 3, 3rd thread, index 5
printf("DEVICE: Thread %i. Adding %i + %i\n", index, device_buffer[primeiro], device_buffer[segundo]);
device_buffer[primeiro] = device_buffer[primeiro] + device_buffer[segundo];
//one iteration done: double the distance between the elements to add and halve the number of active threads
distance *= 2;
num_of_blocks--;
}
int num_of_threads = blockDim.x/2;
if(num_of_blocks == 1 && num_of_threads > 1){
int primeiro = index * 2;
int segundo = primeiro + 1;
device_buffer[primeiro] = device_buffer[primeiro] + device_buffer[segundo];
num_of_threads /=2;
}
}
int main(){
struct timespec start_device, end_device, start_host, end_host;
double initialTime, finalTime;
int result;
for(int N = 8; N <= 1024; N = N*2){
printf("N = %i.\n", N);
int *device_buffer = NULL;
int *host_buffer = NULL;
int err = cudaMalloc(&device_buffer, sizeof(int) * N);
if(err != cudaSuccess){
fprintf(stderr, "Error allocating memory on device.\n");
return(-1);
}
//Initialize the vector of N elements:
host_buffer = (int*)malloc(N * sizeof(int));
for(int i = 0; i < N; i++)
host_buffer[i] = i;
//DEVICE
//send data to the device
cudaMemcpy(device_buffer, host_buffer, N * sizeof(int), cudaMemcpyHostToDevice);
//start the computation
clock_gettime(CLOCK_MONOTONIC, &start_device);
vectorsum2_1<<< N/256 + 1, 256>>>(device_buffer, N);
clock_gettime(CLOCK_MONOTONIC, &end_device);
//measure elapsed time
initialTime = (start_device.tv_sec*1e3) + (start_device.tv_nsec*1e-6);
finalTime = (end_device.tv_sec*1e3) + (end_device.tv_nsec*1e-6);
/*Fetch the result of the sum from the first element of device_buffer*/
cudaMemcpy(&result, device_buffer, sizeof(int), cudaMemcpyDeviceToHost);
printf("DEVICE: Result of summing a vector of %i elements: %i.\n", N, result);
printf("DEVICE: Execution time (device): \t%fms.\n", (finalTime - initialTime));
//HOST
result = 0;
clock_gettime(CLOCK_MONOTONIC, &start_host);
for(int i = 0; i < N; i++)
result += host_buffer[i];
clock_gettime(CLOCK_MONOTONIC, &end_host);
initialTime = (start_host.tv_sec*1e3) + (start_host.tv_nsec*1e-6);
finalTime = (end_host.tv_sec*1e3) + (end_host.tv_nsec*1e-6);
printf("HOST: Resultado da soma de um vector de %i elementos: %i.\n", N, result);
printf("HOST: Tempo de execução (device): \t%fms.\n", (finalTime - initialTime));
cudaFree(device_buffer);
free(host_buffer);
return 0; //TEMPORARY. Only testing for N = 8 for now
}
return 0;
}
|
a1063c162fd6d9a83dc0d9b18a3b87a3659261d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <algorithm>
#include "FASTAParsers.h"
#include "SWSolver_char.h"
#define GAP_PENALTY 2
// define affine penalty ?
#define FROM_LEFT 1
#define FROM_TOP 2
#define FROM_TOP_LEFT 3
#define MAX_BLOCK_SIZE 1024
#define MAX_GRID_DIM 65535
#define BLOCK_Y_DIM 32.0
// first is sequence ID, second is max score
int blosum50_alpha[25][25] = {
// A B C D E F G H I J K L M N P Q R S T V W X Y Z *
/* A */ { 5,-2,-1,-2,-1,-3, 0,-2,-1,-2,-1,-2,-1,-1,-1,-1,-2, 1, 0, 0,-3,-1,-2,-1,-5 },
/* B */ {-2, 6,-3, 6, 1,-4,-1, 0,-4,-4, 0,-4,-3, 5,-2, 0,-1, 0, 0,-3,-5,-1,-3, 1,-5 },
/* C */ {-1,-3,13,-4,-3,-2,-3,-3,-2,-2,-3,-2,-2,-2,-4,-3,-4,-1,-1,-1,-5,-1,-3,-3,-5 },
/* D */ {-2, 6,-4, 8, 2,-5,-1,-1,-4,-4,-1,-4,-4, 2,-1, 0,-2, 0,-1,-4,-5,-1,-3, 1,-5 },
/* E */ {-1, 1,-3, 2, 6,-3,-3, 0,-4,-3, 1,-3,-2, 0,-1, 2, 0,-1,-1,-3,-3,-1,-2, 5,-5 },
/* F */ {-3,-4,-2,-5,-3, 8,-4,-1, 0, 1,-4, 1, 0,-4,-4,-4,-3,-3,-2,-1, 1,-1, 4,-4,-5 },
/* G */ { 0,-1,-3,-1,-3,-4, 8,-2,-4,-4,-2,-4,-3, 0,-2,-2,-3, 0,-2,-4,-3,-1,-3,-2,-5 },
/* H */ {-2, 0,-3,-1, 0,-1,-2,10,-4,-3, 0,-3,-1, 1,-2, 1, 0,-1,-2,-4,-3,-1, 2, 0,-5 },
/* I */ {-1,-4,-2,-4,-4, 0,-4,-4, 5, 4,-3, 2, 2,-3,-3,-3,-4,-3,-1, 4,-3,-1,-1,-3,-5 },
/* J */ {-2,-4,-2,-4,-3, 1,-4,-3, 4, 4,-3, 4, 2,-4,-3,-3,-3,-3,-1, 2,-2,-1,-1,-3,-5 },
/* K */ {-1, 0,-3,-1, 1,-4,-2, 0,-3,-3, 6,-3,-2, 0,-1, 2, 3, 0,-1,-3,-3,-1,-2, 1,-5 },
/* L */ {-2,-4,-2,-4,-3, 1,-4,-3, 2, 4,-3, 5, 3,-4,-4,-2,-3,-3,-1, 1, 2,-1,-1,-3,-5 },
/* M */ {-1,-3,-2,-4,-2, 0,-3,-1, 2, 2,-2, 3, 7,-2,-3, 0,-2,-2,-1, 1,-1,-1, 0,-1,-5 },
/* N */ {-1, 5,-2, 2, 0,-4, 0, 1,-3,-4, 0,-4,-2, 7,-2, 0,-1, 1, 0,-3,-4,-1,-2, 0,-5 },
/* P */ {-1,-2,-4,-1,-1,-4,-2,-2,-3,-3,-1,-4,-3,-2,10,-1,-3,-1,-1,-3,-4,-1,-3,-1,-5 },
/* Q */ {-1, 0,-3, 0, 2,-4,-2, 1,-3,-3, 2,-2, 0, 0,-1, 7, 1, 0,-1,-3,-1,-1,-1, 4,-5 },
/* R */ {-2,-1,-4,-2, 0,-3,-3, 0,-4,-3, 3,-3,-2,-1,-3, 1, 7,-1,-1,-3,-3,-1,-1, 0,-5 },
/* S */ { 1, 0,-1, 0,-1,-3, 0,-1,-3,-3, 0,-3,-2, 1,-1, 0,-1, 5, 2,-2,-4,-1,-2, 0,-5 },
/* T */ { 0, 0,-1,-1,-1,-2,-2,-2,-1,-1,-1,-1,-1, 0,-1,-1,-1, 2, 5, 0,-3,-1,-2,-1,-5 },
/* V */ { 0,-3,-1,-4,-3,-1,-4,-4, 4, 2,-3, 1, 1,-3,-3,-3,-3,-2, 0, 5,-3,-1,-1,-3,-5 },
/* W */ {-3,-5,-5,-5,-3, 1,-3,-3,-3,-2,-3,-2,-1,-4,-4,-1,-3,-4,-3,-3,15,-1, 2,-2,-5 },
/* X */ {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-5 },
/* Y */ {-2,-3,-3,-3,-2, 4,-3, 2,-1,-1,-2,-1, 0,-2,-3,-1,-1,-2,-2,-1, 2,-1, 8,-2,-5 },
/* Z */ {-1, 1,-3, 1, 5,-4,-2, 0,-3,-3, 1,-3,-1, 0,-1, 4, 0, 0,-1,-3,-2,-1,-2, 5,-5 },
/* * */ {-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, 1 }
};
using namespace std;
__constant__ char constQuery[1024];
__constant__ int constSubstitutionMatrix[625];
char convertStringToChar(char character) {
switch(character)
{
case 'A': { return 'A'; }
case 'R': { return 'R'; }
case 'N': { return 'N'; }
case 'D': { return 'D'; }
case 'C': { return 'C'; }
case 'Q': { return 'Q'; }
case 'E': { return 'E'; }
case 'G': { return 'G'; }
case 'H': { return 'H'; }
case 'I': { return 'I'; }
case 'L': { return 'L'; }
case 'K': { return 'K'; }
case 'M': { return 'M'; }
case 'F': { return 'F'; }
case 'P': { return 'P'; }
case 'S': { return 'S'; }
case 'T': { return 'T'; }
case 'W': { return 'W'; }
case 'Y': { return 'Y'; }
case 'V': { return 'V'; }
case 'B': { return 'B'; }
case 'J': { return 'J'; }
case 'Z': { return 'Z'; }
case 'X': { return 'X'; }
}
return '*';
}
// Kernel function for computing the scoring matrix of a sequence
__global__ void f_scoreSequence(char* subject, float* scoringMatrix, float* maxScoreList, int width, int height, int numSubjects) {
//register int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
register int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
if (yIndex == 0) {
//printf("GPU: %d %f\n", constSubstitutionMatrix[24], constQuery[0]);
}
float maxScore = 0;
if (yIndex < numSubjects) {
for (int i = 1; i < (height + 1); i++) {
for (int j = 1; j < (width + 1); j++) {
float score = 0;
score = max(score, scoringMatrix[(width + 1)*(height + 1)*yIndex + (i * (width + 1)) + j - 1] - GAP_PENALTY);
score = max(score, scoringMatrix[(width + 1)*(height + 1)*yIndex + ((i - 1) * (width + 1)) + j] - GAP_PENALTY);
int similarityScore = 0;
int query = (int)constQuery[i-1];
int subjectValue = (int)subject[width*yIndex + j -1];
int searchMatrix;
// first determine how to search the constSubstitutionMatrix
// have both in V-*, 1 in V-Z 1 in P-T (vice versa flip them), 1 in V-Z 1 in A-N (vice versa flip them),
// both in P-T, 1 in P-T 1 in A-N (vice versa flip them), both in A-N
// 6 possibilities after collapsing vice versas
if ((query > 85 || query == 42) && (subjectValue > 85 || subjectValue == 42)) // both in V-*
searchMatrix = 1;
else if ((query > 85 || query == 42) && (subjectValue > 79)) // query in V-*, subjectValue in P-T
searchMatrix = 2;
else if ((query > 79) && (subjectValue > 85 || subjectValue == 42)) {// query in P-T, subjectValue in V-*
int temp = query;
query = subjectValue;
subjectValue = temp;
searchMatrix = 2;
}
else if (query > 85 || query == 42) // query in V-*, subjectValue in A-N
searchMatrix = 3;
else if (subjectValue > 85 || subjectValue == 42) { // query in A-N, subjectValue in V-*
int temp = query;
query = subjectValue;
subjectValue = temp;
searchMatrix = 3;
}
else if (query > 79 && subjectValue > 79) // both in P-T
searchMatrix = 4;
else if (query > 79) // query in P-T, subjectValue in A-N
searchMatrix = 5;
else if (subjectValue > 79) { // query in A-N, subjectValue in P-T
int temp = query;
query = subjectValue;
subjectValue = temp;
searchMatrix = 5;
}
else // both in A-N
searchMatrix = 6;
// based on the searchMatrix value, use a switch case and calculate the similarityScore
// if value is * (42), result is either -5 (not similar) or 1 (similar)
switch(searchMatrix)
{
case 1: {
if (query == 42 && subjectValue == 42) similarityScore = 1;
else if ((query == 42 && subjectValue != 42) || (query != 42 && subjectValue == 42)) similarityScore = -5;
else similarityScore = constSubstitutionMatrix[((query-67) * 25) + subjectValue-67];
break; }
case 2: {
if (query == 42) similarityScore = -5;
else similarityScore = constSubstitutionMatrix[((query-67) * 25) + subjectValue-66];
break; }
case 3: {
if (query == 42) similarityScore = -5;
else similarityScore = constSubstitutionMatrix[((query-67) * 25) + subjectValue-65];
break; }
case 4: {
similarityScore = constSubstitutionMatrix[((query-66) * 25) + subjectValue-66];
break; }
case 5: {
similarityScore = constSubstitutionMatrix[((query-66) * 25) + subjectValue-65];
break; }
case 6: {
similarityScore = constSubstitutionMatrix[((query-65) * 25) + subjectValue-65];
break; }
}
//int similarityScore = constSubstitutionMatrix[((int)constQuery[i - 1] * 25) + (int)subject[width*yIndex + j - 1]];
score = max(score, scoringMatrix[(width + 1)*(height + 1)*yIndex + ((i - 1) * (width + 1)) + j - 1] + similarityScore);
maxScore = max(maxScore, score);
scoringMatrix[(width + 1)*(height + 1)*yIndex + (i * (width + 1)) + j] = score;
}
}
maxScoreList[yIndex] = maxScore;
}
}
vector<seqid_score> smith_waterman_cuda_char(FASTAQuery &query, FASTADatabase &db) {
string querySequenceBuffer = query.get_buffer();
char querySequence[querySequenceBuffer.length() + 1];
copy(querySequenceBuffer.begin(), querySequenceBuffer.end(), querySequence);
querySequence[querySequenceBuffer.length()] = '\0';
vector<seqid_score> scores;
char* d_input_subject;
hipMallocManaged((void**) &d_input_subject, (db.largestSubjectLength * db.numSubjects) * sizeof(char));
/*
memcpy(d_input_subject, subjectSequences[0], ((largestSubjectLength * numSubjects) + 1) * sizeof(char));
for (int i = 0; i < numSubjects; i++) {
strcat(d_input_subject, subjectSequences[i]);
}
*/
// Set up offsets
int grid_y_dim = ceil(db.numSubjects / BLOCK_Y_DIM);
char* d_input_offsets;
hipMallocManaged((void**) &d_input_offsets, grid_y_dim * sizeof(char));
float* d_output_scoring;
hipMallocManaged((void**) &d_output_scoring, ((strlen(querySequence) + 1) * (db.largestSubjectLength + 1) * db.numSubjects) * sizeof(float));
float* d_output_max_score;
hipMallocManaged((void**) &d_output_max_score, db.numSubjects * sizeof(float));
for (int i = 0; i < db.numSubjects; i++) {
for (int j = 0; j < db.largestSubjectLength; j++) { // Will need to pad here
if (j < db.subjectSequences[i].sequence.length()) {
d_input_subject[i*db.largestSubjectLength + j] = convertStringToChar(db.subjectSequences[i].sequence[j]);
}
else d_input_subject[i*db.largestSubjectLength + j] = '*';
}
}
hipMemcpyToSymbol(constQuery, querySequence, sizeof(char)*strlen(querySequence));
hipMemcpyToSymbol(constSubstitutionMatrix, blosum50_alpha, sizeof(int)*625);
// Call GPU
dim3 block(1, BLOCK_Y_DIM);
dim3 grid(1, grid_y_dim);
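// One thread per subject sequence along y: each thread fills its own (query+1) x (subject+1) scoring matrix serially and keeps the running maximum score.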
hipLaunchKernelGGL(( f_scoreSequence), dim3(grid), dim3(block), 0, 0, d_input_subject, d_output_scoring, d_output_max_score, db.largestSubjectLength, strlen(querySequence), db.numSubjects);
hipDeviceSynchronize();
cout << endl << db.subjectSequences[0].sequence << endl;
/*
// Print results for 1 subject query
for (int subject = 0; subject < db.numSubjects; subject++) {
string seqA = querySequence;
string seqB = db.subjectSequences[subject].sequence;
cout << " ";
for (int j = 0; j < (seqB.length() + 1); j++) {
cout << seqB[j] << " ";
}
cout << endl;
for (int i = 0; i < (seqA.length() + 1); i++) {
if (i != 0) cout << seqA[i - 1] << " ";
else cout << " ";
for (int j = 0; j < (seqB.length() + 1); j++) {
cout << d_output_scoring[((db.largestSubjectLength + 1) * (querySequence.length() + 1) * subject) + (i * (seqB.length() + 1)) + j] << " ";
}
cout << endl;
}
}*/
// Print results for 1 subject query
for (int subject = 0; subject < db.numSubjects; subject++) {
scores.push_back(make_pair(db.subjectSequences[subject].id, d_output_max_score[subject])); // change this
}
// Free device memory
hipFree(d_input_subject);
hipFree(d_input_offsets);
hipFree(d_output_scoring);
hipFree(d_output_max_score);
hipDeviceReset();
return scores;
}
| a1063c162fd6d9a83dc0d9b18a3b87a3659261d9.cu | #include <iostream>
#include <algorithm>
#include "FASTAParsers.h"
#include "SWSolver_char.h"
#define GAP_PENALTY 2
// define affine penalty ?
#define FROM_LEFT 1
#define FROM_TOP 2
#define FROM_TOP_LEFT 3
#define MAX_BLOCK_SIZE 1024
#define MAX_GRID_DIM 65535
#define BLOCK_Y_DIM 32.0
// first is sequence ID, second is max score
int blosum50_alpha[25][25] = {
// A B C D E F G H I J K L M N P Q R S T V W X Y Z *
/* A */ { 5,-2,-1,-2,-1,-3, 0,-2,-1,-2,-1,-2,-1,-1,-1,-1,-2, 1, 0, 0,-3,-1,-2,-1,-5 },
/* B */ {-2, 6,-3, 6, 1,-4,-1, 0,-4,-4, 0,-4,-3, 5,-2, 0,-1, 0, 0,-3,-5,-1,-3, 1,-5 },
/* C */ {-1,-3,13,-4,-3,-2,-3,-3,-2,-2,-3,-2,-2,-2,-4,-3,-4,-1,-1,-1,-5,-1,-3,-3,-5 },
/* D */ {-2, 6,-4, 8, 2,-5,-1,-1,-4,-4,-1,-4,-4, 2,-1, 0,-2, 0,-1,-4,-5,-1,-3, 1,-5 },
/* E */ {-1, 1,-3, 2, 6,-3,-3, 0,-4,-3, 1,-3,-2, 0,-1, 2, 0,-1,-1,-3,-3,-1,-2, 5,-5 },
/* F */ {-3,-4,-2,-5,-3, 8,-4,-1, 0, 1,-4, 1, 0,-4,-4,-4,-3,-3,-2,-1, 1,-1, 4,-4,-5 },
/* G */ { 0,-1,-3,-1,-3,-4, 8,-2,-4,-4,-2,-4,-3, 0,-2,-2,-3, 0,-2,-4,-3,-1,-3,-2,-5 },
/* H */ {-2, 0,-3,-1, 0,-1,-2,10,-4,-3, 0,-3,-1, 1,-2, 1, 0,-1,-2,-4,-3,-1, 2, 0,-5 },
/* I */ {-1,-4,-2,-4,-4, 0,-4,-4, 5, 4,-3, 2, 2,-3,-3,-3,-4,-3,-1, 4,-3,-1,-1,-3,-5 },
/* J */ {-2,-4,-2,-4,-3, 1,-4,-3, 4, 4,-3, 4, 2,-4,-3,-3,-3,-3,-1, 2,-2,-1,-1,-3,-5 },
/* K */ {-1, 0,-3,-1, 1,-4,-2, 0,-3,-3, 6,-3,-2, 0,-1, 2, 3, 0,-1,-3,-3,-1,-2, 1,-5 },
/* L */ {-2,-4,-2,-4,-3, 1,-4,-3, 2, 4,-3, 5, 3,-4,-4,-2,-3,-3,-1, 1, 2,-1,-1,-3,-5 },
/* M */ {-1,-3,-2,-4,-2, 0,-3,-1, 2, 2,-2, 3, 7,-2,-3, 0,-2,-2,-1, 1,-1,-1, 0,-1,-5 },
/* N */ {-1, 5,-2, 2, 0,-4, 0, 1,-3,-4, 0,-4,-2, 7,-2, 0,-1, 1, 0,-3,-4,-1,-2, 0,-5 },
/* P */ {-1,-2,-4,-1,-1,-4,-2,-2,-3,-3,-1,-4,-3,-2,10,-1,-3,-1,-1,-3,-4,-1,-3,-1,-5 },
/* Q */ {-1, 0,-3, 0, 2,-4,-2, 1,-3,-3, 2,-2, 0, 0,-1, 7, 1, 0,-1,-3,-1,-1,-1, 4,-5 },
/* R */ {-2,-1,-4,-2, 0,-3,-3, 0,-4,-3, 3,-3,-2,-1,-3, 1, 7,-1,-1,-3,-3,-1,-1, 0,-5 },
/* S */ { 1, 0,-1, 0,-1,-3, 0,-1,-3,-3, 0,-3,-2, 1,-1, 0,-1, 5, 2,-2,-4,-1,-2, 0,-5 },
/* T */ { 0, 0,-1,-1,-1,-2,-2,-2,-1,-1,-1,-1,-1, 0,-1,-1,-1, 2, 5, 0,-3,-1,-2,-1,-5 },
/* V */ { 0,-3,-1,-4,-3,-1,-4,-4, 4, 2,-3, 1, 1,-3,-3,-3,-3,-2, 0, 5,-3,-1,-1,-3,-5 },
/* W */ {-3,-5,-5,-5,-3, 1,-3,-3,-3,-2,-3,-2,-1,-4,-4,-1,-3,-4,-3,-3,15,-1, 2,-2,-5 },
/* X */ {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-5 },
/* Y */ {-2,-3,-3,-3,-2, 4,-3, 2,-1,-1,-2,-1, 0,-2,-3,-1,-1,-2,-2,-1, 2,-1, 8,-2,-5 },
/* Z */ {-1, 1,-3, 1, 5,-4,-2, 0,-3,-3, 1,-3,-1, 0,-1, 4, 0, 0,-1,-3,-2,-1,-2, 5,-5 },
/* * */ {-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5, 1 }
};
using namespace std;
__constant__ char constQuery[1024];
__constant__ int constSubstitutionMatrix[625];
char convertStringToChar(char character) {
switch(character)
{
case 'A': { return 'A'; }
case 'R': { return 'R'; }
case 'N': { return 'N'; }
case 'D': { return 'D'; }
case 'C': { return 'C'; }
case 'Q': { return 'Q'; }
case 'E': { return 'E'; }
case 'G': { return 'G'; }
case 'H': { return 'H'; }
case 'I': { return 'I'; }
case 'L': { return 'L'; }
case 'K': { return 'K'; }
case 'M': { return 'M'; }
case 'F': { return 'F'; }
case 'P': { return 'P'; }
case 'S': { return 'S'; }
case 'T': { return 'T'; }
case 'W': { return 'W'; }
case 'Y': { return 'Y'; }
case 'V': { return 'V'; }
case 'B': { return 'B'; }
case 'J': { return 'J'; }
case 'Z': { return 'Z'; }
case 'X': { return 'X'; }
}
return '*';
}
// Kernel function for computing the scoring matrix of a sequence
__global__ void f_scoreSequence(char* subject, float* scoringMatrix, float* maxScoreList, int width, int height, int numSubjects) {
//register int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
register int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
if (yIndex == 0) {
//printf("GPU: %d %f\n", constSubstitutionMatrix[24], constQuery[0]);
}
float maxScore = 0;
if (yIndex < numSubjects) {
for (int i = 1; i < (height + 1); i++) {
for (int j = 1; j < (width + 1); j++) {
float score = 0;
score = max(score, scoringMatrix[(width + 1)*(height + 1)*yIndex + (i * (width + 1)) + j - 1] - GAP_PENALTY);
score = max(score, scoringMatrix[(width + 1)*(height + 1)*yIndex + ((i - 1) * (width + 1)) + j] - GAP_PENALTY);
int similarityScore = 0;
int query = (int)constQuery[i-1];
int subjectValue = (int)subject[width*yIndex + j -1];
int searchMatrix;
// first determine how to search the constSubstitutionMatrix
// have both in V-*, 1 in V-Z 1 in P-T (vice versa flip them), 1 in V-Z 1 in A-N (vice versa flip them),
// both in P-T, 1 in P-T 1 in A-N (vice versa flip them), both in A-N
// 6 possibilities after collapsing vice versas
if ((query > 85 || query == 42) && (subjectValue > 85 || subjectValue == 42)) // both in V-*
searchMatrix = 1;
else if ((query > 85 || query == 42) && (subjectValue > 79)) // query in V-*, subjectValue in P-T
searchMatrix = 2;
else if ((query > 79) && (subjectValue > 85 || subjectValue == 42)) {// query in P-T, subjectValue in V-*
int temp = query;
query = subjectValue;
subjectValue = temp;
searchMatrix = 2;
}
else if (query > 85 || query == 42) // query in V-*, subjectValue in A-N
searchMatrix = 3;
else if (subjectValue > 85 || subjectValue == 42) { // query in A-N, subjectValue in V-*
int temp = query;
query = subjectValue;
subjectValue = temp;
searchMatrix = 3;
}
else if (query > 79 && subjectValue > 79) // both in P-T
searchMatrix = 4;
else if (query > 79) // query in P-T, subjectValue in A-N
searchMatrix = 5;
else if (subjectValue > 79) { // query in A-N, subjectValue in P-T
int temp = query;
query = subjectValue;
subjectValue = temp;
searchMatrix = 5;
}
else // both in A-N
searchMatrix = 6;
// based on the searchMatrix value, use a switch case and calculate the similarityScore
// if value is * (42), result is either -5 (not similar) or 1 (similar)
switch(searchMatrix)
{
case 1: {
if (query == 42 && subjectValue == 42) similarityScore = 1;
else if ((query == 42 && subjectValue != 42) || (query != 42 && subjectValue == 42)) similarityScore = -5;
else similarityScore = constSubstitutionMatrix[((query-67) * 25) + subjectValue-67];
break; }
case 2: {
if (query == 42) similarityScore = -5;
else similarityScore = constSubstitutionMatrix[((query-67) * 25) + subjectValue-66];
break; }
case 3: {
if (query == 42) similarityScore = -5;
else similarityScore = constSubstitutionMatrix[((query-67) * 25) + subjectValue-65];
break; }
case 4: {
similarityScore = constSubstitutionMatrix[((query-66) * 25) + subjectValue-66];
break; }
case 5: {
similarityScore = constSubstitutionMatrix[((query-66) * 25) + subjectValue-65];
break; }
case 6: {
similarityScore = constSubstitutionMatrix[((query-65) * 25) + subjectValue-65];
break; }
}
//int similarityScore = constSubstitutionMatrix[((int)constQuery[i - 1] * 25) + (int)subject[width*yIndex + j - 1]];
score = max(score, scoringMatrix[(width + 1)*(height + 1)*yIndex + ((i - 1) * (width + 1)) + j - 1] + similarityScore);
maxScore = max(maxScore, score);
scoringMatrix[(width + 1)*(height + 1)*yIndex + (i * (width + 1)) + j] = score;
}
}
maxScoreList[yIndex] = maxScore;
}
}
vector<seqid_score> smith_waterman_cuda_char(FASTAQuery &query, FASTADatabase &db) {
string querySequenceBuffer = query.get_buffer();
char querySequence[querySequenceBuffer.length() + 1];
copy(querySequenceBuffer.begin(), querySequenceBuffer.end(), querySequence);
querySequence[querySequenceBuffer.length()] = '\0';
vector<seqid_score> scores;
char* d_input_subject;
cudaMallocManaged((void**) &d_input_subject, (db.largestSubjectLength * db.numSubjects) * sizeof(char));
/*
memcpy(d_input_subject, subjectSequences[0], ((largestSubjectLength * numSubjects) + 1) * sizeof(char));
for (int i = 0; i < numSubjects; i++) {
strcat(d_input_subject, subjectSequences[i]);
}
*/
// Set up offsets
int grid_y_dim = ceil(db.numSubjects / BLOCK_Y_DIM);
char* d_input_offsets;
cudaMallocManaged((void**) &d_input_offsets, grid_y_dim * sizeof(char));
float* d_output_scoring;
cudaMallocManaged((void**) &d_output_scoring, ((strlen(querySequence) + 1) * (db.largestSubjectLength + 1) * db.numSubjects) * sizeof(float));
float* d_output_max_score;
cudaMallocManaged((void**) &d_output_max_score, db.numSubjects * sizeof(float));
for (int i = 0; i < db.numSubjects; i++) {
for (int j = 0; j < db.largestSubjectLength; j++) { // Will need to pad here
if (j < db.subjectSequences[i].sequence.length()) {
d_input_subject[i*db.largestSubjectLength + j] = convertStringToChar(db.subjectSequences[i].sequence[j]);
}
else d_input_subject[i*db.largestSubjectLength + j] = '*';
}
}
cudaMemcpyToSymbol(constQuery, querySequence, sizeof(char)*strlen(querySequence));
cudaMemcpyToSymbol(constSubstitutionMatrix, blosum50_alpha, sizeof(int)*625);
// Call GPU
dim3 block(1, BLOCK_Y_DIM);
dim3 grid(1, grid_y_dim);
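// One thread per subject sequence along y: each thread fills its own (query+1) x (subject+1) scoring matrix serially and keeps the running maximum score.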
f_scoreSequence<<<grid, block>>>(d_input_subject, d_output_scoring, d_output_max_score, db.largestSubjectLength, strlen(querySequence), db.numSubjects);
cudaDeviceSynchronize();
cout << endl << db.subjectSequences[0].sequence << endl;
/*
// Print results for 1 subject query
for (int subject = 0; subject < db.numSubjects; subject++) {
string seqA = querySequence;
string seqB = db.subjectSequences[subject].sequence;
cout << " ";
for (int j = 0; j < (seqB.length() + 1); j++) {
cout << seqB[j] << " ";
}
cout << endl;
for (int i = 0; i < (seqA.length() + 1); i++) {
if (i != 0) cout << seqA[i - 1] << " ";
else cout << " ";
for (int j = 0; j < (seqB.length() + 1); j++) {
cout << d_output_scoring[((db.largestSubjectLength + 1) * (querySequence.length() + 1) * subject) + (i * (seqB.length() + 1)) + j] << " ";
}
cout << endl;
}
}*/
// Print results for 1 subject query
for (int subject = 0; subject < db.numSubjects; subject++) {
scores.push_back(make_pair(db.subjectSequences[subject].id, d_output_max_score[subject])); // change this
}
// Free device memory
cudaFree(d_input_subject);
cudaFree(d_input_offsets);
cudaFree(d_output_scoring);
cudaFree(d_output_max_score);
cudaDeviceReset();
return scores;
}
|
1e38cea7e80312cb47be49242bfe16e5a70419b3.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<math.h>
#include<time.h>
#include<cuda.h>
#include<hipfft.h>
#include<cuda_runtime.h>
#include"pfafft.c"//keeps npfar usable even when Seismic Unix (su) cannot be called
//#include <cutil_inline.h>
//#include <cutil.h>
int main()
{
int nx,nt,nw,i,ix,it;//nx:the number of traces nt:the number of time samples
int NX,BATCH,NX_t;
float **a_input;
float *input;
float *amp;//testing
hipfftHandle plan;
hipfftComplex *data;
clock_t t_z,t_f;
nx=5300;
nt=5001;
NX=npfar(nt);//npfar,from seismic unix
BATCH=nx;
nw=NX/2+1;
NX_t=(NX/2+1)*2;//NX_t makes sure that the array can meet the principle of "in-place"
input=(float*)calloc(NX_t*BATCH,sizeof(float));
amp=(float*)calloc(nw,sizeof(float));//testing
a_input=(float**)calloc(nt,sizeof(float*));
for(it=0;it<nt;it++){
a_input[it]=(float*)calloc(nx,sizeof(float));
}
FILE *fp;
fp=fopen("rec_u_3200.bin","rb");
for(it=0;it<nt;it++){
for(ix=0;ix<nx;ix++){
fread(&a_input[it][ix],sizeof(float),1,fp);
}
}
fclose(fp);
for(ix=0;ix<BATCH;ix++){
for(it=0;it<nt;it++){
input[ix*NX_t+it]=a_input[it][ix];
}
}
printf("re_transpose_done !!!\n");
hipMalloc((void**)&data, sizeof(hipfftComplex)*(NX/2+1)*BATCH);
hipMemcpy(data,input,NX_t*BATCH*sizeof(float),hipMemcpyHostToDevice);
t_z=clock();
hipfftPlan1d(&plan, NX, HIPFFT_R2C, BATCH);
hipfftExecR2C(plan, (hipfftReal*)data, data);
t_f=clock();
hipfftDestroy(plan);
printf("\nCalculating time:%ld (cycles) \n\n",t_f-t_z);
printf("\nCalculating time:%f (ms) \n\n",(double)(t_f-t_z)/CLOCKS_PER_SEC);
hipMemcpy(input,data,nx*nt*sizeof(float),hipMemcpyDeviceToHost);
fp=fopen("before_cufft.bin","wb");//testing
for(it=0;it<nt;it++){
for(ix=0;ix<1;ix++){
fwrite(&a_input[it][ix],sizeof(float),1,fp);
}
}
fclose(fp);
fp=fopen("after_cufft.bin","wb");//testing
for(it=0;it<nt;it++){
fwrite(&input[it],sizeof(float),1,fp);
}
fclose(fp);
hipFree(data);
for(i=0;i<nt/2;i++){
amp[i]=sqrt(input[499*NX_t+2*i]*input[499*NX_t+2*i]+input[499*NX_t+2*i+1]*input[499*NX_t+2*i+1]);
}
fp=fopen("amp.bin","wb");//testing
fwrite(amp,sizeof(float),nt/2,fp);
fclose(fp);
return 0;
}
| 1e38cea7e80312cb47be49242bfe16e5a70419b3.cu | #include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<math.h>
#include<time.h>
#include<cuda.h>
#include<cufft.h>
#include<cuda_runtime.h>
#include"pfafft.c"//keeps npfar usable even when Seismic Unix (su) cannot be called
//#include <cutil_inline.h>
//#include <cutil.h>
int main()
{
int nx,nt,nw,i,ix,it;//nx:the number of traces nt:the number of time samples
int NX,BATCH,NX_t;
float **a_input;
float *input;
float *amp;//testing
cufftHandle plan;
cufftComplex *data;
clock_t t_z,t_f;
nx=5300;
nt=5001;
NX=npfar(nt);//npfar,from seismic unix
BATCH=nx;
nw=NX/2+1;
NX_t=(NX/2+1)*2;//NX_t makes sure that the array can meet the principle of "in-place"
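/* In-place R2C layout assumed by this padding: a real-to-complex FFT of NX samples
   yields NX/2+1 cufftComplex values, i.e. (NX/2+1)*2 floats, so each trace is padded
   to NX_t floats and the forward transform can overwrite its own input buffer. */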
input=(float*)calloc(NX_t*BATCH,sizeof(float));
amp=(float*)calloc(nw,sizeof(float));//testing
a_input=(float**)calloc(nt,sizeof(float*));
for(it=0;it<nt;it++){
a_input[it]=(float*)calloc(nx,sizeof(float));
}
FILE *fp;
fp=fopen("rec_u_3200.bin","rb");
for(it=0;it<nt;it++){
for(ix=0;ix<nx;ix++){
fread(&a_input[it][ix],sizeof(float),1,fp);
}
}
fclose(fp);
for(ix=0;ix<BATCH;ix++){
for(it=0;it<nt;it++){
input[ix*NX_t+it]=a_input[it][ix];
}
}
printf("re_transpose_done !!!\n");
cudaMalloc((void**)&data, sizeof(cufftComplex)*(NX/2+1)*BATCH);
cudaMemcpy(data,input,NX_t*BATCH*sizeof(float),cudaMemcpyHostToDevice);
t_z=clock();
cufftPlan1d(&plan, NX, CUFFT_R2C, BATCH);
cufftExecR2C(plan, (cufftReal*)data, data);
t_f=clock();
cufftDestroy(plan);
printf("\nCalculating time:%ld (cycles) \n\n",t_f-t_z);
printf("\nCalculating time:%f (ms) \n\n",(double)(t_f-t_z)/CLOCKS_PER_SEC);
cudaMemcpy(input,data,nx*nt*sizeof(float),cudaMemcpyDeviceToHost);
fp=fopen("before_cufft.bin","wb");//testing
for(it=0;it<nt;it++){
for(ix=0;ix<1;ix++){
fwrite(&a_input[it][ix],sizeof(float),1,fp);
}
}
fclose(fp);
fp=fopen("after_cufft.bin","wb");//testing
for(it=0;it<nt;it++){
fwrite(&input[it],sizeof(float),1,fp);
}
fclose(fp);
cudaFree(data);
for(i=0;i<nt/2;i++){
amp[i]=sqrt(input[499*NX_t+2*i]*input[499*NX_t+2*i]+input[499*NX_t+2*i+1]*input[499*NX_t+2*i+1]);
}
fp=fopen("amp.bin","wb");//testing
fwrite(amp,sizeof(float),nt/2,fp);
fclose(fp);
return 0;
}
|
5b8498d969932b577682c44496fa7859f90d6c60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
void CUDA_CALL(hipError_t result)
{
if (result != hipSuccess)
{
std::cout << "Error " << result << "\n";
std::cout << hipGetErrorString(result) << "\n";
exit(1);
}
}
texture<float, hipTextureType1D, hipReadModeElementType> texRef;
texture<float, 2> texRef2;
// Simple transformation kernel
__global__ void transformKernel(float* output, int width, int height, int output_width, int output_height, float theta)
{
// Calculate normalized texture coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
float u = x / (float)width;
float v = y / (float)height;
float tu = ((float)width * x) / output_width;
float tv = ((float)height * y) / output_height;
output[y * output_width + x] = tex2D(texRef2, tu, tv);
}
// Host code
int main()
{
int width = 32;
int height = 16;
int size = width * height;
float * h_data = (float*) malloc(width * height * sizeof(float));
for (int i = 0 ; i < height; i++)
for (int j = 0; j < width; j++)
h_data[i * width + j] = i * width + j + 1;
printf ("\n Original array \n");
for (int i = 0; i < height; i++)
{
for (int j = 0 ; j < width; j++)
printf ("%f ", h_data[i*width + j]);
printf ("\n");
}
float * d_data;
CUDA_CALL( hipMalloc(&d_data, size * sizeof(float)) );
// Copy to device memory some data located at address h_data
// in host memory
CUDA_CALL( hipMemcpy(d_data, h_data, size * sizeof(float), hipMemcpyHostToDevice) );
// Set texture parameters
//texRef.addressMode[0] = hipAddressModeWrap;
//texRef.addressMode[1] = hipAddressModeWrap;
//texRef.filterMode = hipFilterModeLinear;
//texRef.normalized = true;
// Bind the array to the texture reference
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
CUDA_CALL( hipBindTexture2D(0, texRef2, d_data, desc, width, height, sizeof(float) * width) );
hipDeviceSynchronize();
int output_width = 32;
int output_height = 16;
// Set up block dims.
dim3 dimBlock(4, 4);
dim3 dimGrid((output_width - 1)/dimBlock.x + 1, (output_height - 1)/dimBlock.y + 1);
printf("blockDim = %d, %d\n", dimBlock.x, dimBlock.y);
printf("gridDim = %d, %d\n", dimGrid.x, dimGrid.y);
// Allocate result of transformation in device memory
int output_size = dimGrid.x * dimBlock.x * dimGrid.y * dimBlock.y;
printf("output_size %d\n", output_size);
float* output;
CUDA_CALL( hipMalloc(&output, output_size * sizeof(float)) );
float * h_output = (float*)malloc(output_size * sizeof(float));
hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, output, width, height, output_width, output_height, 90);
hipDeviceSynchronize();
CUDA_CALL( hipMemcpy(h_output, output, output_size * sizeof(float), hipMemcpyDeviceToHost) );
printf ("\nAfter operation\n");
for (int i = 0; i < output_height; i++)
{
for (int j = 0; j < output_width; j++)
printf ("%d %f\n", i*output_width + j, h_output[i*output_width + j]);
printf ("\n");
}
system ("pause");
// Free device memory
hipFree(d_data);
hipFree(output);
return 0;
}
| 5b8498d969932b577682c44496fa7859f90d6c60.cu | #include <iostream>
#include <stdio.h>
void CUDA_CALL(cudaError_t result)
{
if (result != cudaSuccess)
{
std::cout << "Error " << result << "\n";
std::cout << cudaGetErrorString(result) << "\n";
exit(1);
}
}
texture<float, cudaTextureType1D, cudaReadModeElementType> texRef;
texture<float, 2> texRef2;
// Simple transformation kernel
__global__ void transformKernel(float* output, int width, int height, int output_width, int output_height, float theta)
{
// Calculate normalized texture coordinates
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
float u = x / (float)width;
float v = y / (float)height;
float tu = ((float)width * x) / output_width;
float tv = ((float)height * y) / output_height;
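// Note: u, v and the theta parameter are not used below; the fetch uses the
// unnormalized texel coordinates tu, tv (the texture-parameter setup in main()
// that would enable normalized coordinates is commented out).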
output[y * output_width + x] = tex2D(texRef2, tu, tv);
}
// Host code
int main()
{
int width = 32;
int height = 16;
int size = width * height;
float * h_data = (float*) malloc(width * height * sizeof(float));
for (int i = 0 ; i < height; i++)
for (int j = 0; j < width; j++)
h_data[i * width + j] = i * width + j + 1;
printf ("\n Original array \n");
for (int i = 0; i < height; i++)
{
for (int j = 0 ; j < width; j++)
printf ("%f ", h_data[i*width + j]);
printf ("\n");
}
float * d_data;
CUDA_CALL( cudaMalloc(&d_data, size * sizeof(float)) );
// Copy to device memory some data located at address h_data
// in host memory
CUDA_CALL( cudaMemcpy(d_data, h_data, size * sizeof(float), cudaMemcpyHostToDevice) );
// Set texture parameters
//texRef.addressMode[0] = cudaAddressModeWrap;
//texRef.addressMode[1] = cudaAddressModeWrap;
//texRef.filterMode = cudaFilterModeLinear;
//texRef.normalized = true;
// Bind the array to the texture reference
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
CUDA_CALL( cudaBindTexture2D(0, texRef2, d_data, desc, width, height, sizeof(float) * width) );
cudaDeviceSynchronize();
int output_width = 32;
int output_height = 16;
// Set up block dims.
dim3 dimBlock(4, 4);
dim3 dimGrid((output_width - 1)/dimBlock.x + 1, (output_height - 1)/dimBlock.y + 1);
printf("blockDim = %d, %d\n", dimBlock.x, dimBlock.y);
printf("gridDim = %d, %d\n", dimGrid.x, dimGrid.y);
// Allocate result of transformation in device memory
int output_size = dimGrid.x * dimBlock.x * dimGrid.y * dimBlock.y;
printf("output_size %d\n", output_size);
float* output;
CUDA_CALL( cudaMalloc(&output, output_size * sizeof(float)) );
float * h_output = (float*)malloc(output_size * sizeof(float));
transformKernel<<<dimGrid, dimBlock>>>(output, width, height, output_width, output_height, 90);
cudaDeviceSynchronize();
CUDA_CALL( cudaMemcpy(h_output, output, output_size * sizeof(float), cudaMemcpyDeviceToHost) );
printf ("\nAfter operation\n");
for (int i = 0; i < output_height; i++)
{
for (int j = 0; j < output_width; j++)
printf ("%d %f\n", i*output_width + j, h_output[i*output_width + j]);
printf ("\n");
}
system ("pause");
// Free device memory
cudaFree(d_data);
cudaFree(output);
return 0;
}
|
30ab3650e0a6b195b4f8cb49598c5ca6d23e643b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "copy_if_hip.cuh"
#include <cudf/legacy/table.hpp>
namespace {
// Returns true if the mask is true and valid (non-null) for index i
// This is the filter functor for apply_boolean_mask
// Note we use a functor here so we can cast to a bitmask_t __restrict__
// pointer on the host side, which we can't do with a lambda.
template <bool has_data, bool has_nulls>
struct boolean_mask_filter
{
boolean_mask_filter(gdf_column const & boolean_mask) :
data{static_cast<bool *>(boolean_mask.data)},
bitmask{reinterpret_cast<bit_mask_t *>(boolean_mask.valid)}
{}
__device__ inline
bool operator()(cudf::size_type i)
{
bool valid = !has_nulls || bit_mask::is_valid(bitmask, i);
bool is_true = !has_data || data[i];
return is_true && valid;
}
bool const * __restrict__ data;
bit_mask_t const * __restrict__ bitmask;
};
} // namespace
namespace cudf {
/*
* Filters a table using a column of boolean values as a mask.
*
* calls copy_if() with the `boolean_mask_filter` functor.
*/
table apply_boolean_mask(table const &input,
gdf_column const &boolean_mask) {
if (boolean_mask.size == 0) return empty_like(input);
CUDF_EXPECTS(boolean_mask.dtype == GDF_BOOL8, "Mask must be Boolean type");
CUDF_EXPECTS(boolean_mask.data != nullptr ||
boolean_mask.valid != nullptr, "Null boolean_mask");
// zero-size inputs are OK, but otherwise input size must match mask size
CUDF_EXPECTS(input.num_rows() == 0 || input.num_rows() == boolean_mask.size,
"Column size mismatch");
if (boolean_mask.data == nullptr)
return detail::copy_if(input, boolean_mask_filter<false, true>{boolean_mask});
else if (not cudf::has_nulls(boolean_mask))
return detail::copy_if(input, boolean_mask_filter<true, false>{boolean_mask});
else
return detail::copy_if(input, boolean_mask_filter<true, true>{boolean_mask});
}
} // namespace cudf
| 30ab3650e0a6b195b4f8cb49598c5ca6d23e643b.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "copy_if.cuh"
#include <cudf/legacy/table.hpp>
namespace {
// Returns true if the mask is true and valid (non-null) for index i
// This is the filter functor for apply_boolean_mask
// Note we use a functor here so we can cast to a bitmask_t __restrict__
// pointer on the host side, which we can't do with a lambda.
template <bool has_data, bool has_nulls>
struct boolean_mask_filter
{
boolean_mask_filter(gdf_column const & boolean_mask) :
data{static_cast<bool *>(boolean_mask.data)},
bitmask{reinterpret_cast<bit_mask_t *>(boolean_mask.valid)}
{}
__device__ inline
bool operator()(cudf::size_type i)
{
bool valid = !has_nulls || bit_mask::is_valid(bitmask, i);
bool is_true = !has_data || data[i];
return is_true && valid;
}
bool const * __restrict__ data;
bit_mask_t const * __restrict__ bitmask;
};
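// apply_boolean_mask below instantiates this functor as <has_data, has_nulls> for the
// three possible mask layouts (validity bits only, data without nulls, data with nulls),
// so each instantiation compiles in only the checks it needs.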
} // namespace
namespace cudf {
/*
* Filters a table using a column of boolean values as a mask.
*
* calls copy_if() with the `boolean_mask_filter` functor.
*/
table apply_boolean_mask(table const &input,
gdf_column const &boolean_mask) {
if (boolean_mask.size == 0) return empty_like(input);
CUDF_EXPECTS(boolean_mask.dtype == GDF_BOOL8, "Mask must be Boolean type");
CUDF_EXPECTS(boolean_mask.data != nullptr ||
boolean_mask.valid != nullptr, "Null boolean_mask");
// zero-size inputs are OK, but otherwise input size must match mask size
CUDF_EXPECTS(input.num_rows() == 0 || input.num_rows() == boolean_mask.size,
"Column size mismatch");
if (boolean_mask.data == nullptr)
return detail::copy_if(input, boolean_mask_filter<false, true>{boolean_mask});
else if (not cudf::has_nulls(boolean_mask))
return detail::copy_if(input, boolean_mask_filter<true, false>{boolean_mask});
else
return detail::copy_if(input, boolean_mask_filter<true, true>{boolean_mask});
}
} // namespace cudf
|
5ad78d2d4cb5284b98f18b48dcb87cc56dec0cc5.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <numeric>
#include <vector>
#include <array>
#include <algorithm>
#include <functional>
#include <random>
#include <chrono>
#include <fstream>
#include <cmath>
using namespace std;
#include "Tensor.cu"
#include "Neurons.cu"
static void Activation(Tensor<>& activations, Tensor<>& weights, Tensor<>& inputs)
{
size_t batch_size = inputs.dims.back();
activations.Resize({weights.dims[1], batch_size});
activations.FillValue(0, 0);
Tensor<>::InnerProduct(activations[{}],
dim3(weights.dims[1], batch_size),
weights[{}],
dim3(weights.dims[1]),
inputs[{}],
dim3(1, batch_size),
weights.dims[0],
0);
}
template <class Neuron>
static void Responses(Tensor<>& responses, Tensor<>& activations)
{
responses.Resize(activations.dims);
Neuron::Response(responses[{}], dim3(), activations[{}], dim3(), activations.Volume(), 0);
}
template <class Neuron>
static void Gradients(Tensor<>& gradients, Tensor<>& responses)
{
gradients.Resize(responses.dims);
Neuron::Gradient(gradients[{}], dim3(), responses[{}], dim3(), responses.Volume(), 0);
}
static void Error(Tensor<>& errors, Tensor<>& output, Tensor<>& targets)
{
errors.Resize(targets.dims);
Tensor<>::Subtract(errors[{}],
dim3(),
output[{}],
dim3(),
targets[{}],
dim3(),
errors.Volume());
}
static void GradientDelta(Tensor<>& gradients_delta, Tensor<>& gradients, Tensor<>& output_delta)
{
gradients_delta.Resize(gradients.dims);
Tensor<>::Multiply(gradients_delta[{}],
gradients[{}],
output_delta[{}],
gradients_delta.Volume());
}
static void WeightsDelta(Tensor<>& weights_gradient, Tensor<>& inputs, Tensor<>& gradients_delta)
{
size_t batch_size = inputs.dims.back();
weights_gradient.Resize({inputs.Volume(-1), gradients_delta.Volume(-1)});
Tensor<>::OuterProduct(weights_gradient[{}],
dim3(weights_gradient.dims[1]),
gradients_delta[{}],
dim3(1, batch_size),
weights_gradient.dims[1],
inputs[{}],
dim3(1, batch_size),
weights_gradient.dims[0],
0);
}
static void SGDUpdate(Tensor<>& weights, Tensor<>& weights_gradient, double alpha)
{
for (size_t i = 0; i < weights.Volume(); i++)
{
weights.memory[i] -= alpha * weights_gradient.memory[i];
weights_gradient.memory[i] = 0;
}
}
static void RMSUpdate(Tensor<>& weights, Tensor<>& weights_gradient, Tensor<>& weights_gradient_rms, double alpha)
{
const double gamma = 0.9;
weights_gradient_rms.Resize(weights_gradient.dims, 0.00001);
for (size_t i = 0; i < weights.Volume(); i++)
{
weights_gradient_rms.memory[i] = gamma * weights_gradient_rms.memory[i] + (1 - gamma) * ::pow(weights_gradient.memory[i], 2);
double learning_rate = alpha / std::sqrt(weights_gradient_rms.memory[i]);
weights.memory[i] -= learning_rate * weights_gradient.memory[i];
weights_gradient.memory[i] = 0;
}
}
static void ADUpdate(Tensor<>& weights, Tensor<>& weights_gradient, Tensor<>& weights_gradient_rms, Tensor<>& weights_delta_rms)
{
const double gamma = 0.9;
weights_gradient_rms.Resize(weights_gradient.dims, 0.00001);
weights_delta_rms.Resize(weights_gradient.dims, 0.00001);
for (size_t i = 0; i < weights.Volume(); i++)
{
weights_gradient_rms.memory[i] = gamma * weights_gradient_rms.memory[i] + (1 - gamma) * ::pow(weights_gradient.memory[i], 2);
double learning_rate = std::sqrt(weights_delta_rms.memory[i]) / std::sqrt(weights_gradient_rms.memory[i]);
double weight_delta = -learning_rate * weights_gradient.memory[i];
//printf("%f + %f\n", gamma * weights_delta_rms.memory[i], (1 - gamma) * ::pow(weight_delta, 2));
weights_delta_rms.memory[i] = gamma * weights_delta_rms.memory[i] + (1 - gamma) * ::pow(weight_delta, 2);
weights.memory[i] += weight_delta;
weights_gradient.memory[i] = 0;
}
}
static void NormalizeWeights(Tensor<>& weights)
{
for (size_t o = 0; o < weights.dims[1]; o++)
Tensor<>::NormalizeToUnitCircle(weights[{0, o}], weights.Volume(1));
}
static void DeltaBack(Tensor<>& input_delta, Tensor<>& weights, Tensor<>& output_delta)
{
size_t batch_size = output_delta.dims.back();
input_delta.Resize({weights.dims[0], batch_size});
input_delta.FillValue(0);
Tensor<>::InnerProduct(input_delta[{}],
dim3(1, batch_size),
weights[{}],
dim3(weights.dims[1]),
output_delta[{}],
dim3(1, batch_size),
weights.dims[1],
0);
}
static void Mean(Tensor<>& mean, Tensor<>& batch)
{
mean.Resize({batch.Volume(-1)});
for (size_t i = 0; i < batch.dims.back(); i++)
Tensor<>::Add(mean[{}], mean[{}], batch[{0, i}], mean.Volume());
mean /= batch.dims.back();
}
static void Variance(Tensor<>& variance, Tensor<>& mean, Tensor<>& batch)
{
variance.Resize({batch.Volume(-1)});
for (size_t i = 0; i < batch.dims.back(); i++)
Tensor<>::Deviation(variance[{}], batch[{0, i}], mean[{}], variance.Volume());
variance /= (batch.dims.back() - 1);
}
static void Standardize(Tensor<>& variance, Tensor<>& mean, Tensor<>& batch)
{
Tensor<>::Sqrt(variance[{}], variance[{}], variance.Volume());
for (size_t i = 0; i < batch.dims.back(); i++)
{
//Tensor<>::Subtract(batch[{0, i}], dim3(), batch[{0, i}], dim3(), mean[{}], dim3(), mean.Volume());
Tensor<>::Divide(batch[{0, i}], batch[{0, i}], variance[{}], variance.Volume());
}
}
| 5ad78d2d4cb5284b98f18b48dcb87cc56dec0cc5.cu | #pragma once
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <numeric>
#include <vector>
#include <array>
#include <algorithm>
#include <functional>
#include <random>
#include <chrono>
#include <fstream>
#include <cmath>
using namespace std;
#include "Tensor.cu"
#include "Neurons.cu"
static void Activation(Tensor<>& activations, Tensor<>& weights, Tensor<>& inputs)
{
size_t batch_size = inputs.dims.back();
activations.Resize({weights.dims[1], batch_size});
activations.FillValue(0, 0);
Tensor<>::InnerProduct(activations[{}],
dim3(weights.dims[1], batch_size),
weights[{}],
dim3(weights.dims[1]),
inputs[{}],
dim3(1, batch_size),
weights.dims[0],
0);
}
template <class Neuron>
static void Responses(Tensor<>& responses, Tensor<>& activations)
{
responses.Resize(activations.dims);
Neuron::Response(responses[{}], dim3(), activations[{}], dim3(), activations.Volume(), 0);
}
template <class Neuron>
static void Gradients(Tensor<>& gradients, Tensor<>& responses)
{
gradients.Resize(responses.dims);
Neuron::Gradient(gradients[{}], dim3(), responses[{}], dim3(), responses.Volume(), 0);
}
static void Error(Tensor<>& errors, Tensor<>& output, Tensor<>& targets)
{
errors.Resize(targets.dims);
Tensor<>::Subtract(errors[{}],
dim3(),
output[{}],
dim3(),
targets[{}],
dim3(),
errors.Volume());
}
static void GradientDelta(Tensor<>& gradients_delta, Tensor<>& gradients, Tensor<>& output_delta)
{
gradients_delta.Resize(gradients.dims);
Tensor<>::Multiply(gradients_delta[{}],
gradients[{}],
output_delta[{}],
gradients_delta.Volume());
}
static void WeightsDelta(Tensor<>& weights_gradient, Tensor<>& inputs, Tensor<>& gradients_delta)
{
size_t batch_size = inputs.dims.back();
weights_gradient.Resize({inputs.Volume(-1), gradients_delta.Volume(-1)});
Tensor<>::OuterProduct(weights_gradient[{}],
dim3(weights_gradient.dims[1]),
gradients_delta[{}],
dim3(1, batch_size),
weights_gradient.dims[1],
inputs[{}],
dim3(1, batch_size),
weights_gradient.dims[0],
0);
}
static void SGDUpdate(Tensor<>& weights, Tensor<>& weights_gradient, double alpha)
{
for (size_t i = 0; i < weights.Volume(); i++)
{
weights.memory[i] -= alpha * weights_gradient.memory[i];
weights_gradient.memory[i] = 0;
}
}
static void RMSUpdate(Tensor<>& weights, Tensor<>& weights_gradient, Tensor<>& weights_gradient_rms, double alpha)
{
const double gamma = 0.9;
weights_gradient_rms.Resize(weights_gradient.dims, 0.00001);
for (size_t i = 0; i < weights.Volume(); i++)
{
weights_gradient_rms.memory[i] = gamma * weights_gradient_rms.memory[i] + (1 - gamma) * std::pow(weights_gradient.memory[i], 2);
double learning_rate = alpha / std::sqrt(weights_gradient_rms.memory[i]);
weights.memory[i] -= learning_rate * weights_gradient.memory[i];
weights_gradient.memory[i] = 0;
}
}
static void ADUpdate(Tensor<>& weights, Tensor<>& weights_gradient, Tensor<>& weights_gradient_rms, Tensor<>& weights_delta_rms)
{
const double gamma = 0.9;
weights_gradient_rms.Resize(weights_gradient.dims, 0.00001);
weights_delta_rms.Resize(weights_gradient.dims, 0.00001);
for (size_t i = 0; i < weights.Volume(); i++)
{
weights_gradient_rms.memory[i] = gamma * weights_gradient_rms.memory[i] + (1 - gamma) * std::pow(weights_gradient.memory[i], 2);
double learning_rate = std::sqrt(weights_delta_rms.memory[i]) / std::sqrt(weights_gradient_rms.memory[i]);
double weight_delta = -learning_rate * weights_gradient.memory[i];
//printf("%f + %f\n", gamma * weights_delta_rms.memory[i], (1 - gamma) * std::pow(weight_delta, 2));
weights_delta_rms.memory[i] = gamma * weights_delta_rms.memory[i] + (1 - gamma) * std::pow(weight_delta, 2);
weights.memory[i] += weight_delta;
weights_gradient.memory[i] = 0;
}
}
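/* Update rules implemented above, as written in the code (gamma = 0.9; the 0.00001
   passed to Resize presumably seeds the accumulators away from zero):
     RMSUpdate:  E[g^2] <- gamma*E[g^2] + (1-gamma)*g^2,  w <- w - (alpha/sqrt(E[g^2]))*g
     ADUpdate:   also tracks E[dw^2] and uses sqrt(E[dw^2])/sqrt(E[g^2]) as the
                 per-weight step size, an AdaDelta-style update with no global rate. */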
static void NormalizeWeights(Tensor<>& weights)
{
for (size_t o = 0; o < weights.dims[1]; o++)
Tensor<>::NormalizeToUnitCircle(weights[{0, o}], weights.Volume(1));
}
static void DeltaBack(Tensor<>& input_delta, Tensor<>& weights, Tensor<>& output_delta)
{
size_t batch_size = output_delta.dims.back();
input_delta.Resize({weights.dims[0], batch_size});
input_delta.FillValue(0);
Tensor<>::InnerProduct(input_delta[{}],
dim3(1, batch_size),
weights[{}],
dim3(weights.dims[1]),
output_delta[{}],
dim3(1, batch_size),
weights.dims[1],
0);
}
static void Mean(Tensor<>& mean, Tensor<>& batch)
{
mean.Resize({batch.Volume(-1)});
for (size_t i = 0; i < batch.dims.back(); i++)
Tensor<>::Add(mean[{}], mean[{}], batch[{0, i}], mean.Volume());
mean /= batch.dims.back();
}
static void Variance(Tensor<>& variance, Tensor<>& mean, Tensor<>& batch)
{
variance.Resize({batch.Volume(-1)});
for (size_t i = 0; i < batch.dims.back(); i++)
Tensor<>::Deviation(variance[{}], batch[{0, i}], mean[{}], variance.Volume());
variance /= (batch.dims.back() - 1);
}
static void Standardize(Tensor<>& variance, Tensor<>& mean, Tensor<>& batch)
{
Tensor<>::Sqrt(variance[{}], variance[{}], variance.Volume());
for (size_t i = 0; i < batch.dims.back(); i++)
{
//Tensor<>::Subtract(batch[{0, i}], dim3(), batch[{0, i}], dim3(), mean[{}], dim3(), mean.Volume());
Tensor<>::Divide(batch[{0, i}], batch[{0, i}], variance[{}], variance.Volume());
}
}
|
4a769823bfab61bebf9cf73a7e4fbec51b4e4c49.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "BFS_kernel_multi_block.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
volatile unsigned int *frontier = NULL;
hipMalloc(&frontier, XSIZE*YSIZE);
volatile unsigned int *frontier2 = NULL;
hipMalloc(&frontier2, XSIZE*YSIZE);
unsigned int frontier_len = 1;
volatile unsigned int *cost = NULL;
hipMalloc(&cost, XSIZE*YSIZE);
volatile int *visited = NULL;
hipMalloc(&visited, XSIZE*YSIZE);
unsigned int *edgeArray = NULL;
hipMalloc(&edgeArray, XSIZE*YSIZE);
unsigned int *edgeArrayAux = NULL;
hipMalloc(&edgeArrayAux, XSIZE*YSIZE);
unsigned int numVertices = 1;
unsigned int numEdges = 1;
volatile unsigned int *frontier_length = NULL;
hipMalloc(&frontier_length, XSIZE*YSIZE);
unsigned int NUM_P_PER_MP = 1;
unsigned int W_Q_SIZE = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((BFS_kernel_multi_block), dim3(gridBlock),dim3(threadBlock), 0, 0, frontier,frontier2,frontier_len,cost,visited,edgeArray,edgeArrayAux,numVertices,numEdges,frontier_length,NUM_P_PER_MP,W_Q_SIZE);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((BFS_kernel_multi_block), dim3(gridBlock),dim3(threadBlock), 0, 0, frontier,frontier2,frontier_len,cost,visited,edgeArray,edgeArrayAux,numVertices,numEdges,frontier_length,NUM_P_PER_MP,W_Q_SIZE);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((BFS_kernel_multi_block), dim3(gridBlock),dim3(threadBlock), 0, 0, frontier,frontier2,frontier_len,cost,visited,edgeArray,edgeArrayAux,numVertices,numEdges,frontier_length,NUM_P_PER_MP,W_Q_SIZE);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4a769823bfab61bebf9cf73a7e4fbec51b4e4c49.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "BFS_kernel_multi_block.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
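// Benchmark sweep: main() below times 1000 launches of BFS_kernel_multi_block for every
// combination of the seven problem sizes in matrices_ and the twenty block shapes in
// blocks_, after warm-up launches, and prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].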
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
volatile unsigned int *frontier = NULL;
cudaMalloc(&frontier, XSIZE*YSIZE);
volatile unsigned int *frontier2 = NULL;
cudaMalloc(&frontier2, XSIZE*YSIZE);
unsigned int frontier_len = 1;
volatile unsigned int *cost = NULL;
cudaMalloc(&cost, XSIZE*YSIZE);
volatile int *visited = NULL;
cudaMalloc(&visited, XSIZE*YSIZE);
unsigned int *edgeArray = NULL;
cudaMalloc(&edgeArray, XSIZE*YSIZE);
unsigned int *edgeArrayAux = NULL;
cudaMalloc(&edgeArrayAux, XSIZE*YSIZE);
unsigned int numVertices = 1;
unsigned int numEdges = 1;
volatile unsigned int *frontier_length = NULL;
cudaMalloc(&frontier_length, XSIZE*YSIZE);
unsigned int NUM_P_PER_MP = 1;
unsigned int W_Q_SIZE = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
BFS_kernel_multi_block<<<gridBlock,threadBlock>>>(frontier,frontier2,frontier_len,cost,visited,edgeArray,edgeArrayAux,numVertices,numEdges,frontier_length,NUM_P_PER_MP,W_Q_SIZE);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
BFS_kernel_multi_block<<<gridBlock,threadBlock>>>(frontier,frontier2,frontier_len,cost,visited,edgeArray,edgeArrayAux,numVertices,numEdges,frontier_length,NUM_P_PER_MP,W_Q_SIZE);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
BFS_kernel_multi_block<<<gridBlock,threadBlock>>>(frontier,frontier2,frontier_len,cost,visited,edgeArray,edgeArrayAux,numVertices,numEdges,frontier_length,NUM_P_PER_MP,W_Q_SIZE);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b5d18fe23470b48e2647a692122394e58174de6e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <cutil_inline.h>
void extern timerstart(char* name);
void extern timerend();
// Device code
__device__ void vectorAdd_main(int* A,int* B,int* C, int opt)
{
/* Sample Vector add code
C[opt]=A[opt]+ B[opt];
*/
}
//This kernel distributes the work irrespective of the size
__global__ void vectorAdd_kernel(int* A,int* B,int* C,int TOTAL_SIZE)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
for(int opt = tid; opt < TOTAL_SIZE; opt += THREAD_N){
vectorAdd_main(A,B,C,opt);
}
}
// Host code
int* abstract_vectorAdd(int* A,int* B,int* C,int TOTAL_SIZE)
{
timerstart("CUDA");
int* d_A ;
int* d_B ;
int* d_C ;
// Allocate vectors in device memory
cutilSafeCall( hipMalloc((void**)&d_A, sizeof(int)*1000)) ;
cutilSafeCall( hipMalloc((void**)&d_B, sizeof(int)*1000)) ;
cutilSafeCall( hipMalloc((void**)&d_C, sizeof(int)*1000)) ;
// Copy variables from host memory to device memory
cutilSafeCall( hipMemcpy(d_A,A, sizeof(int)*1000,hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(d_B,B, sizeof(int)*1000,hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(d_C,C, sizeof(int)*1000,hipMemcpyHostToDevice) );
// Kernel call with 480*256 threads
hipLaunchKernelGGL(( vectorAdd_kernel), dim3(480), dim3(256), 0, 0, d_A,d_B,d_C,TOTAL_SIZE);
cutilCheckMsg("kernel launch failure\n");
cutilSafeCall( hipDeviceSynchronize() );
// Copy variables from device memory to host memory
cutilSafeCall( hipMemcpy(C,d_C, sizeof(int)*1000,hipMemcpyDeviceToHost) );
if (d_A) cutilSafeCall( hipFree(d_A)) ;
if (d_B) cutilSafeCall( hipFree(d_B)) ;
if (d_C) cutilSafeCall( hipFree(d_C)) ;
timerend();
return C;
} | b5d18fe23470b48e2647a692122394e58174de6e.cu | #include <stdio.h>
#include <cutil_inline.h>
void extern timerstart(char* name);
void extern timerend();
// Device code
__device__ void vectorAdd_main(int* A,int* B,int* C, int opt)
{
/* Sample Vector add code
C[opt]=A[opt]+ B[opt];
*/
}
//This kernel distributes the work irrespective of the size
__global__ void vectorAdd_kernel(int* A,int* B,int* C,int TOTAL_SIZE)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
for(int opt = tid; opt < TOTAL_SIZE; opt += THREAD_N){
vectorAdd_main(A,B,C,opt);
}
}
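// The loop above is a grid-stride loop: each of the blockDim.x*gridDim.x threads starts
// at its global index and advances by the total thread count, so any TOTAL_SIZE is
// covered by the fixed 480x256 launch in the host code without retuning the configuration.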
// Host code
int* abstract_vectorAdd(int* A,int* B,int* C,int TOTAL_SIZE)
{
timerstart("CUDA");
int* d_A ;
int* d_B ;
int* d_C ;
// Allocate vectors in device memory
cutilSafeCall( cudaMalloc((void**)&d_A, sizeof(int)*1000)) ;
cutilSafeCall( cudaMalloc((void**)&d_B, sizeof(int)*1000)) ;
cutilSafeCall( cudaMalloc((void**)&d_C, sizeof(int)*1000)) ;
// Copy variables from host memory to device memory
cutilSafeCall( cudaMemcpy(d_A,A, sizeof(int)*1000,cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(d_B,B, sizeof(int)*1000,cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(d_C,C, sizeof(int)*1000,cudaMemcpyHostToDevice) );
// Kernel call with 480*256 threads
vectorAdd_kernel<<<480, 256>>>(d_A,d_B,d_C,TOTAL_SIZE);
cutilCheckMsg("kernel launch failure\n");
cutilSafeCall( cudaThreadSynchronize() );
// Copy variables from device memory to host memory
cutilSafeCall( cudaMemcpy(C,d_C, sizeof(int)*1000,cudaMemcpyDeviceToHost) );
if (d_A) cutilSafeCall( cudaFree(d_A)) ;
if (d_B) cutilSafeCall( cudaFree(d_B)) ;
if (d_C) cutilSafeCall( cudaFree(d_C)) ;
timerend();
return C;
} |
cde770fc3d12571af28d3356cb6dee310ab02fd6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 6
//Poisson Blending
/* Background
==========
The goal for this assignment is to take one image (the source) and
paste it into another image (the destination) attempting to match the
two images so that the pasting is non-obvious. This is
known as a "seamless clone".
The basic ideas are as follows:
1) Figure out the interior and border of the source image
2) Use the values of the border pixels in the destination image
as boundary conditions for solving a Poisson equation that tells
us how to blend the images.
No pixels from the destination except pixels on the border
are used to compute the match.
Solving the Poisson Equation
============================
There are multiple ways to solve this equation - we choose an iterative
method - specifically the Jacobi method. Iterative methods start with
a guess of the solution and then iterate to try and improve the guess
until it stops changing. If the problem was well-suited for the method
then it will stop and where it stops will be the solution.
The Jacobi method is the simplest iterative method and converges slowly -
that is we need a lot of iterations to get to the answer, but it is the
easiest method to write.
Jacobi Iterations
=================
Our initial guess is going to be the source image itself. This is a pretty
good guess for what the blended image will look like and it means that
we won't have to do as many iterations compared to if we had started far
from the final solution.
ImageGuess_prev (Floating point)
ImageGuess_next (Floating point)
DestinationImg
SourceImg
Follow these steps to implement one iteration:
1) For every pixel p in the interior, compute two sums over the four neighboring pixels:
Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor]
else if the neighbor in on the border then += DestinationImg[neighbor]
Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors)
2) Calculate the new pixel value:
float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT
ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255]
In this assignment we will do 800 iterations.
*/
#include "utils.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
struct splitChannels : thrust::unary_function<uchar4, thrust::tuple<unsigned char, unsigned char, unsigned char> >{
__host__ __device__
thrust::tuple<unsigned char, unsigned char, unsigned char> operator()(uchar4 pixel) {
return thrust::make_tuple(pixel.x, pixel.y, pixel.z);
}
};
struct combineChannels : thrust::unary_function<thrust::tuple<float, float, float>, uchar4> {
__host__ __device__
uchar4 operator()(thrust::tuple<float, float, float> t) {
return make_uchar4(thrust::get<0>(t), thrust::get<1>(t), thrust::get<2>(t), 255);
}
};
struct createMask : thrust::unary_function<uchar4, unsigned char >{
__host__ __device__
unsigned char operator()(uchar4 pixel) {
return (pixel.x + pixel.y + pixel.z < 3*255) ? 1 : 0;
}
};
struct charToFloat : thrust::unary_function<unsigned char, float >{
__host__ __device__
float operator()(unsigned char v) {
return float(v);
}
};
struct blendInterior : thrust::unary_function<thrust::tuple<uchar4, unsigned char, uchar4>, uchar4> {
__host__ __device__
uchar4 operator()(thrust::tuple<uchar4, unsigned char, uchar4> t) {
return thrust::get<1>(t) ? thrust::get<0>(t) : thrust::get<2>(t);
}
};
__global__ void compute_border(unsigned char *d_borderPixels, unsigned char *d_strictInteriorPixels,
unsigned char *d_mask,
const size_t numRowsSource, const size_t numColsSource) {
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
int index = r * numColsSource + c;
if (c >= numColsSource || r >= numRowsSource) return;
if (d_mask[index] == 0) {
d_borderPixels[index] = 0;
d_strictInteriorPixels[index] = 0;
} else if (d_mask[(r -1) * numColsSource + c] && d_mask[(r + 1) * numColsSource + c] &&
d_mask[r * numColsSource + c - 1] && d_mask[r * numColsSource + c + 1]) {
d_strictInteriorPixels[index] = 1;
d_borderPixels[index] = 0;
}
else {
d_strictInteriorPixels[index] = 0;
d_borderPixels[index] = 1;
}
}
__global__ void computeG(unsigned char *d_channel,
float * d_g,
int numColsSource, int numRowsSource,
unsigned char *d_strictInteriorPixels) {
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
int offset = r * numColsSource + c;
if (c >= numColsSource || r >= numRowsSource) return;
float sum = 0;
if (d_strictInteriorPixels[offset]) {
sum = 4.f * d_channel[offset];
sum -= (float)d_channel[offset - 1] + (float)d_channel[offset + 1];
sum -= (float)d_channel[offset + numColsSource] + (float)d_channel[offset - numColsSource];
}
d_g[offset] = sum;
}
__global__ void computeIteration(unsigned char *d_dstImg,
unsigned char *d_strictInteriorPixels,
unsigned char *d_borderPixels,
int numRowsSource, int numColsSource,
float *f,
float *g,
float *f_next)
{
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
int offset = r * numColsSource + c;
if (c >= numColsSource || r >= numRowsSource) return;
float blendedSum = 0.f;
float borderSum = 0.f;
if (d_strictInteriorPixels[offset - 1]) {
blendedSum += f[offset - 1];
}
else {
borderSum += d_dstImg[offset - 1];
}
if (d_strictInteriorPixels[offset + 1]) {
blendedSum += f[offset + 1];
}
else {
borderSum += d_dstImg[offset + 1];
}
if (d_strictInteriorPixels[offset - numColsSource]) {
blendedSum += f[offset - numColsSource];
}
else {
borderSum += d_dstImg[offset - numColsSource];
}
if (d_strictInteriorPixels[offset + numColsSource]) {
blendedSum += f[offset + numColsSource];
}
else {
borderSum += d_dstImg[offset + numColsSource];
}
float f_next_val = (blendedSum + borderSum + g[offset]) / 4.f;
f_next[offset] = min(255.f, max(0.f, f_next_val)); //clip to [0, 255]
}
void your_blend(const uchar4* const h_sourceImg, //IN
const size_t numRowsSource, const size_t numColsSource,
const uchar4* const h_dstImg, //IN
uchar4* const h_blendedImg) //OUT
{
/* To Recap here are the steps you need to implement
1) Compute a mask of the pixels from the source image to be copied
The pixels that shouldn't be copied are completely white, they
have R=255, G=255, B=255. Any other pixels SHOULD be copied.
2) Compute the interior and border regions of the mask. An interior
pixel has all 4 neighbors also inside the mask. A border pixel is
in the mask itself, but has at least one neighbor that isn't.
3) Separate out the incoming image into three separate channels
4) Create two float(!) buffers for each color channel that will
act as our guesses. Initialize them to the respective color
channel of the source image since that will act as our intial guess.
5) For each color channel perform the Jacobi iteration described
above 800 times.
6) Create the output image by replacing all the interior pixels
in the destination image with the result of the Jacobi iterations.
Just cast the floating point values to unsigned chars since we have
already made sure to clamp them to the correct range.
Since this is final assignment we provide little boilerplate code to
help you. Notice that all the input/output pointers are HOST pointers.
You will have to allocate all of your own GPU memory and perform your own
memcopies to get data in and out of the GPU memory.
Remember to wrap all of your calls with checkCudaErrors() to catch any
thing that might go wrong. After each kernel call do:
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
to catch any errors that happened while executing the kernel.
*/
size_t srcSize = numRowsSource * numColsSource;
thrust::device_vector<uchar4> d_sourceImg(h_sourceImg, h_sourceImg + srcSize);
thrust::device_vector<uchar4> d_dstImg(h_dstImg, h_dstImg + srcSize);
// mask
thrust::device_vector<unsigned char> d_mask(srcSize);
thrust::transform(d_sourceImg.begin(), d_sourceImg.end(), d_mask.begin(), createMask());
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// border
thrust::device_vector<unsigned char> d_borderPixels(srcSize);
thrust::device_vector<unsigned char> d_strictInteriorPixels(srcSize);
const dim3 gridSize(numColsSource, numRowsSource, 1);
hipLaunchKernelGGL(( compute_border), dim3(gridSize), dim3(1), 0, 0, thrust::raw_pointer_cast(d_borderPixels.data()),
thrust::raw_pointer_cast(d_strictInteriorPixels.data()),
thrust::raw_pointer_cast(d_mask.data()),
numRowsSource, numColsSource);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
thrust::device_vector<unsigned char> d_red;
thrust::device_vector<unsigned char> d_blue;
thrust::device_vector<unsigned char> d_green;
d_red. resize(srcSize);
d_blue. resize(srcSize);
d_green.resize(srcSize);
//split the image
thrust::transform(d_sourceImg.begin(), d_sourceImg.end(), thrust::make_zip_iterator(
thrust::make_tuple(d_red.begin(),
d_blue.begin(),
d_green.begin())),
splitChannels());
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
thrust::device_vector<unsigned char> d_red_dst(srcSize);
thrust::device_vector<unsigned char> d_blue_dst(srcSize);
thrust::device_vector<unsigned char> d_green_dst(srcSize);
thrust::transform(d_dstImg.begin(), d_dstImg.end(), thrust::make_zip_iterator(
thrust::make_tuple(d_red_dst.begin(),
d_blue_dst.begin(),
d_green_dst.begin())),
splitChannels());
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// copy channels
const size_t numIterations = 800;
thrust::device_vector<float> d_red1(srcSize);
thrust::device_vector<float> d_blue1(srcSize);
thrust::device_vector<float> d_green1(srcSize);
thrust::transform(d_red.begin(), d_red.end(), d_red1.begin(), charToFloat());
thrust::transform(d_green.begin(), d_green.end(), d_green1.begin(), charToFloat());
thrust::transform(d_blue.begin(), d_blue.end(), d_blue1.begin(), charToFloat());
thrust::device_vector<float> d_red2(d_red1);
thrust::device_vector<float> d_blue2(d_blue1);
thrust::device_vector<float> d_green2(d_green1);
thrust::device_vector<float> * ptr1, *ptr2, *temp;
thrust::device_vector<float> d_g_red(srcSize);
thrust::device_vector<float> d_g_green(srcSize);
thrust::device_vector<float> d_g_blue(srcSize);
hipLaunchKernelGGL(( computeG), dim3(gridSize), dim3(1), 0, 0, thrust::raw_pointer_cast(d_red.data()),
thrust::raw_pointer_cast(d_g_red.data()),
numColsSource, numRowsSource,
thrust::raw_pointer_cast(d_strictInteriorPixels.data()));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeG), dim3(gridSize), dim3(1), 0, 0, thrust::raw_pointer_cast(d_green.data()),
thrust::raw_pointer_cast(d_g_green.data()),
numColsSource, numRowsSource,
thrust::raw_pointer_cast(d_strictInteriorPixels.data()));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeG), dim3(gridSize), dim3(1), 0, 0, thrust::raw_pointer_cast(d_blue.data()),
thrust::raw_pointer_cast(d_g_blue.data()),
numColsSource, numRowsSource,
thrust::raw_pointer_cast(d_strictInteriorPixels.data()));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
ptr1 = &d_red1;
ptr2 = &d_red2;
for (size_t i = 0; i < numIterations; ++i) {
hipLaunchKernelGGL(( computeIteration), dim3(gridSize), dim3(1), 0, 0, thrust::raw_pointer_cast(d_red_dst.data()),
thrust::raw_pointer_cast(d_strictInteriorPixels.data()),
thrust::raw_pointer_cast(d_borderPixels.data()),
numRowsSource, numColsSource,
thrust::raw_pointer_cast(ptr1->data()),
thrust::raw_pointer_cast(d_g_red.data()),
thrust::raw_pointer_cast(ptr2->data()));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
temp = ptr2;
ptr2 = ptr1;
ptr1 = temp;
}
ptr1 = &d_green1;
ptr2 = &d_green2;
for (size_t i = 0; i < numIterations; ++i) {
hipLaunchKernelGGL(( computeIteration), dim3(gridSize), dim3(1), 0, 0, thrust::raw_pointer_cast(d_green_dst.data()),
thrust::raw_pointer_cast(d_strictInteriorPixels.data()),
thrust::raw_pointer_cast(d_borderPixels.data()),
numRowsSource, numColsSource,
thrust::raw_pointer_cast(ptr1->data()),
thrust::raw_pointer_cast(d_g_green.data()),
thrust::raw_pointer_cast(ptr2->data()));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
temp = ptr2;
ptr2 = ptr1;
ptr1 = temp;
}
ptr1 = &d_blue1;
ptr2 = &d_blue2;
for (size_t i = 0; i < numIterations; ++i) {
hipLaunchKernelGGL(( computeIteration), dim3(gridSize), dim3(1), 0, 0, thrust::raw_pointer_cast(d_blue_dst.data()),
thrust::raw_pointer_cast(d_strictInteriorPixels.data()),
thrust::raw_pointer_cast(d_borderPixels.data()),
numRowsSource, numColsSource,
thrust::raw_pointer_cast(ptr1->data()),
thrust::raw_pointer_cast(d_g_blue.data()),
thrust::raw_pointer_cast(ptr2->data()));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
temp = ptr2;
ptr2 = ptr1;
ptr1 = temp;
}
// merge
thrust::device_vector<uchar4> d_outputImg(srcSize);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(
d_red1.begin(),
d_blue1.begin(),
d_green1.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
d_red1.end(),
d_blue1.end(),
d_green1.end())),
d_outputImg.begin(),
combineChannels());
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// merge with origin dst
thrust::device_vector<uchar4> d_blendedImg(srcSize);
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(d_outputImg.begin(), d_strictInteriorPixels.begin(), d_dstImg.begin())),
thrust::make_zip_iterator(thrust::make_tuple(d_outputImg.end(), d_strictInteriorPixels.end(), d_dstImg.end())),
d_blendedImg.begin(),
blendInterior());
checkCudaErrors(hipMemcpy(h_blendedImg, thrust::raw_pointer_cast(d_blendedImg.data()), sizeof(uchar4)*srcSize, hipMemcpyDeviceToHost));
}
| cde770fc3d12571af28d3356cb6dee310ab02fd6.cu | //Udacity HW 6
//Poisson Blending
/* Background
==========
The goal for this assignment is to take one image (the source) and
paste it into another image (the destination) attempting to match the
two images so that the pasting is non-obvious. This is
known as a "seamless clone".
The basic ideas are as follows:
1) Figure out the interior and border of the source image
2) Use the values of the border pixels in the destination image
as boundary conditions for solving a Poisson equation that tells
us how to blend the images.
No pixels from the destination except pixels on the border
are used to compute the match.
Solving the Poisson Equation
============================
There are multiple ways to solve this equation - we choose an iterative
method - specifically the Jacobi method. Iterative methods start with
a guess of the solution and then iterate to try and improve the guess
until it stops changing. If the problem was well-suited for the method
then it will stop and where it stops will be the solution.
The Jacobi method is the simplest iterative method and converges slowly -
that is we need a lot of iterations to get to the answer, but it is the
easiest method to write.
Jacobi Iterations
=================
Our initial guess is going to be the source image itself. This is a pretty
good guess for what the blended image will look like and it means that
we won't have to do as many iterations compared to if we had started far
from the final solution.
ImageGuess_prev (Floating point)
ImageGuess_next (Floating point)
DestinationImg
SourceImg
Follow these steps to implement one iteration:
1) For every pixel p in the interior, compute two sums over the four neighboring pixels:
Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor]
else if the neighbor in on the border then += DestinationImg[neighbor]
Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors)
2) Calculate the new pixel value:
float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT
ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255]
In this assignment we will do 800 iterations.
*/
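/* How the walkthrough above maps onto the code below: Sum1 corresponds to
   blendedSum + borderSum in computeIteration, Sum2 is precomputed once per color
   channel by computeG into g[], and compute_border derives the interior/border
   masks that act as the Jacobi boundary conditions. */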
#include "utils.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
struct splitChannels : thrust::unary_function<uchar4, thrust::tuple<unsigned char, unsigned char, unsigned char> >{
__host__ __device__
thrust::tuple<unsigned char, unsigned char, unsigned char> operator()(uchar4 pixel) {
return thrust::make_tuple(pixel.x, pixel.y, pixel.z);
}
};
struct combineChannels : thrust::unary_function<thrust::tuple<float, float, float>, uchar4> {
__host__ __device__
uchar4 operator()(thrust::tuple<float, float, float> t) {
return make_uchar4(thrust::get<0>(t), thrust::get<1>(t), thrust::get<2>(t), 255);
}
};
struct createMask : thrust::unary_function<uchar4, unsigned char >{
__host__ __device__
unsigned char operator()(uchar4 pixel) {
return (pixel.x + pixel.y + pixel.z < 3*255) ? 1 : 0;
}
};
struct charToFloat : thrust::unary_function<unsigned char, float >{
__host__ __device__
float operator()(unsigned char v) {
return float(v);
}
};
struct blendInterior : thrust::unary_function<thrust::tuple<uchar4, unsigned char, uchar4>, uchar4> {
__host__ __device__
uchar4 operator()(thrust::tuple<uchar4, unsigned char, uchar4> t) {
return thrust::get<1>(t) ? thrust::get<0>(t) : thrust::get<2>(t);
}
};
__global__ void compute_border(unsigned char *d_borderPixels, unsigned char *d_strictInteriorPixels,
unsigned char *d_mask,
const size_t numRowsSource, const size_t numColsSource) {
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
int index = r * numColsSource + c;
if (c >= numColsSource || r >= numRowsSource) return;
if (d_mask[index] == 0) {
d_borderPixels[index] = 0;
d_strictInteriorPixels[index] = 0;
} else if (d_mask[(r -1) * numColsSource + c] && d_mask[(r + 1) * numColsSource + c] &&
d_mask[r * numColsSource + c - 1] && d_mask[r * numColsSource + c + 1]) {
d_strictInteriorPixels[index] = 1;
d_borderPixels[index] = 0;
}
else {
d_strictInteriorPixels[index] = 0;
d_borderPixels[index] = 1;
}
}
__global__ void computeG(unsigned char *d_channel,
float * d_g,
int numColsSource, int numRowsSource,
unsigned char *d_strictInteriorPixels) {
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
int offset = r * numColsSource + c;
if (c >= numColsSource || r >= numRowsSource) return;
float sum = 0;
if (d_strictInteriorPixels[offset]) {
sum = 4.f * d_channel[offset];
sum -= (float)d_channel[offset - 1] + (float)d_channel[offset + 1];
sum -= (float)d_channel[offset + numColsSource] + (float)d_channel[offset - numColsSource];
}
d_g[offset] = sum;
}
__global__ void computeIteration(unsigned char *d_dstImg,
unsigned char *d_strictInteriorPixels,
unsigned char *d_borderPixels,
int numRowsSource, int numColsSource,
float *f,
float *g,
float *f_next)
{
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
int offset = r * numColsSource + c;
if (c >= numColsSource || r >= numRowsSource) return;
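  // The four neighbor reads below (offset +/- 1 and offset +/- numColsSource) are not
  // edge-guarded, so pixels on the outermost rows/columns index outside the buffer;
  // only strictly interior mask pixels are consumed by the final blend.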
float blendedSum = 0.f;
float borderSum = 0.f;
if (d_strictInteriorPixels[offset - 1]) {
blendedSum += f[offset - 1];
}
else {
borderSum += d_dstImg[offset - 1];
}
if (d_strictInteriorPixels[offset + 1]) {
blendedSum += f[offset + 1];
}
else {
borderSum += d_dstImg[offset + 1];
}
if (d_strictInteriorPixels[offset - numColsSource]) {
blendedSum += f[offset - numColsSource];
}
else {
borderSum += d_dstImg[offset - numColsSource];
}
if (d_strictInteriorPixels[offset + numColsSource]) {
blendedSum += f[offset + numColsSource];
}
else {
borderSum += d_dstImg[offset + numColsSource];
}
float f_next_val = (blendedSum + borderSum + g[offset]) / 4.f;
f_next[offset] = min(255.f, max(0.f, f_next_val)); //clip to [0, 255]
}
void your_blend(const uchar4* const h_sourceImg, //IN
const size_t numRowsSource, const size_t numColsSource,
const uchar4* const h_dstImg, //IN
uchar4* const h_blendedImg) //OUT
{
/* To Recap here are the steps you need to implement
1) Compute a mask of the pixels from the source image to be copied
The pixels that shouldn't be copied are completely white, they
have R=255, G=255, B=255. Any other pixels SHOULD be copied.
2) Compute the interior and border regions of the mask. An interior
pixel has all 4 neighbors also inside the mask. A border pixel is
in the mask itself, but has at least one neighbor that isn't.
3) Separate out the incoming image into three separate channels
4) Create two float(!) buffers for each color channel that will
act as our guesses. Initialize them to the respective color
channel of the source image since that will act as our intial guess.
5) For each color channel perform the Jacobi iteration described
above 800 times.
6) Create the output image by replacing all the interior pixels
in the destination image with the result of the Jacobi iterations.
Just cast the floating point values to unsigned chars since we have
already made sure to clamp them to the correct range.
Since this is final assignment we provide little boilerplate code to
help you. Notice that all the input/output pointers are HOST pointers.
You will have to allocate all of your own GPU memory and perform your own
memcopies to get data in and out of the GPU memory.
Remember to wrap all of your calls with checkCudaErrors() to catch any
thing that might go wrong. After each kernel call do:
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
to catch any errors that happened while executing the kernel.
*/
size_t srcSize = numRowsSource * numColsSource;
thrust::device_vector<uchar4> d_sourceImg(h_sourceImg, h_sourceImg + srcSize);
thrust::device_vector<uchar4> d_dstImg(h_dstImg, h_dstImg + srcSize);
// mask
thrust::device_vector<unsigned char> d_mask(srcSize);
thrust::transform(d_sourceImg.begin(), d_sourceImg.end(), d_mask.begin(), createMask());
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// border
thrust::device_vector<unsigned char> d_borderPixels(srcSize);
thrust::device_vector<unsigned char> d_strictInteriorPixels(srcSize);
const dim3 gridSize(numColsSource, numRowsSource, 1);
compute_border<<<gridSize, 1>>>(thrust::raw_pointer_cast(d_borderPixels.data()),
thrust::raw_pointer_cast(d_strictInteriorPixels.data()),
thrust::raw_pointer_cast(d_mask.data()),
numRowsSource, numColsSource);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
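// Launch-configuration note: this grid maps one single-thread block to each pixel,
// which is simple but leaves most of every warp idle; a 2-D block such as dim3(16, 16)
// with a correspondingly smaller grid would be the more conventional choice.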
thrust::device_vector<unsigned char> d_red;
thrust::device_vector<unsigned char> d_blue;
thrust::device_vector<unsigned char> d_green;
d_red. resize(srcSize);
d_blue. resize(srcSize);
d_green.resize(srcSize);
//split the image
thrust::transform(d_sourceImg.begin(), d_sourceImg.end(), thrust::make_zip_iterator(
thrust::make_tuple(d_red.begin(),
d_blue.begin(),
d_green.begin())),
splitChannels());
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
thrust::device_vector<unsigned char> d_red_dst(srcSize);
thrust::device_vector<unsigned char> d_blue_dst(srcSize);
thrust::device_vector<unsigned char> d_green_dst(srcSize);
thrust::transform(d_dstImg.begin(), d_dstImg.end(), thrust::make_zip_iterator(
thrust::make_tuple(d_red_dst.begin(),
d_blue_dst.begin(),
d_green_dst.begin())),
splitChannels());
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Float working buffers for each channel, initialized from the source channels (the initial guesses)
const size_t numIterations = 800;
thrust::device_vector<float> d_red1(srcSize);
thrust::device_vector<float> d_blue1(srcSize);
thrust::device_vector<float> d_green1(srcSize);
thrust::transform(d_red.begin(), d_red.end(), d_red1.begin(), charToFloat());
thrust::transform(d_green.begin(), d_green.end(), d_green1.begin(), charToFloat());
thrust::transform(d_blue.begin(), d_blue.end(), d_blue1.begin(), charToFloat());
thrust::device_vector<float> d_red2(d_red1);
thrust::device_vector<float> d_blue2(d_blue1);
thrust::device_vector<float> d_green2(d_green1);
thrust::device_vector<float> * ptr1, *ptr2, *temp;
thrust::device_vector<float> d_g_red(srcSize);
thrust::device_vector<float> d_g_green(srcSize);
thrust::device_vector<float> d_g_blue(srcSize);
computeG<<<gridSize, 1>>>(thrust::raw_pointer_cast(d_red.data()),
thrust::raw_pointer_cast(d_g_red.data()),
numColsSource, numRowsSource,
thrust::raw_pointer_cast(d_strictInteriorPixels.data()));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
computeG<<<gridSize, 1>>>(thrust::raw_pointer_cast(d_green.data()),
thrust::raw_pointer_cast(d_g_green.data()),
numColsSource, numRowsSource,
thrust::raw_pointer_cast(d_strictInteriorPixels.data()));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
computeG<<<gridSize, 1>>>(thrust::raw_pointer_cast(d_blue.data()),
thrust::raw_pointer_cast(d_g_blue.data()),
numColsSource, numRowsSource,
thrust::raw_pointer_cast(d_strictInteriorPixels.data()));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
ptr1 = &d_red1;
ptr2 = &d_red2;
for (size_t i = 0; i < numIterations; ++i) {
computeIteration<<<gridSize, 1>>>(thrust::raw_pointer_cast(d_red_dst.data()),
thrust::raw_pointer_cast(d_strictInteriorPixels.data()),
thrust::raw_pointer_cast(d_borderPixels.data()),
numRowsSource, numColsSource,
thrust::raw_pointer_cast(ptr1->data()),
thrust::raw_pointer_cast(d_g_red.data()),
thrust::raw_pointer_cast(ptr2->data()));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
temp = ptr2;
ptr2 = ptr1;
ptr1 = temp;
}
ptr1 = &d_green1;
ptr2 = &d_green2;
for (size_t i = 0; i < numIterations; ++i) {
computeIteration<<<gridSize, 1>>>(thrust::raw_pointer_cast(d_green_dst.data()),
thrust::raw_pointer_cast(d_strictInteriorPixels.data()),
thrust::raw_pointer_cast(d_borderPixels.data()),
numRowsSource, numColsSource,
thrust::raw_pointer_cast(ptr1->data()),
thrust::raw_pointer_cast(d_g_green.data()),
thrust::raw_pointer_cast(ptr2->data()));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
temp = ptr2;
ptr2 = ptr1;
ptr1 = temp;
}
ptr1 = &d_blue1;
ptr2 = &d_blue2;
for (size_t i = 0; i < numIterations; ++i) {
computeIteration<<<gridSize, 1>>>(thrust::raw_pointer_cast(d_blue_dst.data()),
thrust::raw_pointer_cast(d_strictInteriorPixels.data()),
thrust::raw_pointer_cast(d_borderPixels.data()),
numRowsSource, numColsSource,
thrust::raw_pointer_cast(ptr1->data()),
thrust::raw_pointer_cast(d_g_blue.data()),
thrust::raw_pointer_cast(ptr2->data()));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
temp = ptr2;
ptr2 = ptr1;
ptr1 = temp;
}
// recombine the three float channels into a single uchar4 image
thrust::device_vector<uchar4> d_outputImg(srcSize);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(
d_red1.begin(),
d_blue1.begin(),
d_green1.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
d_red1.end(),
d_blue1.end(),
d_green1.end())),
d_outputImg.begin(),
combineChannels());
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// blend with the original destination image: interior pixels take the solved values, all others keep dst
thrust::device_vector<uchar4> d_blendedImg(srcSize);
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(d_outputImg.begin(), d_strictInteriorPixels.begin(), d_dstImg.begin())),
thrust::make_zip_iterator(thrust::make_tuple(d_outputImg.end(), d_strictInteriorPixels.end(), d_dstImg.end())),
d_blendedImg.begin(),
blendInterior());
checkCudaErrors(cudaMemcpy(h_blendedImg, thrust::raw_pointer_cast(d_blendedImg.data()), sizeof(uchar4)*srcSize, cudaMemcpyDeviceToHost));
}
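// Usage sketch (hypothetical caller; buffer names and sizes are illustrative only):
//     std::vector<uchar4> h_src(rows * cols), h_dst(rows * cols), h_out(rows * cols);
//     // ... fill h_src (white background + patch) and h_dst (destination image) ...
//     your_blend(h_src.data(), rows, cols, h_dst.data(), h_out.data());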
|
77c2caaffcc5c89adf74ef8c16e895ede3914cb8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// See this header for all of the recursive handling of tuples of vectors
#include <tests/utilities/tuple_vectors.h>
// See this header for all of the handling of valids' vectors
#include <tests/utilities/valid_vectors.h>
#include <tests/utilities/cudf_test_fixtures.h>
#include <join/joining.h>
#include <join/join_compute_api.h>
#include <utilities/bit_util.cuh>
#include <cudf/cudf.h>
#include <cudf/join.hpp>
#include <rmm/rmm.h>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <iostream>
#include <vector>
#include <map>
#include <type_traits>
#include <memory>
#include <cstdlib>
// Selects the kind of join operation that is performed
enum struct join_op
{
INNER,
LEFT,
FULL
};
// Each element of the result will be an index into the left and right columns where
// left_columns[left_index] == right_columns[right_index]
using result_type = typename std::pair<int, int>;
// Define stream operator for a std::pair for convenience of printing results.
// Needs to be in the std namespace to work with std::copy
namespace std{
template <typename first_t, typename second_t>
std::ostream& operator<<(std::ostream& os, std::pair<first_t, second_t> const & p)
{
os << p.first << ", " << p.second;
std::cout << "\n";
return os;
}
}
// A new instance of this class will be created for each *TEST(JoinTest, ...)
// Put all repeated setup and validation stuff here
template <class test_parameters>
struct JoinTest : public GdfTest
{
// The join type is passed via a member of the template argument class
const join_op op = test_parameters::op;
gdf_context ctxt = {
test_parameters::join_type == gdf_method::GDF_SORT,
test_parameters::join_type,
0
};
// multi_column_t is a tuple of vectors. The number of vectors in the tuple
// determines the number of columns to be joined, and the value_type of each
// vector determines the data type of the column
using multi_column_t = typename test_parameters::multi_column_t;
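// Illustrative example: a two-column join on an INT32 and a FLOAT32 column would use
//     using multi_column_t = std::tuple<std::vector<int32_t>, std::vector<float>>;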
multi_column_t left_columns;
multi_column_t right_columns;
// valids for multi_columns
std::vector<host_valid_pointer> left_valids;
std::vector<host_valid_pointer> right_valids;
// Type for a unique_ptr to a gdf_column with a custom deleter
// Custom deleter is defined at construction
using gdf_col_pointer =
typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>;
// Containers for unique_ptrs to gdf_columns that will be used in the gdf_join
// functions. unique_ptrs are used to automate freeing device memory
std::vector<gdf_col_pointer> gdf_left_columns;
std::vector<gdf_col_pointer> gdf_right_columns;
// Containers for the raw pointers to the gdf_columns that will be used as
// input to the gdf_join functions
std::vector<gdf_column*> gdf_raw_left_columns;
std::vector<gdf_column*> gdf_raw_right_columns;
JoinTest()
{
// Use a deterministic seed so the pseudo-random order is reproducible.
// Each time the class is constructed, the next seed value is used.
static size_t number_of_instantiations{0};
std::srand(number_of_instantiations++);
}
~JoinTest()
{
}
/* --------------------------------------------------------------------------*
* @brief Creates a unique_ptr that wraps a gdf_column structure
* initialized with a host vector
*
* @param host_vector vector containing data to be transferred to the device-side column
* @param host_valid vector containing valid masks associated with the supplied vector
* @param n_count null_count to be set for the generated column
*
* @returns A unique_ptr wrapping the new gdf_column
* --------------------------------------------------------------------------*/
template <typename col_type>
gdf_col_pointer create_gdf_column(std::vector<col_type> const & host_vector, gdf_valid_type* host_valid,
const gdf_size_type n_count)
{
// Deduce the type and set the gdf_dtype accordingly
gdf_dtype gdf_col_type;
if(std::is_same<col_type,int8_t>::value) gdf_col_type = GDF_INT8;
else if(std::is_same<col_type,uint8_t>::value) gdf_col_type = GDF_INT8;
else if(std::is_same<col_type,int16_t>::value) gdf_col_type = GDF_INT16;
else if(std::is_same<col_type,uint16_t>::value) gdf_col_type = GDF_INT16;
else if(std::is_same<col_type,int32_t>::value) gdf_col_type = GDF_INT32;
else if(std::is_same<col_type,uint32_t>::value) gdf_col_type = GDF_INT32;
else if(std::is_same<col_type,int64_t>::value) gdf_col_type = GDF_INT64;
else if(std::is_same<col_type,uint64_t>::value) gdf_col_type = GDF_INT64;
else if(std::is_same<col_type,float>::value) gdf_col_type = GDF_FLOAT32;
else if(std::is_same<col_type,double>::value) gdf_col_type = GDF_FLOAT64;
// Create a new instance of a gdf_column with a custom deleter that will
// free the associated device memory when it eventually goes out of scope
auto deleter = [](gdf_column* col) {
col->size = 0;
RMM_FREE(col->data, 0);
RMM_FREE(col->valid, 0);
};
gdf_col_pointer the_column{new gdf_column{}, deleter};
// Allocate device storage for gdf_column and copy contents from host_vector
EXPECT_EQ(RMM_ALLOC(&(the_column->data), host_vector.size() * sizeof(col_type), 0), RMM_SUCCESS);
EXPECT_EQ(hipMemcpy(the_column->data, host_vector.data(), host_vector.size() * sizeof(col_type), hipMemcpyHostToDevice), hipSuccess);
// Allocate device storage for gdf_column.valid
if (host_valid != nullptr) {
EXPECT_EQ(RMM_ALLOC((void**)&(the_column->valid), gdf_valid_allocation_size(host_vector.size()), 0), RMM_SUCCESS);
EXPECT_EQ(hipMemcpy(the_column->valid, host_valid, gdf_num_bitmask_elements(host_vector.size()), hipMemcpyHostToDevice), hipSuccess);
the_column->null_count = n_count;
} else {
the_column->valid = nullptr;
the_column->null_count = 0;
}
// Fill the gdf_column members
the_column->size = host_vector.size();
the_column->dtype = gdf_col_type;
gdf_dtype_extra_info extra_info{TIME_UNIT_NONE};
the_column->dtype_info = extra_info;
return the_column;
}
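// Example use (hypothetical values): build a 3-row INT32 column with no validity mask:
//     std::vector<int32_t> host_data{1, 2, 3};
//     gdf_col_pointer col = create_gdf_column<int32_t>(host_data, nullptr, 0);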
// Compile time recursion to convert each vector in a tuple of vectors into
// a gdf_column and append it to a vector of gdf_columns
template<std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I == sizeof...(Tp), void>::type
convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t, std::vector<host_valid_pointer>& valids, const gdf_size_type n_count)
{
//bottom of compile-time recursion
//purposely empty...
}
template<std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I < sizeof...(Tp), void>::type
convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t, std::vector<host_valid_pointer>& valids, const gdf_size_type n_count)
{
// Creates a gdf_column for the current vector and pushes it onto
// the vector of gdf_columns
if (valids.size() != 0) {
gdf_columns.push_back(create_gdf_column(std::get<I>(t), valids[I].get(), n_count));
} else {
gdf_columns.push_back(create_gdf_column(std::get<I>(t), nullptr, n_count));
}
//recurse to next vector in tuple
convert_tuple_to_gdf_columns<I + 1, Tp...>(gdf_columns, t, valids, n_count);
}
// Converts a tuple of host vectors into a vector of gdf_columns
std::vector<gdf_col_pointer>
initialize_gdf_columns(multi_column_t host_columns, std::vector<host_valid_pointer>& valids,
const gdf_size_type n_count)
{
std::vector<gdf_col_pointer> gdf_columns;
convert_tuple_to_gdf_columns(gdf_columns, host_columns, valids, n_count);
return gdf_columns;
}
/* --------------------------------------------------------------------------*
* @brief Initializes two sets of columns, left and right, with random
* values for the join operation.
*
* @param left_column_length The length of the left set of columns
* @param left_column_range The upper bound of random values for the left
* columns. Values are [0, left_column_range)
* @param right_column_length The length of the right set of columns
* @param right_column_range The upper bound of random values for the right
* columns. Values are [0, right_column_range)
* @param print Optionally print the left and right set of columns for debug
* -------------------------------------------------------------------------*/
void create_input( size_t left_column_length, size_t left_column_range,
size_t right_column_length, size_t right_column_range,
bool print = false, const gdf_size_type n_count = 0)
{
initialize_tuple(left_columns, left_column_length, left_column_range, static_cast<size_t>(ctxt.flag_sorted));
initialize_tuple(right_columns, right_column_length, right_column_range, static_cast<size_t>(ctxt.flag_sorted));
auto n_columns = std::tuple_size<multi_column_t>::value;
initialize_valids(left_valids, n_columns, left_column_length, 0);
initialize_valids(right_valids, n_columns, right_column_length, 0);
gdf_left_columns = initialize_gdf_columns(left_columns, left_valids, n_count);
gdf_right_columns = initialize_gdf_columns(right_columns, right_valids, n_count);
// Fill vector of raw pointers to gdf_columns
gdf_raw_left_columns.clear();
gdf_raw_right_columns.clear();
for(auto const& c : gdf_left_columns){
gdf_raw_left_columns.push_back(c.get());
}
for(auto const& c : gdf_right_columns){
gdf_raw_right_columns.push_back(c.get());
}
if(print)
{
std::cout << "Left column(s) created. Size: " << std::get<0>(left_columns).size() << std::endl;
print_tuples_and_valids(left_columns, left_valids);
std::cout << "Right column(s) created. Size: " << std::get<0>(right_columns).size() << std::endl;
print_tuples_and_valids(right_columns, right_valids);
}
}
/* --------------------------------------------------------------------------*
* @brief Creates two gdf_columns with size 1 data buffer allocations, but
* with a specified `size` attribute
*
* @param left_column_length The length of the left column
* @param right_column_length The length of the right column
* -------------------------------------------------------------------------*/
void create_dummy_input( gdf_size_type const left_column_length,
gdf_size_type const right_column_length)
{
using col_type = typename std::tuple_element<0, multi_column_t>::type::value_type;
// Only allocate a single element
std::vector<col_type> dummy_vector_left(1, static_cast<col_type>(0));
std::vector<col_type> dummy_vector_right(1, static_cast<col_type>(0));
gdf_left_columns.push_back(create_gdf_column<col_type>(dummy_vector_left, nullptr, 0));
gdf_right_columns.push_back(create_gdf_column<col_type>(dummy_vector_right, nullptr, 0));
// Fill vector of raw pointers to gdf_columns
for (auto const& c : gdf_left_columns) {
c->size = left_column_length;
gdf_raw_left_columns.push_back(c.get());
}
for (auto const& c : gdf_right_columns) {
c->size = right_column_length;
gdf_raw_right_columns.push_back(c.get());
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes a reference solution for joining the left and right sets of columns
*
* @param print Option to print the solution for debug
* @param sort Option to sort the solution. This is necessary for comparison against the gdf solution
*
* @returns A vector of 'result_type' where result_type is a structure with a left_index, right_index
* where left_columns[left_index] == right_columns[right_index]
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_reference_solution(bool print = false, bool sort = true)
{
// Use the type of the first vector as the key_type
using key_type = typename std::tuple_element<0, multi_column_t>::type::value_type;
using value_type = size_t;
// Multimap used to compute the reference solution
std::multimap<key_type, value_type> the_map;
// Build hash table that maps the first right column's values to their row index in the column
std::vector<key_type> const & build_column = std::get<0>(right_columns);
auto build_valid = right_valids[0].get();
for(size_t right_index = 0; right_index < build_column.size(); ++right_index)
{
if (gdf_is_valid(build_valid, right_index)) {
the_map.insert(std::make_pair(build_column[right_index], right_index));
}
}
std::vector<result_type> reference_result;
// Probe hash table with first left column
std::vector<key_type> const & probe_column = std::get<0>(left_columns);
auto probe_valid = left_valids[0].get();
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index)
{
bool match{false};
if (gdf_is_valid(probe_valid, left_index)) {
// Find all keys that match probe_key
const auto probe_key = probe_column[left_index];
auto range = the_map.equal_range(probe_key);
// Every element in the returned range identifies a row in the first right column that
// matches the probe_key. Need to check if all other columns also match
for(auto i = range.first; i != range.second; ++i)
{
const auto right_index = i->second;
// If all of the columns in right_columns[right_index] == all of the columns in left_columns[left_index]
// Then this index pair is added to the result as a matching pair of row indices
if( true == rows_equal_using_valids(left_columns, right_columns, left_valids, right_valids, left_index, right_index)){
reference_result.emplace_back(left_index, right_index);
match = true;
}
}
}
// For left joins, insert a NULL if no match is found
if((false == match) &&
((op == join_op::LEFT) || (op == join_op::FULL))){
constexpr int JoinNullValue{-1};
reference_result.emplace_back(left_index, JoinNullValue);
}
}
if (op == join_op::FULL)
{
the_map.clear();
// Build hash table that maps the first left column's values to their row index in the column
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index)
{
if (gdf_is_valid(probe_valid, left_index)) {
the_map.insert(std::make_pair(probe_column[left_index], left_index));
}
}
// Probe the hash table with first right column
// Add rows where a match for the right column does not exist
for(size_t right_index = 0; right_index < build_column.size(); ++right_index)
{
const auto probe_key = build_column[right_index];
auto search = the_map.find(probe_key);
if ((search == the_map.end()) || (!gdf_is_valid(build_valid, right_index)))
{
constexpr int JoinNullValue{-1};
reference_result.emplace_back(JoinNullValue, right_index);
}
}
}
// Sort the result
if(sort)
{
std::sort(reference_result.begin(), reference_result.end());
}
if(print)
{
std::cout << "Reference result size: " << reference_result.size() << std::endl;
std::cout << "left index, right index" << std::endl;
std::copy(reference_result.begin(), reference_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return reference_result;
}
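// Illustrative example (single INT32 column, all rows valid): with left = {1, 2, 2}
// and right = {2, 3}, an INNER join yields {(1,0), (2,0)}; a LEFT join additionally
// yields (0,-1) for the unmatched left row; a FULL join further adds (-1,1) for the
// unmatched right row.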
/* --------------------------------------------------------------------------*/
/**
* @brief Computes the result of joining the left and right sets of columns with the libgdf functions
*
* @param gdf_result A vector of result_type that holds the result of the libgdf join function
* @param print Option to print the result computed by the libgdf function
* @param sort Option to sort the result. This is required to compare the result against the reference solution
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_gdf_result(bool print = false, bool sort = true, gdf_error expected_result = GDF_SUCCESS)
{
const int num_columns = std::tuple_size<multi_column_t>::value;
gdf_column left_result{};
gdf_column right_result{};
left_result.size = 0;
right_result.size = 0;
std::vector<gdf_size_type> range;
std::vector<std::pair<int, int>> columns_in_common (num_columns);
for (int i = 0; i < num_columns; ++i)
{
range.push_back(i);
columns_in_common[i].first = i;
columns_in_common[i].second = i;
}
// The left and right join column indexes are the same
std::vector <gdf_column *> result_idx_cols = {&left_result, &right_result};
cudf::table left_gdf_columns(gdf_raw_left_columns);
cudf::table right_gdf_columns(gdf_raw_right_columns);
cudf::table result_idx_table (result_idx_cols);
cudf::table result;
switch(op)
{
case join_op::LEFT:
{
cudf::left_join(
left_gdf_columns, right_gdf_columns,
range, range, columns_in_common,
&result_idx_table, &ctxt);
break;
}
case join_op::INNER:
{
cudf::inner_join(
left_gdf_columns, right_gdf_columns,
range, range, columns_in_common,
&result_idx_table, &ctxt);
break;
}
case join_op::FULL:
{
cudf::full_join(
left_gdf_columns, right_gdf_columns,
range, range, columns_in_common,
&result_idx_table, &ctxt);
break;
}
default:
std::cout << "Invalid join method" << std::endl;
EXPECT_TRUE(false);
}
EXPECT_EQ(left_result.size, right_result.size) << "Join output size mismatch";
// The result is copied into a single host array of size 2*total_pairs: the first
// total_pairs elements are the left indices and the last total_pairs elements are the right indices
size_t total_pairs = left_result.size;
size_t output_size = total_pairs*2;
int * l_join_output = static_cast<int*>(left_result.data);
int * r_join_output = static_cast<int*>(right_result.data);
// Host vector to hold gdf join output
std::vector<int> host_result(output_size);
// Copy result of gdf join to the host
EXPECT_EQ(hipMemcpy(host_result.data(),
l_join_output, total_pairs * sizeof(int), hipMemcpyDeviceToHost), hipSuccess);
EXPECT_EQ(hipMemcpy(host_result.data() + total_pairs,
r_join_output, total_pairs * sizeof(int), hipMemcpyDeviceToHost), hipSuccess);
// Free the original join result
if(output_size > 0){
gdf_column_free(&left_result);
gdf_column_free(&right_result);
}
// Host vector of result_type pairs to hold final result for comparison to reference solution
std::vector<result_type> host_pair_result(total_pairs);
// Copy raw output into corresponding result_type pair
for(size_t i = 0; i < total_pairs; ++i){
host_pair_result[i].first = host_result[i];
host_pair_result[i].second = host_result[i + total_pairs];
}
// Sort the output for comparison to reference solution
if(sort){
std::sort(host_pair_result.begin(), host_pair_result.end());
}
if(print){
std::cout << "GDF result size: " << host_pair_result.size() << std::endl;
std::cout << "left index, right index" << std::endl;
std::copy(host_pair_result.begin(), host_pair_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return host_pair_result;
}
};
// This structure is used to nest the join operation, join method and
// number/types of columns for use with Google Test type-parameterized
// tests. Here join_operation refers to the type of join, e.g. INNER,
// LEFT, FULL, and join_method refers to the underlying join algorithm
// that performs it, e.g. GDF_HASH or GDF_SORT.
template<join_op join_operation,
gdf_method join_method,
typename tuple_of_vectors,
bool keys_are_unique = false>
struct TestParameters
{
// The method to use for the join
const static join_op op{join_operation};
// The method to use for the join
const static gdf_method join_type{join_method};
// The tuple of vectors that determines the number and types of the columns to join
using multi_column_t = tuple_of_vectors;
const static bool unique_keys{keys_are_unique};
};
const static gdf_method HASH = gdf_method::GDF_HASH;
const static gdf_method SORT = gdf_method::GDF_SORT;
template <typename... T>
using VTuple = std::tuple<std::vector<T>...>;
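// Illustrative example: TestParameters<join_op::INNER, HASH, VTuple<int32_t, float>>
// describes a hash-based inner join over one INT32 and one FLOAT32 column.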
// Using Google Tests "Type Parameterized Tests"
// Every test defined as TYPED_TEST(JoinTest, *) will be run once for every instance of
// TestParameters defined below
// The kind of join is determined by the first template argument to TestParameters
// The number and types of columns used in both the left and right sets of columns are
// determined by the number and types of vectors in the std::tuple<...> that is the second
// template argument to TestParameters
typedef ::testing::Types<
// Single column inner join tests for all types
TestParameters< join_op::INNER, HASH, VTuple<int32_t > >,
TestParameters< join_op::INNER, HASH, VTuple<int64_t > >,
TestParameters< join_op::INNER, HASH, VTuple<float > >,
TestParameters< join_op::INNER, HASH, VTuple<double > >,
TestParameters< join_op::INNER, HASH, VTuple<uint32_t> >,
TestParameters< join_op::INNER, HASH, VTuple<uint64_t> >,
TestParameters< join_op::INNER, SORT, VTuple<int32_t > >,
TestParameters< join_op::INNER, SORT, VTuple<int64_t > >,
TestParameters< join_op::INNER, SORT, VTuple<float > >,
TestParameters< join_op::INNER, SORT, VTuple<double > >,
TestParameters< join_op::INNER, SORT, VTuple<uint32_t> >,
TestParameters< join_op::INNER, SORT, VTuple<uint64_t> >,
// Single column left join tests for all types
TestParameters< join_op::LEFT, HASH, VTuple<int32_t > >,
TestParameters< join_op::LEFT, HASH, VTuple<int64_t > >,
TestParameters< join_op::LEFT, HASH, VTuple<float > >,
TestParameters< join_op::LEFT, HASH, VTuple<double > >,
TestParameters< join_op::LEFT, HASH, VTuple<uint32_t> >,
TestParameters< join_op::LEFT, HASH, VTuple<uint64_t> >,
TestParameters< join_op::LEFT, SORT, VTuple<int32_t > >,
TestParameters< join_op::LEFT, SORT, VTuple<int64_t > >,
TestParameters< join_op::LEFT, SORT, VTuple<float > >,
TestParameters< join_op::LEFT, SORT, VTuple<double > >,
TestParameters< join_op::LEFT, SORT, VTuple<uint32_t> >,
TestParameters< join_op::LEFT, SORT, VTuple<uint64_t> >,
// Single column full join tests for all types
TestParameters< join_op::FULL, HASH, VTuple<int32_t > >,
TestParameters< join_op::FULL, HASH, VTuple<int64_t > >,
TestParameters< join_op::FULL, HASH, VTuple<float > >,
TestParameters< join_op::FULL, HASH, VTuple<double > >,
TestParameters< join_op::FULL, HASH, VTuple<uint32_t> >,
TestParameters< join_op::FULL, HASH, VTuple<uint64_t> >,
// Two Column Left Join tests for some combination of types
TestParameters< join_op::LEFT, HASH, VTuple<int32_t , int32_t> >,
TestParameters< join_op::LEFT, HASH, VTuple<uint32_t, int32_t> >,
// Three Column Left Join tests for some combination of types
TestParameters< join_op::LEFT, HASH, VTuple<int32_t , uint32_t, float > >,
TestParameters< join_op::LEFT, HASH, VTuple<double , uint32_t, int64_t> >,
// Two Column Inner Join tests for some combination of types
TestParameters< join_op::INNER, HASH, VTuple<int32_t , int32_t> >,
TestParameters< join_op::INNER, HASH, VTuple<uint32_t, int32_t> >,
// Three Column Inner Join tests for some combination of types
TestParameters< join_op::INNER, HASH, VTuple<int32_t , uint32_t, float > >,
TestParameters< join_op::INNER, HASH, VTuple<double , uint32_t, int64_t> >,
// Four column test for Left Joins
TestParameters< join_op::LEFT, HASH, VTuple<double, int32_t, int64_t, int32_t> >,
TestParameters< join_op::LEFT, HASH, VTuple<float, uint32_t, double, int32_t> >,
// Four column test for Inner Joins
TestParameters< join_op::INNER, HASH, VTuple<uint32_t, float, int64_t, int32_t> >,
TestParameters< join_op::INNER, HASH, VTuple<double, float, int64_t, double> >,
// Five column test for Left Joins
TestParameters< join_op::LEFT, HASH, VTuple<double, int32_t, int64_t, int32_t, int32_t> >,
// Five column test for Inner Joins
TestParameters< join_op::INNER, HASH, VTuple<uint32_t, float, int64_t, int32_t, float> >
> Implementations;
TYPED_TEST_CASE(JoinTest, Implementations);
// This test is used for debugging purposes and is disabled by default.
// The input sizes are small and a large amount of debug printing is enabled.
TYPED_TEST(JoinTest, DISABLED_DebugTest)
{
this->create_input(5, 2,
5, 2,
true);
std::vector<result_type> reference_result = this->compute_reference_solution(true);
std::vector<result_type> gdf_result = this->compute_gdf_result(true);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, EqualValues)
{
this->create_input(100,1,
1000,1);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, MaxRandomValues)
{
this->create_input(10000,RAND_MAX,
10000,RAND_MAX);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, LeftColumnsBigger)
{
this->create_input(10000,100,
100,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, RightColumnsBigger)
{
this->create_input(100,100,
10000,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, EmptyLeftFrame)
{
this->create_input(0,100,
1000,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, EmptyRightFrame)
{
this->create_input(1000,100,
0,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, BothFramesEmpty)
{
this->create_input(0,100,
0,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
// The below tests check correct reporting of missing valid pointer
// Create a new derived class from JoinTest so we can do a new Typed Test set of tests
template <class test_parameters>
struct JoinValidTest : public JoinTest<test_parameters>
{ };
using ValidTestImplementation = testing::Types< TestParameters< join_op::INNER, SORT, VTuple<int32_t >>,
TestParameters< join_op::LEFT , SORT, VTuple<int32_t >>,
TestParameters< join_op::FULL , SORT, VTuple<int32_t >> >;
TYPED_TEST_CASE(JoinValidTest, ValidTestImplementation);
TYPED_TEST(JoinValidTest, ReportValidMaskError)
{
this->create_input(1000,100,
100,100,
false, 1);
CUDF_EXPECT_THROW_MESSAGE (this->compute_gdf_result(false, true, GDF_VALIDITY_UNSUPPORTED), "GDF Validity is unsupported by sort_join");
}
// The below tests are for testing inputs that are at or above the maximum input size possible
// Create a new derived class from JoinTest so we can do a new Typed Test set of tests
template <class test_parameters>
struct MaxJoinTest : public JoinTest<test_parameters>
{ };
// Only test for single column inputs for Inner and Left joins because these tests take a long time
using MaxImplementations = testing::Types< TestParameters< join_op::INNER, HASH, VTuple<int32_t >>,
TestParameters< join_op::LEFT, HASH, VTuple<int32_t >> >;
TYPED_TEST_CASE(MaxJoinTest, MaxImplementations);
TYPED_TEST(MaxJoinTest, InputTooLarge)
{
const gdf_size_type left_table_size = 100;
const gdf_size_type right_table_size =
static_cast<gdf_size_type>(std::numeric_limits<int>::max());
this->create_dummy_input(left_table_size, right_table_size);
const bool print_result{false};
const bool sort_result{false};
// We expect the function to fail when the input is this large
const gdf_error expected_error{GDF_COLUMN_SIZE_TOO_BIG};
CUDF_EXPECT_THROW_MESSAGE ( this->compute_gdf_result(print_result, sort_result, expected_error), "right column size is too big");
}
// These tests will only fail on a non-release build where `assert`s are enabled
#ifndef NDEBUG
TEST(HashTableSizeDeathTest, ZeroOccupancyTest){
int const num_insertions{100};
uint32_t occupancy{0};
EXPECT_DEATH(compute_hash_table_size(num_insertions,occupancy),"");
}
TEST(HashTableSizeDeathTest, TooLargeOccupancyTest){
int const num_insertions{100};
uint32_t occupancy{101};
EXPECT_DEATH(compute_hash_table_size(num_insertions,occupancy),"");
}
#endif
TEST(HashTableSizeTest, OverflowTest){
int const num_insertions{std::numeric_limits<int>::max()};
uint32_t occupancy{50};
size_t hash_table_size = compute_hash_table_size(num_insertions, occupancy);
size_t expected_size{ size_t{2} * std::numeric_limits<int>::max()};
ASSERT_TRUE(hash_table_size > num_insertions);
EXPECT_EQ(expected_size, hash_table_size);
}
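// Note on the expectation above: the test encodes the assumption that for a desired
// occupancy of 50% the table must hold num_insertions * 100 / occupancy = 2 * INT_MAX
// slots, so the computation has to be carried out in size_t rather than int to avoid
// overflow.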
| 77c2caaffcc5c89adf74ef8c16e895ede3914cb8.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// See this header for all of the recursive handling of tuples of vectors
#include <tests/utilities/tuple_vectors.h>
// See this header for all of the handling of valids' vectors
#include <tests/utilities/valid_vectors.h>
#include <tests/utilities/cudf_test_fixtures.h>
#include <join/joining.h>
#include <join/join_compute_api.h>
#include <utilities/bit_util.cuh>
#include <cudf/cudf.h>
#include <cudf/join.hpp>
#include <rmm/rmm.h>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <iostream>
#include <vector>
#include <map>
#include <type_traits>
#include <memory>
#include <cstdlib>
// Selects the kind of join operation that is performed
enum struct join_op
{
INNER,
LEFT,
FULL
};
// Each element of the result will be an index into the left and right columns where
// left_columns[left_index] == right_columns[right_index]
using result_type = typename std::pair<int, int>;
// Define stream operator for a std::pair for convenience of printing results.
// Needs to be in the std namespace to work with std::copy
namespace std{
template <typename first_t, typename second_t>
std::ostream& operator<<(std::ostream& os, std::pair<first_t, second_t> const & p)
{
os << p.first << ", " << p.second;
std::cout << "\n";
return os;
}
}
// A new instance of this class will be created for each *TEST(JoinTest, ...)
// Put all repeated setup and validation stuff here
template <class test_parameters>
struct JoinTest : public GdfTest
{
// The join type is passed via a member of the template argument class
const join_op op = test_parameters::op;
gdf_context ctxt = {
test_parameters::join_type == gdf_method::GDF_SORT,
test_parameters::join_type,
0
};
// multi_column_t is a tuple of vectors. The number of vectors in the tuple
// determines the number of columns to be joined, and the value_type of each
// vector determines the data type of the column
using multi_column_t = typename test_parameters::multi_column_t;
multi_column_t left_columns;
multi_column_t right_columns;
// valids for multi_columns
std::vector<host_valid_pointer> left_valids;
std::vector<host_valid_pointer> right_valids;
// Type for a unique_ptr to a gdf_column with a custom deleter
// Custom deleter is defined at construction
using gdf_col_pointer =
typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>;
// Containers for unique_ptrs to gdf_columns that will be used in the gdf_join
// functions. unique_ptrs are used to automate freeing device memory
std::vector<gdf_col_pointer> gdf_left_columns;
std::vector<gdf_col_pointer> gdf_right_columns;
// Containers for the raw pointers to the gdf_columns that will be used as
// input to the gdf_join functions
std::vector<gdf_column*> gdf_raw_left_columns;
std::vector<gdf_column*> gdf_raw_right_columns;
JoinTest()
{
// Use constant seed so the psuedo-random order is the same each time
// Each time the class is constructed a new constant seed is used
static size_t number_of_instantiations{0};
std::srand(number_of_instantiations++);
}
~JoinTest()
{
}
/* --------------------------------------------------------------------------*
* @brief Creates a unique_ptr that wraps a gdf_column structure
* initialized with a host vector
*
* @param host_vector vector containing data to be transferred to the device-side column
* @param host_valid vector containing valid masks associated with the supplied vector
* @param n_count null_count to be set for the generated column
*
* @returns A unique_ptr wrapping the new gdf_column
* --------------------------------------------------------------------------*/
template <typename col_type>
gdf_col_pointer create_gdf_column(std::vector<col_type> const & host_vector, gdf_valid_type* host_valid,
const gdf_size_type n_count)
{
// Deduce the type and set the gdf_dtype accordingly
gdf_dtype gdf_col_type;
if(std::is_same<col_type,int8_t>::value) gdf_col_type = GDF_INT8;
else if(std::is_same<col_type,uint8_t>::value) gdf_col_type = GDF_INT8;
else if(std::is_same<col_type,int16_t>::value) gdf_col_type = GDF_INT16;
else if(std::is_same<col_type,uint16_t>::value) gdf_col_type = GDF_INT16;
else if(std::is_same<col_type,int32_t>::value) gdf_col_type = GDF_INT32;
else if(std::is_same<col_type,uint32_t>::value) gdf_col_type = GDF_INT32;
else if(std::is_same<col_type,int64_t>::value) gdf_col_type = GDF_INT64;
else if(std::is_same<col_type,uint64_t>::value) gdf_col_type = GDF_INT64;
else if(std::is_same<col_type,float>::value) gdf_col_type = GDF_FLOAT32;
else if(std::is_same<col_type,double>::value) gdf_col_type = GDF_FLOAT64;
// Create a new instance of a gdf_column with a custom deleter that will
// free the associated device memory when it eventually goes out of scope
auto deleter = [](gdf_column* col) {
col->size = 0;
RMM_FREE(col->data, 0);
RMM_FREE(col->valid, 0);
};
gdf_col_pointer the_column{new gdf_column{}, deleter};
// Allocate device storage for gdf_column and copy contents from host_vector
EXPECT_EQ(RMM_ALLOC(&(the_column->data), host_vector.size() * sizeof(col_type), 0), RMM_SUCCESS);
EXPECT_EQ(cudaMemcpy(the_column->data, host_vector.data(), host_vector.size() * sizeof(col_type), cudaMemcpyHostToDevice), cudaSuccess);
// Allocate device storage for gdf_column.valid
if (host_valid != nullptr) {
EXPECT_EQ(RMM_ALLOC((void**)&(the_column->valid), gdf_valid_allocation_size(host_vector.size()), 0), RMM_SUCCESS);
EXPECT_EQ(cudaMemcpy(the_column->valid, host_valid, gdf_num_bitmask_elements(host_vector.size()), cudaMemcpyHostToDevice), cudaSuccess);
the_column->null_count = n_count;
} else {
the_column->valid = nullptr;
the_column->null_count = 0;
}
// Fill the gdf_column members
the_column->size = host_vector.size();
the_column->dtype = gdf_col_type;
gdf_dtype_extra_info extra_info{TIME_UNIT_NONE};
the_column->dtype_info = extra_info;
return the_column;
}
// Compile time recursion to convert each vector in a tuple of vectors into
// a gdf_column and append it to a vector of gdf_columns
template<std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I == sizeof...(Tp), void>::type
convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t, std::vector<host_valid_pointer>& valids, const gdf_size_type n_count)
{
//bottom of compile-time recursion
//purposely empty...
}
template<std::size_t I = 0, typename... Tp>
inline typename std::enable_if<I < sizeof...(Tp), void>::type
convert_tuple_to_gdf_columns(std::vector<gdf_col_pointer> &gdf_columns,std::tuple<std::vector<Tp>...>& t, std::vector<host_valid_pointer>& valids, const gdf_size_type n_count)
{
// Creates a gdf_column for the current vector and pushes it onto
// the vector of gdf_columns
if (valids.size() != 0) {
gdf_columns.push_back(create_gdf_column(std::get<I>(t), valids[I].get(), n_count));
} else {
gdf_columns.push_back(create_gdf_column(std::get<I>(t), nullptr, n_count));
}
//recurse to next vector in tuple
convert_tuple_to_gdf_columns<I + 1, Tp...>(gdf_columns, t, valids, n_count);
}
// Converts a tuple of host vectors into a vector of gdf_columns
std::vector<gdf_col_pointer>
initialize_gdf_columns(multi_column_t host_columns, std::vector<host_valid_pointer>& valids,
const gdf_size_type n_count)
{
std::vector<gdf_col_pointer> gdf_columns;
convert_tuple_to_gdf_columns(gdf_columns, host_columns, valids, n_count);
return gdf_columns;
}
/* --------------------------------------------------------------------------*
* @brief Initializes two sets of columns, left and right, with random
* values for the join operation.
*
* @param left_column_length The length of the left set of columns
* @param left_column_range The upper bound of random values for the left
* columns. Values are [0, left_column_range)
* @param right_column_length The length of the right set of columns
* @param right_column_range The upper bound of random values for the right
* columns. Values are [0, right_column_range)
* @param print Optionally print the left and right set of columns for debug
* -------------------------------------------------------------------------*/
void create_input( size_t left_column_length, size_t left_column_range,
size_t right_column_length, size_t right_column_range,
bool print = false, const gdf_size_type n_count = 0)
{
initialize_tuple(left_columns, left_column_length, left_column_range, static_cast<size_t>(ctxt.flag_sorted));
initialize_tuple(right_columns, right_column_length, right_column_range, static_cast<size_t>(ctxt.flag_sorted));
auto n_columns = std::tuple_size<multi_column_t>::value;
initialize_valids(left_valids, n_columns, left_column_length, 0);
initialize_valids(right_valids, n_columns, right_column_length, 0);
gdf_left_columns = initialize_gdf_columns(left_columns, left_valids, n_count);
gdf_right_columns = initialize_gdf_columns(right_columns, right_valids, n_count);
// Fill vector of raw pointers to gdf_columns
gdf_raw_left_columns.clear();
gdf_raw_right_columns.clear();
for(auto const& c : gdf_left_columns){
gdf_raw_left_columns.push_back(c.get());
}
for(auto const& c : gdf_right_columns){
gdf_raw_right_columns.push_back(c.get());
}
if(print)
{
std::cout << "Left column(s) created. Size: " << std::get<0>(left_columns).size() << std::endl;
print_tuples_and_valids(left_columns, left_valids);
std::cout << "Right column(s) created. Size: " << std::get<0>(right_columns).size() << std::endl;
print_tuples_and_valids(right_columns, right_valids);
}
}
/* --------------------------------------------------------------------------*
* @brief Creates two gdf_columns with size 1 data buffer allocations, but
* with a specified `size` attribute
*
* @param left_column_length The length of the left column
* @param right_column_length The length of the right column
* -------------------------------------------------------------------------*/
void create_dummy_input( gdf_size_type const left_column_length,
gdf_size_type const right_column_length)
{
using col_type = typename std::tuple_element<0, multi_column_t>::type::value_type;
// Only allocate a single element
std::vector<col_type> dummy_vector_left(1, static_cast<col_type>(0));
std::vector<col_type> dummy_vector_right(1, static_cast<col_type>(0));
gdf_left_columns.push_back(create_gdf_column<col_type>(dummy_vector_left, nullptr, 0));
gdf_right_columns.push_back(create_gdf_column<col_type>(dummy_vector_right, nullptr, 0));
// Fill vector of raw pointers to gdf_columns
for (auto const& c : gdf_left_columns) {
c->size = left_column_length;
gdf_raw_left_columns.push_back(c.get());
}
for (auto const& c : gdf_right_columns) {
c->size = right_column_length;
gdf_raw_right_columns.push_back(c.get());
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes a reference solution for joining the left and right sets of columns
*
* @param print Option to print the solution for debug
* @param sort Option to sort the solution. This is necessary for comparison against the gdf solution
*
* @returns A vector of 'result_type' where result_type is a structure with a left_index, right_index
* where left_columns[left_index] == right_columns[right_index]
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_reference_solution(bool print = false, bool sort = true)
{
// Use the type of the first vector as the key_type
using key_type = typename std::tuple_element<0, multi_column_t>::type::value_type;
using value_type = size_t;
// Multimap used to compute the reference solution
std::multimap<key_type, value_type> the_map;
// Build hash table that maps the first right column's values to their row index in the column
std::vector<key_type> const & build_column = std::get<0>(right_columns);
auto build_valid = right_valids[0].get();
for(size_t right_index = 0; right_index < build_column.size(); ++right_index)
{
if (gdf_is_valid(build_valid, right_index)) {
the_map.insert(std::make_pair(build_column[right_index], right_index));
}
}
std::vector<result_type> reference_result;
// Probe hash table with first left column
std::vector<key_type> const & probe_column = std::get<0>(left_columns);
auto probe_valid = left_valids[0].get();
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index)
{
bool match{false};
if (gdf_is_valid(probe_valid, left_index)) {
// Find all keys that match probe_key
const auto probe_key = probe_column[left_index];
auto range = the_map.equal_range(probe_key);
// Every element in the returned range identifies a row in the first right column that
// matches the probe_key. Need to check if all other columns also match
for(auto i = range.first; i != range.second; ++i)
{
const auto right_index = i->second;
// If all of the columns in right_columns[right_index] == all of the columns in left_columns[left_index]
// Then this index pair is added to the result as a matching pair of row indices
if( true == rows_equal_using_valids(left_columns, right_columns, left_valids, right_valids, left_index, right_index)){
reference_result.emplace_back(left_index, right_index);
match = true;
}
}
}
// For left joins, insert a NULL if no match is found
if((false == match) &&
((op == join_op::LEFT) || (op == join_op::FULL))){
constexpr int JoinNullValue{-1};
reference_result.emplace_back(left_index, JoinNullValue);
}
}
if (op == join_op::FULL)
{
the_map.clear();
// Build hash table that maps the first left column's values to their row index in the column
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index)
{
if (gdf_is_valid(probe_valid, left_index)) {
the_map.insert(std::make_pair(probe_column[left_index], left_index));
}
}
// Probe the hash table with first right column
// Add rows where a match for the right column does not exist
for(size_t right_index = 0; right_index < build_column.size(); ++right_index)
{
const auto probe_key = build_column[right_index];
auto search = the_map.find(probe_key);
if ((search == the_map.end()) || (!gdf_is_valid(build_valid, right_index)))
{
constexpr int JoinNullValue{-1};
reference_result.emplace_back(JoinNullValue, right_index);
}
}
}
// Sort the result
if(sort)
{
std::sort(reference_result.begin(), reference_result.end());
}
if(print)
{
std::cout << "Reference result size: " << reference_result.size() << std::endl;
std::cout << "left index, right index" << std::endl;
std::copy(reference_result.begin(), reference_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return reference_result;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes the result of joining the left and right sets of columns with the libgdf functions
*
* @param gdf_result A vector of result_type that holds the result of the libgdf join function
* @param print Option to print the result computed by the libgdf function
* @param sort Option to sort the result. This is required to compare the result against the reference solution
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_gdf_result(bool print = false, bool sort = true, gdf_error expected_result = GDF_SUCCESS)
{
const int num_columns = std::tuple_size<multi_column_t>::value;
gdf_column left_result{};
gdf_column right_result{};
left_result.size = 0;
right_result.size = 0;
std::vector<gdf_size_type> range;
std::vector<std::pair<int, int>> columns_in_common (num_columns);
for (int i = 0; i < num_columns; ++i)
{
range.push_back(i);
columns_in_common[i].first = i;
columns_in_common[i].second = i;
}
// The left and right join column indexes are the same
std::vector <gdf_column *> result_idx_cols = {&left_result, &right_result};
cudf::table left_gdf_columns(gdf_raw_left_columns);
cudf::table right_gdf_columns(gdf_raw_right_columns);
cudf::table result_idx_table (result_idx_cols);
cudf::table result;
switch(op)
{
case join_op::LEFT:
{
cudf::left_join(
left_gdf_columns, right_gdf_columns,
range, range, columns_in_common,
&result_idx_table, &ctxt);
break;
}
case join_op::INNER:
{
cudf::inner_join(
left_gdf_columns, right_gdf_columns,
range, range, columns_in_common,
&result_idx_table, &ctxt);
break;
}
case join_op::FULL:
{
cudf::full_join(
left_gdf_columns, right_gdf_columns,
range, range, columns_in_common,
&result_idx_table, &ctxt);
break;
}
default:
std::cout << "Invalid join method" << std::endl;
EXPECT_TRUE(false);
}
EXPECT_EQ(left_result.size, right_result.size) << "Join output size mismatch";
// The result is copied into a single host array of size 2*total_pairs: the first
// total_pairs elements are the left indices and the last total_pairs elements are the right indices
size_t total_pairs = left_result.size;
size_t output_size = total_pairs*2;
int * l_join_output = static_cast<int*>(left_result.data);
int * r_join_output = static_cast<int*>(right_result.data);
// Host vector to hold gdf join output
std::vector<int> host_result(output_size);
// Copy result of gdf join to the host
EXPECT_EQ(cudaMemcpy(host_result.data(),
l_join_output, total_pairs * sizeof(int), cudaMemcpyDeviceToHost), cudaSuccess);
EXPECT_EQ(cudaMemcpy(host_result.data() + total_pairs,
r_join_output, total_pairs * sizeof(int), cudaMemcpyDeviceToHost), cudaSuccess);
// Free the original join result
if(output_size > 0){
gdf_column_free(&left_result);
gdf_column_free(&right_result);
}
// Host vector of result_type pairs to hold final result for comparison to reference solution
std::vector<result_type> host_pair_result(total_pairs);
// Copy raw output into corresponding result_type pair
for(size_t i = 0; i < total_pairs; ++i){
host_pair_result[i].first = host_result[i];
host_pair_result[i].second = host_result[i + total_pairs];
}
// Sort the output for comparison to reference solution
if(sort){
std::sort(host_pair_result.begin(), host_pair_result.end());
}
if(print){
std::cout << "GDF result size: " << host_pair_result.size() << std::endl;
std::cout << "left index, right index" << std::endl;
std::copy(host_pair_result.begin(), host_pair_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return host_pair_result;
}
};
// This structure is used to nest the join operation, join method and
// number/types of columns for use with Google Test type-parameterized
// tests. Here join_operation refers to the type of join, e.g. INNER,
// LEFT, FULL, and join_method refers to the underlying join algorithm
// that performs it, e.g. GDF_HASH or GDF_SORT.
template<join_op join_operation,
gdf_method join_method,
typename tuple_of_vectors,
bool keys_are_unique = false>
struct TestParameters
{
// The method to use for the join
const static join_op op{join_operation};
// The method to use for the join
const static gdf_method join_type{join_method};
// The tuple of vectors that determines the number and types of the columns to join
using multi_column_t = tuple_of_vectors;
const static bool unique_keys{keys_are_unique};
};
const static gdf_method HASH = gdf_method::GDF_HASH;
const static gdf_method SORT = gdf_method::GDF_SORT;
template <typename... T>
using VTuple = std::tuple<std::vector<T>...>;
// Using Google Tests "Type Parameterized Tests"
// Every test defined as TYPED_TEST(JoinTest, *) will be run once for every instance of
// TestParameters defined below
// The kind of join is determined by the first template argument to TestParameters
// The number and types of columns used in both the left and right sets of columns are
// determined by the number and types of vectors in the std::tuple<...> that is the second
// template argument to TestParameters
typedef ::testing::Types<
// Single column inner join tests for all types
TestParameters< join_op::INNER, HASH, VTuple<int32_t > >,
TestParameters< join_op::INNER, HASH, VTuple<int64_t > >,
TestParameters< join_op::INNER, HASH, VTuple<float > >,
TestParameters< join_op::INNER, HASH, VTuple<double > >,
TestParameters< join_op::INNER, HASH, VTuple<uint32_t> >,
TestParameters< join_op::INNER, HASH, VTuple<uint64_t> >,
TestParameters< join_op::INNER, SORT, VTuple<int32_t > >,
TestParameters< join_op::INNER, SORT, VTuple<int64_t > >,
TestParameters< join_op::INNER, SORT, VTuple<float > >,
TestParameters< join_op::INNER, SORT, VTuple<double > >,
TestParameters< join_op::INNER, SORT, VTuple<uint32_t> >,
TestParameters< join_op::INNER, SORT, VTuple<uint64_t> >,
// Single column left join tests for all types
TestParameters< join_op::LEFT, HASH, VTuple<int32_t > >,
TestParameters< join_op::LEFT, HASH, VTuple<int64_t > >,
TestParameters< join_op::LEFT, HASH, VTuple<float > >,
TestParameters< join_op::LEFT, HASH, VTuple<double > >,
TestParameters< join_op::LEFT, HASH, VTuple<uint32_t> >,
TestParameters< join_op::LEFT, HASH, VTuple<uint64_t> >,
TestParameters< join_op::LEFT, SORT, VTuple<int32_t > >,
TestParameters< join_op::LEFT, SORT, VTuple<int64_t > >,
TestParameters< join_op::LEFT, SORT, VTuple<float > >,
TestParameters< join_op::LEFT, SORT, VTuple<double > >,
TestParameters< join_op::LEFT, SORT, VTuple<uint32_t> >,
TestParameters< join_op::LEFT, SORT, VTuple<uint64_t> >,
// Single column full join tests for all types
TestParameters< join_op::FULL, HASH, VTuple<int32_t > >,
TestParameters< join_op::FULL, HASH, VTuple<int64_t > >,
TestParameters< join_op::FULL, HASH, VTuple<float > >,
TestParameters< join_op::FULL, HASH, VTuple<double > >,
TestParameters< join_op::FULL, HASH, VTuple<uint32_t> >,
TestParameters< join_op::FULL, HASH, VTuple<uint64_t> >,
// Two Column Left Join tests for some combination of types
TestParameters< join_op::LEFT, HASH, VTuple<int32_t , int32_t> >,
TestParameters< join_op::LEFT, HASH, VTuple<uint32_t, int32_t> >,
// Three Column Left Join tests for some combination of types
TestParameters< join_op::LEFT, HASH, VTuple<int32_t , uint32_t, float > >,
TestParameters< join_op::LEFT, HASH, VTuple<double , uint32_t, int64_t> >,
// Two Column Inner Join tests for some combination of types
TestParameters< join_op::INNER, HASH, VTuple<int32_t , int32_t> >,
TestParameters< join_op::INNER, HASH, VTuple<uint32_t, int32_t> >,
// Three Column Inner Join tests for some combination of types
TestParameters< join_op::INNER, HASH, VTuple<int32_t , uint32_t, float > >,
TestParameters< join_op::INNER, HASH, VTuple<double , uint32_t, int64_t> >,
// Four column test for Left Joins
TestParameters< join_op::LEFT, HASH, VTuple<double, int32_t, int64_t, int32_t> >,
TestParameters< join_op::LEFT, HASH, VTuple<float, uint32_t, double, int32_t> >,
// Four column test for Inner Joins
TestParameters< join_op::INNER, HASH, VTuple<uint32_t, float, int64_t, int32_t> >,
TestParameters< join_op::INNER, HASH, VTuple<double, float, int64_t, double> >,
// Five column test for Left Joins
TestParameters< join_op::LEFT, HASH, VTuple<double, int32_t, int64_t, int32_t, int32_t> >,
// Five column test for Inner Joins
TestParameters< join_op::INNER, HASH, VTuple<uint32_t, float, int64_t, int32_t, float> >
> Implementations;
TYPED_TEST_CASE(JoinTest, Implementations);
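// Illustrative sketch (assuming the TestParameters and JoinTest fixture above): an
// entry such as TestParameters< join_op::INNER, HASH, VTuple<int32_t> > provides
// op == join_op::INNER, join_type == gdf_method::GDF_HASH, and
// multi_column_t == std::tuple<std::vector<int32_t>>, i.e. a single int32 key column.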
// This test is used for debugging purposes and is disabled by default.
// The input sizes are small and a large amount of debug printing is enabled.
TYPED_TEST(JoinTest, DISABLED_DebugTest)
{
this->create_input(5, 2,
5, 2,
true);
std::vector<result_type> reference_result = this->compute_reference_solution(true);
std::vector<result_type> gdf_result = this->compute_gdf_result(true);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, EqualValues)
{
this->create_input(100,1,
1000,1);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, MaxRandomValues)
{
this->create_input(10000,RAND_MAX,
10000,RAND_MAX);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, LeftColumnsBigger)
{
this->create_input(10000,100,
100,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, RightColumnsBigger)
{
this->create_input(100,100,
10000,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, EmptyLeftFrame)
{
this->create_input(0,100,
1000,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, EmptyRightFrame)
{
this->create_input(1000,100,
0,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TYPED_TEST(JoinTest, BothFramesEmpty)
{
this->create_input(0,100,
0,100);
std::vector<result_type> reference_result = this->compute_reference_solution();
std::vector<result_type> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
// The below tests check correct reporting of a missing valid pointer
// Create a new derived class from JoinTest so we can do a new Typed Test set of tests
template <class test_parameters>
struct JoinValidTest : public JoinTest<test_parameters>
{ };
using ValidTestImplementation = testing::Types< TestParameters< join_op::INNER, SORT, VTuple<int32_t >>,
TestParameters< join_op::LEFT , SORT, VTuple<int32_t >>,
TestParameters< join_op::FULL , SORT, VTuple<int32_t >> >;
TYPED_TEST_CASE(JoinValidTest, ValidTestImplementation);
TYPED_TEST(JoinValidTest, ReportValidMaskError)
{
this->create_input(1000,100,
100,100,
false, 1);
CUDF_EXPECT_THROW_MESSAGE (this->compute_gdf_result(false, true, GDF_VALIDITY_UNSUPPORTED), "GDF Validity is unsupported by sort_join");
}
// The below tests exercise inputs that are at or above the maximum possible input size
// Create a new derived class from JoinTest so we can do a new Typed Test set of tests
template <class test_parameters>
struct MaxJoinTest : public JoinTest<test_parameters>
{ };
// Only test for single column inputs for Inner and Left joins because these tests take a long time
using MaxImplementations = testing::Types< TestParameters< join_op::INNER, HASH, VTuple<int32_t >>,
TestParameters< join_op::LEFT, HASH, VTuple<int32_t >> >;
TYPED_TEST_CASE(MaxJoinTest, MaxImplementations);
TYPED_TEST(MaxJoinTest, InputTooLarge)
{
const gdf_size_type left_table_size = 100;
const gdf_size_type right_table_size =
static_cast<gdf_size_type>(std::numeric_limits<int>::max());
this->create_dummy_input(left_table_size, right_table_size);
const bool print_result{false};
const bool sort_result{false};
// We expect the function to fail when the input is this large
const gdf_error expected_error{GDF_COLUMN_SIZE_TOO_BIG};
CUDF_EXPECT_THROW_MESSAGE ( this->compute_gdf_result(print_result, sort_result, expected_error), "right column size is too big");
}
// These tests will only fail on a non-release build where `assert`s are enabled
#ifndef NDEBUG
TEST(HashTableSizeDeathTest, ZeroOccupancyTest){
int const num_insertions{100};
uint32_t occupancy{0};
EXPECT_DEATH(compute_hash_table_size(num_insertions,occupancy),"");
}
TEST(HashTableSizeDeathTest, TooLargeOccupancyTest){
int const num_insertions{100};
uint32_t occupancy{101};
EXPECT_DEATH(compute_hash_table_size(num_insertions,occupancy),"");
}
#endif
TEST(HashTableSizeTest, OverflowTest){
int const num_insertions{std::numeric_limits<int>::max()};
uint32_t occupancy{50};
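// At 50% occupancy the requested capacity works out to 2 * INT_MAX (see
// expected_size below), which overflows a 32-bit int but fits in the size_t
// returned by compute_hash_table_size.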
size_t hash_table_size = compute_hash_table_size(num_insertions, occupancy);
size_t expected_size{ size_t{2} * std::numeric_limits<int>::max()};
ASSERT_TRUE(hash_table_size > num_insertions);
EXPECT_EQ(expected_size, hash_table_size);
}
|
ce57800aad789d3cb1ae8e0993c5a73c0e6babdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define PI 3.141592
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
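// Typical use: wrap every runtime call, e.g. CUDA_CHECK_RETURN(hipMalloc(&ptr, bytes));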
__global__ void raysum(float *dev_f , float *dev_r , int wdF, int wdR, float dtheta, float dt, int nrays){
float ini, delta, x, y, cumsum, tol, ctheta, stheta, ttheta, theta, t;
int X, Y, i, j;
i = blockDim.x * blockIdx.x + threadIdx.x;
j = blockDim.y * blockIdx.y + threadIdx.y;
if ((i<wdR) && (j < nrays) ){
theta = i*dtheta;
t = -1.0 + j*dt;
tol = 1.0/sqrtf(2);
ini = -tol;
delta = (float) sqrtf(2)/(wdF-1);
ctheta = cosf(theta);
stheta = sinf(theta);
ttheta = tanf(theta);
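// The ray is the line x*cos(theta) + y*sin(theta) = t. When sin(theta) < 1/sqrt(2)
// the division by cos(theta) is well conditioned, so we march along y and
// interpolate the sample in x; otherwise we march along x and interpolate in y.
// Dividing the accumulated sum by |cos(theta)| (resp. |sin(theta)|) compensates for
// the longer path the ray covers per grid step as it tilts away from the marching axis.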
if(stheta < tol){
cumsum = 0;
for(Y = 0; Y < wdF; Y++){
y = ini + Y*delta;
x = (t/ctheta - y*ttheta);
X = (int) floorf((x - ini)/delta);
if(X > -1 && X < wdF-1){
cumsum += (dev_f[Y*wdF + (X+1)] - dev_f[Y*wdF + X])*(x - (ini + X*delta))/delta + dev_f[Y*wdF + X];
}
}
dev_r[j*wdR + i] = cumsum/fabsf(ctheta);
}
else{
cumsum = 0;
for(X = 0; X < wdF; X++){
x = ini + X*delta;
y = (t/stheta - x/ttheta);
Y = (int) floorf((y - ini)/delta);
if(Y > -1 && Y < wdF-1){
cumsum += (dev_f[(Y+1)*wdF + X] - dev_f[Y*wdF + X])*(y - (ini + Y*delta))/delta + dev_f[Y*wdF + X];
}
}
dev_r[j*wdR + i] = cumsum/fabsf(stheta);
}
}
}
int main(int argc, char *argv[]) {
int i, j;
float dt, dtheta;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float milliseconds = 0;
int sizeImage = atoi(argv[2]);
int nrays = atoi(argv[3]);
int nangles = atoi(argv[4]);
int wdf = sizeImage;
int wdr = nangles;
FILE *fp = fopen(argv[1], "r");
float *f;
float *radon;
float *dev_r = NULL;
float *dev_f = NULL;
unsigned int grid1, grid2;
grid1 = (unsigned int) ceilf(((float)(nangles)/16));
grid2 = (unsigned int) ceilf(((float)(nrays)/16));
fprintf(stderr, "%d %d\n", grid1, grid2);
dim3 grid(grid1, grid2, 1);
dim3 blocks(16, 16, 1);
CUDA_CHECK_RETURN(hipMalloc((void**) &dev_f, sizeof(float)*sizeImage*sizeImage));
CUDA_CHECK_RETURN(hipMalloc((void **)&dev_r , nangles*nrays*sizeof(float) ) );
radon = (float *)malloc(nangles*nrays*sizeof(float));
f = (float *)malloc(sizeImage*sizeImage*sizeof(float));
for (i = 0; i < sizeImage*sizeImage; i++)
fscanf(fp, "%f", &f[i]);
CUDA_CHECK_RETURN(hipMemcpy (dev_f , f , sizeImage*sizeImage*sizeof(float) , hipMemcpyHostToDevice));
hipEventRecord(start);
dt = 2.0/(nrays-1);
dtheta = PI/(nangles-1);
hipLaunchKernelGGL(( raysum), dim3(grid), dim3(blocks), 0, 0, dev_f, dev_r, wdf, wdr, dtheta, dt, nrays);
CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(hipGetLastError());
CUDA_CHECK_RETURN(hipMemcpy (radon , dev_r , nangles*nrays*sizeof(float) , hipMemcpyDeviceToHost) );
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
fprintf(stderr, "%f ms\n", milliseconds);
for ( i = 0; i < nrays ; i++){
for(j=0 ; j<nangles; j++){
fprintf(stdout, "%f ", radon[(nrays-1-i)*wdr + (nangles-1-j)]);
}
fprintf(stdout, "\n");
}
CUDA_CHECK_RETURN(hipFree((void*) dev_r));
CUDA_CHECK_RETURN(hipFree((void*) dev_f));
CUDA_CHECK_RETURN(hipDeviceReset());
free(radon);
free(f);
fclose(fp);
return 0;
}
| ce57800aad789d3cb1ae8e0993c5a73c0e6babdf.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define PI 3.141592
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
__global__ void raysum(float *dev_f , float *dev_r , int wdF, int wdR, float dtheta, float dt, int nrays){
float ini, delta, x, y, cumsum, tol, ctheta, stheta, ttheta, theta, t;
int X, Y, i, j;
i = blockDim.x * blockIdx.x + threadIdx.x;
j = blockDim.y * blockIdx.y + threadIdx.y;
if ((i<wdR) && (j < nrays) ){
theta = i*dtheta;
t = -1.0 + j*dt;
tol = 1.0/sqrtf(2);
ini = -tol;
delta = (float) sqrtf(2)/(wdF-1);
ctheta = cosf(theta);
stheta = sinf(theta);
ttheta = tanf(theta);
if(stheta < tol){
cumsum = 0;
for(Y = 0; Y < wdF; Y++){
y = ini + Y*delta;
x = (t/ctheta - y*ttheta);
X = (int) floorf((x - ini)/delta);
if(X > -1 && X < wdF-1){
cumsum += (dev_f[Y*wdF + (X+1)] - dev_f[Y*wdF + X])*(x - (ini + X*delta))/delta + dev_f[Y*wdF + X];
}
}
dev_r[j*wdR + i] = cumsum/fabsf(ctheta);
}
else{
cumsum = 0;
for(X = 0; X < wdF; X++){
x = ini + X*delta;
y = (t/stheta - x/ttheta);
Y = (int) floorf((y - ini)/delta);
if(Y > -1 && Y < wdF-1){
cumsum += (dev_f[(Y+1)*wdF + X] - dev_f[Y*wdF + X])*(y - (ini + Y*delta))/delta + dev_f[Y*wdF + X];
}
}
dev_r[j*wdR + i] = cumsum/fabsf(stheta);
}
}
}
int main(int argc, char *argv[]) {
int i, j;
float dt, dtheta;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
int sizeImage = atoi(argv[2]);
int nrays = atoi(argv[3]);
int nangles = atoi(argv[4]);
int wdf = sizeImage;
int wdr = nangles;
FILE *fp = fopen(argv[1], "r");
float *f;
float *radon;
float *dev_r = NULL;
float *dev_f = NULL;
unsigned int grid1, grid2;
grid1 = (unsigned int) ceilf(((float)(nangles)/16));
grid2 = (unsigned int) ceilf(((float)(nrays)/16));
fprintf(stderr, "%d %d\n", grid1, grid2);
dim3 grid(grid1, grid2, 1);
dim3 blocks(16, 16, 1);
CUDA_CHECK_RETURN(cudaMalloc((void**) &dev_f, sizeof(float)*sizeImage*sizeImage));
CUDA_CHECK_RETURN(cudaMalloc((void **)&dev_r , nangles*nrays*sizeof(float) ) );
radon = (float *)malloc(nangles*nrays*sizeof(float));
f = (float *)malloc(sizeImage*sizeImage*sizeof(float));
for (i = 0; i < sizeImage*sizeImage; i++)
fscanf(fp, "%f", &f[i]);
CUDA_CHECK_RETURN(cudaMemcpy (dev_f , f , sizeImage*sizeImage*sizeof(float) , cudaMemcpyHostToDevice));
cudaEventRecord(start);
dt = 2.0/(nrays-1);
dtheta = PI/(nangles-1);
raysum<<<grid, blocks>>>(dev_f, dev_r, wdf, wdr, dtheta, dt, nrays);
CUDA_CHECK_RETURN(cudaThreadSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(cudaGetLastError());
CUDA_CHECK_RETURN(cudaMemcpy (radon , dev_r , nangles*nrays*sizeof(float) , cudaMemcpyDeviceToHost) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
fprintf(stderr, "%f ms\n", milliseconds);
for ( i = 0; i < nrays ; i++){
for(j=0 ; j<nangles; j++){
fprintf(stdout, "%f ", radon[(nrays-1-i)*wdr + (nangles-1-j)]);
}
fprintf(stdout, "\n");
}
CUDA_CHECK_RETURN(cudaFree((void*) dev_r));
CUDA_CHECK_RETURN(cudaFree((void*) dev_f));
CUDA_CHECK_RETURN(cudaDeviceReset());
free(radon);
free(f);
fclose(fp);
return 0;
}
|
8eee339d2e6b3c90598d37a8b86a2d269f8cac1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ExampleUpdater_hip.cuh"
/*! \file ExampleUpdater.cu
\brief CUDA kernels for ExampleUpdater
*/
// First, the kernel code for zeroing the velocities on the GPU
//! Kernel that zeroes velocities on the GPU
/*! \param d_vel Velocity-mass array from the ParticleData
\param N Number of particles
This kernel executes one thread per particle and zeros the velocity of each. It can be run with any 1D block size
as long as block_size * num_blocks is >= the number of particles.
*/
extern "C" __global__
void gpu_zero_velocities_kernel(Scalar4 *d_vel, unsigned int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
// vel.w is the mass, don't want to modify that
Scalar4 vel = d_vel[idx];
vel.x = vel.y = vel.z = 0.0f;
d_vel[idx] = vel;
}
}
/*! \param d_vel Velocity-mass array from the ParticleData
\param N Number of particles
This is just a driver for gpu_zero_velocities_kernel(), see it for the details
*/
hipError_t gpu_zero_velocities(Scalar4 *d_vel, unsigned int N)
{
// setup the grid to run the kernel
int block_size = 256;
dim3 grid( (int)ceil((double)N / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL(( gpu_zero_velocities_kernel), dim3(grid), dim3(threads) , 0, 0, d_vel, N);
// this method always succeeds. If you had a cuda* call in this driver, you could return its error code if not
// hipSuccess
return hipSuccess;
}
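// Host-side usage sketch (assumes d_vel points to N Scalar4 elements on the device):
// gpu_zero_velocities(d_vel, N);
// hipGetLastError(); // optionally check the launch in a debug build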
| 8eee339d2e6b3c90598d37a8b86a2d269f8cac1f.cu | // Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ExampleUpdater.cuh"
/*! \file ExampleUpdater.cu
\brief CUDA kernels for ExampleUpdater
*/
// First, the kernel code for zeroing the velocities on the GPU
//! Kernel that zeroes velocities on the GPU
/*! \param d_vel Velocity-mass array from the ParticleData
\param N Number of particles
This kernel executes one thread per particle and zeros the velocity of each. It can be run with any 1D block size
as long as block_size * num_blocks is >= the number of particles.
*/
extern "C" __global__
void gpu_zero_velocities_kernel(Scalar4 *d_vel, unsigned int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
// vel.w is the mass, don't want to modify that
Scalar4 vel = d_vel[idx];
vel.x = vel.y = vel.z = 0.0f;
d_vel[idx] = vel;
}
}
/*! \param d_vel Velocity-mass array from the ParticleData
\param N Number of particles
This is just a driver for gpu_zero_velocities_kernel(), see it for the details
*/
cudaError_t gpu_zero_velocities(Scalar4 *d_vel, unsigned int N)
{
// setup the grid to run the kernel
int block_size = 256;
dim3 grid( (int)ceil((double)N / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
gpu_zero_velocities_kernel<<< grid, threads >>>(d_vel, N);
// this method always succeeds. If you had a cuda* call in this driver, you could return its error code if not
// cudaSuccess
return cudaSuccess;
}
|
6f1b42d200357b9e2bdfcc5d9e32e9b5d891617e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/cudnn_wrappers.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include <hipcub/hipcub.hpp>
namespace caffe2 {
namespace {
// Explicit fast paths for avg and max global pooling due to CuDNN global
// pooling performance bug which makes pooling extremely slow.
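// Each thread block walks the N*C channel slices in a grid-stride loop; within a
// slice, threads stride over the H*W*D elements and a hipcub::BlockReduce combines
// the per-thread partial sums (or maxima), with thread 0 writing the pooled value.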
template <typename T>
__global__ void
global_avgpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T sum(0);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
sum += data[j * sz + k];
}
float totalsum = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
out[j] = totalsum / sz;
}
__syncthreads();
}
}
template <typename T>
__global__ void
global_avgpool_backward_NCHW(const int NC, const int sz, const T* dx, T* out) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
out[i] = dx[i / sz] / sz;
}
}
template <typename T>
__global__ void
global_maxpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T max(-FLT_MAX);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
max = data[j * sz + k] > max ? data[j * sz + k] : max;
}
float totalmax = BlockReduce(temp_storage).Reduce(max, hipcub::Max());
if (threadIdx.x == 0) {
out[j] = totalmax;
}
__syncthreads();
}
}
template <typename T>
__global__ void global_maxpool_backward_NCHW(
const int NC,
const int sz,
const T* dx,
T* out,
const T* x,
const T* in) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
if (in[i] == x[i / sz]) {
out[i] = dx[i / sz];
} else {
out[i] = 0.0;
}
}
}
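// Describes an N(C)HW(D) tensor to cuDNN: 4-D tensors use the 4d descriptor with the
// format implied by `order`, while 5-D tensors fall back to the Nd descriptor with
// explicitly computed NCHW or NHWC strides.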
template <typename T>
void setTensorDescriptor(
const int size,
const StorageOrder order,
const int N,
const int C,
const int H,
const int W,
const int D,
cudnnTensorDescriptor_t& desc) {
if (size == 4) {
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
desc,
GetCudnnTensorFormat(order),
cudnnTypeWrapper<T>::type,
N,
C,
H,
W));
} else {
vector<int> dims = {N, C, H, W, D};
vector<int> strides;
order == NCHW
? strides.insert(strides.end(), {C * H * W * D, H * W * D, W * D, D, 1})
: strides.insert(
strides.end(), {H * W * D * C, 1, W * D * C, D * C, C});
CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
desc,
cudnnTypeWrapper<T>::type,
size > 3 ? size : 4,
dims.data(),
strides.data()));
}
}
} // namespace
class CuDNNPoolOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
OPERATOR_NEEDS_FEATURE(kernel_.size() >=2 && kernel_.size() <=3,
"Cudnn pooling only supports 4d and 5d tensor");
if (legacy_pad_ != LegacyPadding::CAFFE_LEGACY_POOLING) {
for (int i = 0; i < kernel_.size(); ++i) {
OPERATOR_NEEDS_FEATURE(
pads_[i] == pads_[kernel_.size() + i],
"The current padding scheme leads to unequal padding on the left "
"and right, which is not supported by cudnn.");
}
}
// Figure out the pooling descriptor.
if (operator_def.type().substr(0, 7) == "MaxPool") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (operator_def.type().substr(0, 11) == "AveragePool") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto* Y = Output(0);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
// cuDNN pooling supports only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(1);
W_out = Y->ndim() > 3 ? Y->dim32(2) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(2);
W_out = Y->ndim() > 3 ? Y->dim32(3) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 is not supported by CUB.
if (std::is_same<T, float>::value) {
if (order_ == StorageOrder::NCHW && global_pooling_) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
hipLaunchKernelGGL(( global_avgpool_kernel_NCHW<float>)
, dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
X.data<float>(),
Y->template mutable_data<float>());
return true;
}
if (mode_ == CUDNN_POOLING_MAX) {
hipLaunchKernelGGL(( global_maxpool_kernel_NCHW<float>)
, dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
X.data<float>(),
Y->template mutable_data<float>());
return true;
}
}
}
if (cudnn_input_dims_ != X.sizes()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.sizes().vec();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y->ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
T* Ydata = Y->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingForward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
top_desc_,
Ydata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto* Y = Output(0);
if (X.IsType<float>()) {
return DoRunWithType<float, float>();
} else if (X.IsType<at::Half>()) {
return DoRunWithType<at::Half, float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<int64_t> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
private:
};
class CuDNNPoolGradientOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type() == "MaxPoolGradient" ||
operator_def.type() == "MaxPool1DGradient" ||
operator_def.type() == "MaxPool2DGradient" ||
operator_def.type() == "MaxPool3DGradient") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (
operator_def.type() == "AveragePoolGradient" ||
operator_def.type() == "AveragePool1DGradient" ||
operator_def.type() == "AveragePool2DGradient" ||
operator_def.type() == "AveragePool3DGradient") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolGradientOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
// cuDNN pooling supports only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
dX->ResizeLike(X);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
H_out = Y.dim32(1);
W_out = Y.ndim() > 3 ? Y.dim32(2) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
H_out = Y.dim32(2);
W_out = Y.ndim() > 3 ? Y.dim32(3) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 is not supported by CUB.
if (std::is_same<T, float>::value) {
if (order_ == StorageOrder::NCHW && global_pooling_) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
hipLaunchKernelGGL(( global_avgpool_backward_NCHW<float>)
, dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
#if CUDNN_VERSION_MIN(6, 0, 0)
if (mode_ == CUDNN_POOLING_MAX ||
mode_ == CUDNN_POOLING_MAX_DETERMINISTIC) {
#else
if (mode_ == CUDNN_POOLING_MAX) {
#endif
hipLaunchKernelGGL(( global_maxpool_backward_NCHW<float>)
, dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
dY.data<float>(),
dX->template mutable_data<float>(),
Y.data<float>(),
X.data<float>());
return true;
}
}
}
if (kernel_.size() == 1) {
ConvPoolOpBase<CUDAContext>::ComputePads({H});
} else if (kernel_.size() == 2) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W});
} else if (kernel_.size() == 3) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W, D});
} else {
CAFFE_THROW("Unsupported kernel size :", kernel_.size());
}
if (cudnn_input_dims_ != X.sizes()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.sizes().vec();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y.ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
const T* Ydata = Y.template data<T>();
const T* dYdata = dY.template data<T>();
T* dXdata = dX->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingBackward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
top_desc_,
Ydata,
top_desc_,
dYdata,
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
bottom_desc_,
dXdata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
if (X.IsType<float>()) {
return DoRunWithType<float, float>();
} else if (X.IsType<at::Half>()) {
return DoRunWithType<at::Half, float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<int64_t> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
};
namespace {
REGISTER_CUDNN_OPERATOR(AveragePool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool3DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool3DGradient, CuDNNPoolGradientOp);
} // namespace
} // namespace caffe2
| 6f1b42d200357b9e2bdfcc5d9e32e9b5d891617e.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/core/cudnn_wrappers.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include <cub/cub.cuh>
namespace caffe2 {
namespace {
// Explicit fast paths for avg and max global pooling due to CuDNN global
// pooling performance bug which makes pooling extremely slow.
template <typename T>
__global__ void
global_avgpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T sum(0);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
sum += data[j * sz + k];
}
float totalsum = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
out[j] = totalsum / sz;
}
__syncthreads();
}
}
template <typename T>
__global__ void
global_avgpool_backward_NCHW(const int NC, const int sz, const T* dx, T* out) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
out[i] = dx[i / sz] / sz;
}
}
template <typename T>
__global__ void
global_maxpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T max(-FLT_MAX);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
max = data[j * sz + k] > max ? data[j * sz + k] : max;
}
float totalmax = BlockReduce(temp_storage).Reduce(max, cub::Max());
if (threadIdx.x == 0) {
out[j] = totalmax;
}
__syncthreads();
}
}
template <typename T>
__global__ void global_maxpool_backward_NCHW(
const int NC,
const int sz,
const T* dx,
T* out,
const T* x,
const T* in) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
if (in[i] == x[i / sz]) {
out[i] = dx[i / sz];
} else {
out[i] = 0.0;
}
}
}
template <typename T>
void setTensorDescriptor(
const int size,
const StorageOrder order,
const int N,
const int C,
const int H,
const int W,
const int D,
cudnnTensorDescriptor_t& desc) {
if (size == 4) {
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
desc,
GetCudnnTensorFormat(order),
cudnnTypeWrapper<T>::type,
N,
C,
H,
W));
} else {
vector<int> dims = {N, C, H, W, D};
vector<int> strides;
order == NCHW
? strides.insert(strides.end(), {C * H * W * D, H * W * D, W * D, D, 1})
: strides.insert(
strides.end(), {H * W * D * C, 1, W * D * C, D * C, C});
CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
desc,
cudnnTypeWrapper<T>::type,
size > 3 ? size : 4,
dims.data(),
strides.data()));
}
}
} // namespace
class CuDNNPoolOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
OPERATOR_NEEDS_FEATURE(kernel_.size() >=2 && kernel_.size() <=3,
"Cudnn pooling only supports 4d and 5d tensor");
if (legacy_pad_ != LegacyPadding::CAFFE_LEGACY_POOLING) {
for (int i = 0; i < kernel_.size(); ++i) {
OPERATOR_NEEDS_FEATURE(
pads_[i] == pads_[kernel_.size() + i],
"The current padding scheme leads to unequal padding on the left "
"and right, which is not supported by cudnn.");
}
}
// Figure out the pooling descriptor.
if (operator_def.type().substr(0, 7) == "MaxPool") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (operator_def.type().substr(0, 11) == "AveragePool") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto* Y = Output(0);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
// cuDNN pooling supports only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(1);
W_out = Y->ndim() > 3 ? Y->dim32(2) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(2);
W_out = Y->ndim() > 3 ? Y->dim32(3) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 is not supported by CUB.
if (std::is_same<T, float>::value) {
if (order_ == StorageOrder::NCHW && global_pooling_) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
global_avgpool_kernel_NCHW<float>
<<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C,
H * W * D,
X.data<float>(),
Y->template mutable_data<float>());
return true;
}
if (mode_ == CUDNN_POOLING_MAX) {
global_maxpool_kernel_NCHW<float>
<<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C,
H * W * D,
X.data<float>(),
Y->template mutable_data<float>());
return true;
}
}
}
if (cudnn_input_dims_ != X.sizes()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.sizes().vec();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y->ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
T* Ydata = Y->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingForward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
top_desc_,
Ydata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto* Y = Output(0);
if (X.IsType<float>()) {
return DoRunWithType<float, float>();
} else if (X.IsType<at::Half>()) {
return DoRunWithType<at::Half, float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<int64_t> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
private:
};
class CuDNNPoolGradientOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type() == "MaxPoolGradient" ||
operator_def.type() == "MaxPool1DGradient" ||
operator_def.type() == "MaxPool2DGradient" ||
operator_def.type() == "MaxPool3DGradient") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (
operator_def.type() == "AveragePoolGradient" ||
operator_def.type() == "AveragePool1DGradient" ||
operator_def.type() == "AveragePool2DGradient" ||
operator_def.type() == "AveragePool3DGradient") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolGradientOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
// cuDNN pooling supports only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
dX->ResizeLike(X);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
H_out = Y.dim32(1);
W_out = Y.ndim() > 3 ? Y.dim32(2) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
H_out = Y.dim32(2);
W_out = Y.ndim() > 3 ? Y.dim32(3) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 is not supported by CUB.
if (std::is_same<T, float>::value) {
if (order_ == StorageOrder::NCHW && global_pooling_) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
global_avgpool_backward_NCHW<float>
<<<CAFFE_GET_BLOCKS(dX->size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C,
H * W * D,
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
#if CUDNN_VERSION_MIN(6, 0, 0)
if (mode_ == CUDNN_POOLING_MAX ||
mode_ == CUDNN_POOLING_MAX_DETERMINISTIC) {
#else
if (mode_ == CUDNN_POOLING_MAX) {
#endif
global_maxpool_backward_NCHW<float>
<<<CAFFE_GET_BLOCKS(dX->size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C,
H * W * D,
dY.data<float>(),
dX->template mutable_data<float>(),
Y.data<float>(),
X.data<float>());
return true;
}
}
}
if (kernel_.size() == 1) {
ConvPoolOpBase<CUDAContext>::ComputePads({H});
} else if (kernel_.size() == 2) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W});
} else if (kernel_.size() == 3) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W, D});
} else {
CAFFE_THROW("Unsupported kernel size :", kernel_.size());
}
if (cudnn_input_dims_ != X.sizes()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.sizes().vec();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y.ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
const T* Ydata = Y.template data<T>();
const T* dYdata = dY.template data<T>();
T* dXdata = dX->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingBackward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
top_desc_,
Ydata,
top_desc_,
dYdata,
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
bottom_desc_,
dXdata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
if (X.IsType<float>()) {
return DoRunWithType<float, float>();
} else if (X.IsType<at::Half>()) {
return DoRunWithType<at::Half, float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<int64_t> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
};
namespace {
REGISTER_CUDNN_OPERATOR(AveragePool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool3DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool3DGradient, CuDNNPoolGradientOp);
} // namespace
} // namespace caffe2
|
0d86ab8ee5caedb4ef53f326c15c31b07d6d47ba.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "writeBoundary_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int startPos = 1;
int rLen = 1;
int *d_startArray = NULL;
hipMalloc(&d_startArray, XSIZE*YSIZE);
int *d_startSumArray = NULL;
hipMalloc(&d_startSumArray, XSIZE*YSIZE);
int *d_bounary = NULL;
hipMalloc(&d_bounary, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(writeBoundary_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, startPos, rLen, d_startArray, d_startSumArray, d_bounary);
hipDeviceSynchronize();
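// Warm-up: 10 untimed launches follow; the next 1000 launches are timed with
// steady_clock and the total microseconds are printed per (block, matrix) configuration.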
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(writeBoundary_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, startPos, rLen, d_startArray, d_startSumArray, d_bounary);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(writeBoundary_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, startPos, rLen, d_startArray, d_startSumArray, d_bounary);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0d86ab8ee5caedb4ef53f326c15c31b07d6d47ba.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "writeBoundary_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int startPos = 1;
int rLen = 1;
int *d_startArray = NULL;
cudaMalloc(&d_startArray, XSIZE*YSIZE);
int *d_startSumArray = NULL;
cudaMalloc(&d_startSumArray, XSIZE*YSIZE);
int *d_bounary = NULL;
cudaMalloc(&d_bounary, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
writeBoundary_kernel<<<gridBlock,threadBlock>>>(startPos,rLen,d_startArray,d_startSumArray,d_bounary);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
writeBoundary_kernel<<<gridBlock,threadBlock>>>(startPos,rLen,d_startArray,d_startSumArray,d_bounary);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
writeBoundary_kernel<<<gridBlock,threadBlock>>>(startPos,rLen,d_startArray,d_startSumArray,d_bounary);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9fabcac96335f4d5c837ac953f2ddcc5d06fc433.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < n) {
if (idata[index] == 0) {
bools[index] = 0;
}
else {
bools[index] = 1;
}
}
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < n && bools[index] == 1) {
odata[indices[index]] = idata[index];
}
}
}
}
| 9fabcac96335f4d5c837ac953f2ddcc5d06fc433.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < n) {
if (idata[index] == 0) {
bools[index] = 0;
}
else {
bools[index] = 1;
}
}
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < n && bools[index] == 1) {
odata[indices[index]] = idata[index];
}
}
}
}
|
270bf95de2ae2b8b8b39951dec8d0c407f395f89.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vecSum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double2 *in = NULL;
hipMalloc(&in, XSIZE*YSIZE);
double *factor = NULL;
hipMalloc(&factor, XSIZE*YSIZE);
double2 *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(vecSum, dim3(gridBlock), dim3(threadBlock), 0, 0, in, factor, out);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(vecSum, dim3(gridBlock), dim3(threadBlock), 0, 0, in, factor, out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(vecSum, dim3(gridBlock), dim3(threadBlock), 0, 0, in, factor, out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 270bf95de2ae2b8b8b39951dec8d0c407f395f89.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vecSum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double2 *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE);
double *factor = NULL;
cudaMalloc(&factor, XSIZE*YSIZE);
double2 *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vecSum<<<gridBlock,threadBlock>>>(in,factor,out);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vecSum<<<gridBlock,threadBlock>>>(in,factor,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vecSum<<<gridBlock,threadBlock>>>(in,factor,out);
}
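// Note: these launches are asynchronous; without a cudaDeviceSynchronize() before reading the clock, the measured interval mostly reflects launch/queueing overhead rather than kernel execution time.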
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7b769d5486525f66f4a14c49e417d47db11a9768.hip | // !!! This is a file automatically generated by hipify!!!
/**TODO: Add copyright*/
#if COMPILE_WITH_CUDA
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <unsupported/Eigen/CXX11/Tensor>
#include <TensorBase/benchmarks/BenchmarkGraphGpu.h>
using namespace TensorBase;
using namespace TensorBaseBenchmarks;
/* Benchmark for a Graph analytics use case with tables for Sparse indices represented as node in/out pairs, weights, node properties, and link properties.
A Kronecker graph generator is used to simulate the node and links according the scale and edge_factor parameters,
weights are randomly assigned in the range of [0, 1], and node and link properties are randomly assigned and represented by a string value.
Example usage:
BenchmarkGraph [data_dir] [scale] [edge_factor] [in_memory] [shard_size_perc] [n_engines]
BenchmarkGraph C:/Users/dmccloskey/Documents/GitHub/mnist/ S S true 100 2
@param[in] scale Options include XS, small, medium, large, and XL (i.e., 8, 14, 16, 20, 24, and 26, respectively) with default of small
@param[in] edge_factor Options include only 16 for now
@param[in] in_memory Simulate all data loaded into memory (true) or JIT load into memory from disk (false) with default of true
@param[in] shard_size_perc Different shard span configurations. Options include 5, 20, and 100 with a default of 100
@param[in] n_engines The number of CPUs or GPUs to use
*/
int main(int argc, char** argv)
{
// Parse the user commands
std::string data_dir = "C:/Users/dmccloskey/Documents/GitHub/mnist/";
int scale = 8;
int edge_factor = 16;
bool in_memory = true;
double shard_span_perc = 1;
int n_engines = 1;
parseCmdArgsGraph(argc, argv, data_dir, scale, edge_factor, in_memory, shard_span_perc, n_engines);
// Setup the device
hipStream_t stream;
gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
// Setup the Benchmarking suite, TensorCollectionGenerator, and run the application
BenchmarkGraph1LinkGpu benchmark_1_link;
GraphTensorCollectionGeneratorGpu tensor_collection_generator;
runBenchmarkGraph(data_dir, scale, edge_factor, in_memory, shard_span_perc, benchmark_1_link, tensor_collection_generator, device);
return 0;
}
#endif | 7b769d5486525f66f4a14c49e417d47db11a9768.cu | /**TODO: Add copyright*/
#if COMPILE_WITH_CUDA
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#include <cuda.h>
#include <cuda_runtime.h>
#include <unsupported/Eigen/CXX11/Tensor>
#include <TensorBase/benchmarks/BenchmarkGraphGpu.h>
using namespace TensorBase;
using namespace TensorBaseBenchmarks;
/* Benchmark for a Graph analytics use case with tables for Sparse indices represented as node in/out pairs, weights, node properties, and link properties.
A Kronecker graph generator is used to simulate the node and links according the scale and edge_factor parameters,
weights are randomly assigned in the range of [0, 1], and node and link properties are randomly assigned and represented by a string value.
Example usage:
BenchmarkGraph [data_dir] [scale] [edge_factor] [in_memory] [shard_size_perc] [n_engines]
BenchmarkGraph C:/Users/dmccloskey/Documents/GitHub/mnist/ S S true 100 2
@param[in] scale Options include XS, small, medium, large, and XL (i.e., 8, 14, 16, 20, 24, and 26, respectively) with default of small
@param[in] edge_factor Options include only 16 for now
@param[in] in_memory Simulate all data loaded into memory (true) or JIT load into memory from disk (false) with default of true
@param[in] shard_size_perc Different shard span configurations. Options include 5, 20, and 100 with a default of 100
@param[in] n_engines The number of CPUs or GPUs to use
*/
int main(int argc, char** argv)
{
// Parse the user commands
std::string data_dir = "C:/Users/dmccloskey/Documents/GitHub/mnist/";
int scale = 8;
int edge_factor = 16;
bool in_memory = true;
double shard_span_perc = 1;
int n_engines = 1;
parseCmdArgsGraph(argc, argv, data_dir, scale, edge_factor, in_memory, shard_span_perc, n_engines);
// Setup the device
cudaStream_t stream;
gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
Eigen::GpuStreamDevice stream_device(&stream, 0);
Eigen::GpuDevice device(&stream_device);
// Setup the Benchmarking suite, TensorCollectionGenerator, and run the application
BenchmarkGraph1LinkGpu benchmark_1_link;
GraphTensorCollectionGeneratorGpu tensor_collection_generator;
runBenchmarkGraph(data_dir, scale, edge_factor, in_memory, shard_span_perc, benchmark_1_link, tensor_collection_generator, device);
return 0;
}
#endif |
8c5428219a030f8f48faca75c4e6e7d6079e9c09.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
__global__ void compute(int* x,int* y,int n){
int col=threadIdx.x+blockIdx.x*blockDim.x;
int row=threadIdx.y+blockIdx.y*blockDim.y;
int num=col+row*n;
int neighbor=0;
//cell in the middle has eight neighbors,
//a cell in a corner has only three neighbors,
//a cell on an edge has five neighbors.
if(col<n && row<n){
//corner 3
//In order to move to corner,
//it should move either diagonal or move left/right and move up/down
//top left corner
if(col==0 && row==0){
neighbor+=x[num+1]; //move right
neighbor+=x[num+n]; //move bottom
neighbor+=x[num+n+1]; //bottom right
}
//bottom left corner
else if(col==0 && row==n-1){
neighbor+=x[num+1]; //move right
neighbor+=x[num-n]; //move up
neighbor+=x[num-n+1]; //top right
}
//bottom right
else if(col==n-1 && row==n-1){
neighbor+=x[num-1]; //move left
neighbor+=x[num-n]; //move up
neighbor+=x[num-n-1]; //top left
}
//edge 5
//In order to move to edge
// it should just move to left/right/up/down (including corner)
//top edge
else if(row==0 && col>0 && col<n-1){
neighbor+=x[num-1]; //left
neighbor+=x[num+1]; //right
neighbor+=x[num+n-1]; //bottom left -- corner
neighbor+=x[num+n]; //bottom
neighbor+=x[num+n+1]; //bottom right -- corner
}
//bottom edge
else if(row==n-1 && col>0 && col<n-1){
neighbor+=x[num-1]; //left
neighbor+=x[num+1]; //right
neighbor+=x[num-n+1]; //Top right-- corner
neighbor+=x[num-n]; //Top
neighbor+=x[num-n-1]; //Top left -- corner
}
//Left edge
else if(col==0 && row>0 && row<n-1){
neighbor+=x[num+1]; //right
neighbor+=x[num-n]; //Top
neighbor+=x[num-n+1]; //Top right-- corner
neighbor+=x[num+n]; //Bottom
neighbor+=x[num+n+1]; //Bottom right-- corner
}
//Right edge
else if(col==n-1 && row>0 && row<n-1){
neighbor+=x[num-1]; //left
neighbor+=x[num-n]; //Top
neighbor+=x[num-n-1]; //Top left-- corner
neighbor+=x[num+n]; //Bottom
neighbor+=x[num+n-1]; //Bottom left-- corner
}
//Top right corner
else if(col==n-1 && row==0){
neighbor+=x[num-1]; //move left
neighbor+=x[num+n]; //move bottom
neighbor+=x[num+n-1]; //bottom left
}
//cell in the middle has eight neighbors,
else{
neighbor+=x[num-1];//left
neighbor+=x[num+1]; //right
neighbor+=x[num-n-1];//top left
neighbor+=x[num-n]; //top
neighbor+=x[num-n+1]; //top right
neighbor+=x[num+n-1]; //bottom left
neighbor+=x[num+n]; //bottom
neighbor+=x[num+n+1]; //bottom right
}
//Die : 0
//Live: 1
//A live cell with zero or one live neighbor dies from loneliness.
if(x[num]==1 && (neighbor ==0 || neighbor ==1))
y[num]=0;
//A live cell with four or more live neighbors dies due to overpopulation.
else if(x[num]==1 && neighbor>=4)
y[num]=0;
//A dead cell with two or three live neighbors becomes alive.
else if(x[num]==0 && (neighbor==2 || neighbor==3))
y[num]=1;
//Otherwise, a cell's state stays unchanged
else
y[num] = x[num];
}
}
int main(void){
int i,j,k;
int row= 6;
int col= 6; // the compute kernel indexes a square n x n board (n = row is passed below), so col must match row
int start[row][col];
int Round[row][col];
dim3 threadsPerBlock(32,32);
dim3 numBlocks((col+threadsPerBlock.x-1)/threadsPerBlock.x,(row+threadsPerBlock.y-1)/threadsPerBlock.y);
int* x;
int* y;
int generation =1;// maximum generation/iteration
float milliseconds=0;
hipEvent_t t_start,t_stop;
hipEventCreate(&t_start);
hipEventCreate(&t_stop);
//Initilazie the matrix of the x Generated cell
for(i=0;i<row;i++)
for(j=0;j<col;j++)
start[i][j]=rand()%2;
//Initilazie the matrix of the y Generated cell
for(i=0;i<row;i++)
for(j=0;j<col;j++)
Round[i][j]=0;
hipMalloc((void **) &x,sizeof(int)*row*col);
hipMemcpy(x,start,sizeof(int)*row*col,hipMemcpyHostToDevice);
printf("Start\n");
printf("-------\n");
for(i=0;i<row;i++)
{
for(j=0;j<col;j++)
{
if(start[i][j]){
printf(" 1");
}
else{
printf(" 0");
}
}
printf("\n");
}
hipMalloc((void **) &y,sizeof(int)*row*col);
for(k=0;k<= generation;k++)
{
hipEventRecord(t_start);
hipLaunchKernelGGL(( compute), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, x,y,row);
hipEventRecord(t_stop);
hipMemcpy(Round,y,sizeof(int)*row*col,hipMemcpyDeviceToHost);
printf("\n Round %d \n",k);
printf("-------\n");
for(i=0;i<row;i++)
{
for(j=0;j<col;j++)
{
if(Round[i][j])
{
printf(" 1");
}
else{
printf(" 0");
}
}
printf("\n");
}
hipEventSynchronize(t_stop);
hipEventElapsedTime(&milliseconds,t_start,t_stop);
printf("Time taken for this computation = %f milliseconds\n\n",milliseconds);
hipMemcpy(x,y,sizeof(int)*row*col,hipMemcpyDeviceToDevice); //carry this generation into the next
}
return 0;
}
| 8c5428219a030f8f48faca75c4e6e7d6079e9c09.cu | #include<stdio.h>
#include <cuda.h>
#include <sys/time.h>
__global__ void compute(int* x,int* y,int n){
int col=threadIdx.x+blockIdx.x*blockDim.x;
int row=threadIdx.y+blockIdx.y*blockDim.y;
int num=col+row*n;
int neighbor=0;
//cell in the middle has eight neighbors,
//a cell in a corner has only three neighbors,
//a cell on an edge has five neighbors.
if(col<n && row<n){
//corner 3
//In order to move to corner,
//it should move either diagonal or move left/right and move up/down
//top left corner
if(col==0 && row==0){
neighbor+=x[num+1]; //move right
neighbor+=x[num+n]; //move bottom
neighbor+=x[num+n+1]; //bottom right
}
//bottom left corner
else if(col==0 && row==n-1){
neighbor+=x[num+1]; //move right
neighbor+=x[num-n]; //move up
neighbor+=x[num-n+1]; //top right
}
//bottom right
else if(col==n-1 && row==n-1){
neighbor+=x[num-1]; //move left
neighbor+=x[num-n]; //move up
neighbor+=x[num-n-1]; //top left
}
//edge 5
//In order to move to edge
// it should just move to left/right/up/down (including corner)
//top edge
else if(row==0 && col>0 && col<n-1){
neighbor+=x[num-1]; //left
neighbor+=x[num+1]; //right
neighbor+=x[num+n-1]; //bottom left -- corner
neighbor+=x[num+n]; //bottom
neighbor+=x[num+n+1]; //bottom right -- corner
}
//bottom edge
else if(row==n-1 && col>0 && col<n-1){
neighbor+=x[num-1]; //left
neighbor+=x[num+1]; //right
neighbor+=x[num-n+1]; //Top right-- corner
neighbor+=x[num-n]; //Top
neighbor+=x[num-n-1]; //Top left -- corner
}
//Left edge
else if(col==0 && row>0 && row<n-1){
neighbor+=x[num+1]; //right
neighbor+=x[num-n]; //Top
neighbor+=x[num-n+1]; //Top right-- corner
neighbor+=x[num+n]; //Bottom
neighbor+=x[num+n+1]; //Bottom right-- corner
}
//Right edge
else if(col==n-1 && row>0 && row<n-1){
neighbor+=x[num-1]; //left
neighbor+=x[num-n]; //Top
neighbor+=x[num-n-1]; //Top left-- corner
neighbor+=x[num+n]; //Bottom
neighbor+=x[num+n-1]; //Bottom left-- corner
}
//Top right corner
else if(col==n-1 && row==0){
neighbor+=x[num-1]; //move left
neighbor+=x[num+n]; //move bottom
neighbor+=x[num+n-1]; //bottom left
}
//cell in the middle has eight neighbors,
else{
neighbor+=x[num-1];//left
neighbor+=x[num+1]; //right
neighbor+=x[num-n-1];//top left
neighbor+=x[num-n]; //top
neighbor+=x[num-n+1]; //top right
neighbor+=x[num+n-1]; //bottom left
neighbor+=x[num+n]; //bottom
neighbor+=x[num+n+1]; //bottom right
}
//Die : 0
//Live: 1
//A live cell with zero or one live neighbor dies from loneliness.
if(x[num]==1 && (neighbor ==0 || neighbor ==1))
y[num]=0;
//A live cell with four or more live neighbors dies due to overpopulation.
else if(x[num]==1 && neighbor>=4)
y[num]=0;
//A dead cell with two or three live neighbors becomes alive.
else if(x[num]==0 && (neighbor==2 || neighbor==3))
y[num]=1;
//Otherwise, a cell's state stays unchanged
else
y[num] = x[num];
}
}
int main(void){
int i,j,k;
int row= 6;
int col= 6; // the compute kernel indexes a square n x n board (n = row is passed below), so col must match row
int start[row][col];
int Round[row][col];
dim3 threadsPerBlock(32,32);
dim3 numBlocks((col+threadsPerBlock.x-1)/threadsPerBlock.x,(row+threadsPerBlock.y-1)/threadsPerBlock.y);
int* x;
int* y;
int generation =1;// maximum generation/iteration
float milliseconds=0;
cudaEvent_t t_start,t_stop;
cudaEventCreate(&t_start);
cudaEventCreate(&t_stop);
//Initilazie the matrix of the x Generated cell
for(i=0;i<row;i++)
for(j=0;j<col;j++)
start[i][j]=rand()%2;
//Initilazie the matrix of the y Generated cell
for(i=0;i<row;i++)
for(j=0;j<col;j++)
Round[i][j]=0;
cudaMalloc((void **) &x,sizeof(int)*row*col);
cudaMemcpy(x,start,sizeof(int)*row*col,cudaMemcpyHostToDevice);
printf("Start\n");
printf("-------\n");
for(i=0;i<row;i++)
{
for(j=0;j<col;j++)
{
if(start[i][j]){
printf(" 1");
}
else{
printf(" 0");
}
}
printf("\n");
}
cudaMalloc((void **) &y,sizeof(int)*row*col);
for(k=0;k<= generation;k++)
{
cudaEventRecord(t_start);
compute<<<numBlocks,threadsPerBlock>>>(x,y,row);
cudaEventRecord(t_stop);
cudaMemcpy(Round,y,sizeof(int)*row*col,cudaMemcpyDeviceToHost);
printf("\n Round %d \n",k);
printf("-------\n");
for(i=0;i<row;i++)
{
for(j=0;j<col;j++)
{
if(Round[i][j])
{
printf(" 1");
}
else{
printf(" 0");
}
}
printf("\n");
}
cudaEventSynchronize(t_stop);
cudaEventElapsedTime(&milliseconds,t_start,t_stop);
printf("Time taken for this computation = %f milliseconds\n\n",milliseconds);
cudaMemcpy(x,y,sizeof(int)*row*col,cudaMemcpyDeviceToDevice); //carry this generation into the next
}
return 0;
}
|
8719130ab8370f87ba8e5fcddd53c8d282ca5b0b.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/bitfield.h"
#include "../common/timer.h"
#include "../common/categorical.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "split_evaluator.h"
#include "constraints.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/driver.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
bool deterministic_histogram;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(deterministic_histogram).set_default(true).describe(
"Pre-round the gradient for obtaining deterministic gradient histogram.");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
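// For reference (a sketch, not from the original source): the fields above are
// settable like any other booster parameter, e.g.
//   {"tree_method": "gpu_hist", "single_precision_histogram": "true",
//    "deterministic_histogram": "true", "debug_synchronize": "false"}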
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(device_id_, data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(device_id_, n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
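// Scale illustration (a sketch, not from the original source): with the default
// kStopGrowingSize of 1 << 26 ValueT slots and e.g. 256 bins (512 slots per
// node), roughly 128K node histograms fit before the recycling branch above
// starts reusing the oldest entry instead of growing the buffer further.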
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
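/**
 * Usage sketch (not from the original source), assuming GradientPairPrecise
 * entries and 256 bins:
 *
 *   DeviceHistogram<GradientPairPrecise> hist;
 *   hist.Init(device_id, 256);          // bind to a device, record bin count
 *   hist.AllocateHistogram(0);          // grow (or recycle) storage for the root
 *   auto root_hist = hist.GetNodeHistogram(0);   // span of 256 entries
 *   hist.Reset();                       // zero the buffer and forget all nodes
 */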
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl* page;
common::Span<FeatureType const> feature_types;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
TrainParam param;
bool deterministic_histogram;
GradientSumT histogram_rounding;
dh::PinnedMemory pinned;
std::vector<hipStream_t> streams{};
common::Monitor monitor;
TreeEvaluator tree_evaluator;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
std::unique_ptr<GradientBasedSampler> sampler;
std::unique_ptr<FeatureGroups> feature_groups;
// Storing split categories for last node.
dh::caching_device_vector<uint32_t> node_categories;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl* _page,
common::Span<FeatureType const> _feature_types,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features,
bool deterministic_histogram,
BatchParam _batch_param)
: device_id(_device_id),
page(_page),
feature_types{_feature_types},
param(std::move(_param)),
tree_evaluator(param, n_features, _device_id),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
deterministic_histogram{deterministic_histogram},
batch_param(_batch_param) {
sampler.reset(new GradientBasedSampler(
page, _n_rows, batch_param, param.subsample, param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(device_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
feature_groups.reset(new FeatureGroups(
page->Cuts(), page->is_dense, dh::MaxSharedMemoryOptin(device_id),
sizeof(GradientSumT)));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(hipSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<hipStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(hipStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
auto const& info = dmat->Info();
this->column_sampler.Init(num_columns, info.feature_weigths.HostVector(),
param.colsample_bynode, param.colsample_bylevel,
param.colsample_bytree);
dh::safe_cuda(hipSetDevice(device_id));
tree_evaluator = TreeEvaluator(param, dmat->Info().num_col_, device_id);
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
auto sample = sampler->Sample(dh_gpair->DeviceSpan(), dmat);
page = sample.page;
gpair = sample.gpair;
if (deterministic_histogram) {
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
} else {
histogram_rounding = GradientSumT{0.0, 0.0};
}
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, sample.sample_rows));
hist.Reset();
}
DeviceSplitCandidate EvaluateRootSplit(GradientPair root_sum) {
int nidx = RegTree::kRoot;
dh::TemporaryArray<DeviceSplitCandidate> splits_out(1);
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> inputs{
nidx,
{root_sum.GetGrad(), root_sum.GetHess()},
gpu_param,
feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(nidx)};
auto gain_calc = tree_evaluator.GetEvaluator<GPUTrainingParam>();
EvaluateSingleSplit(dh::ToSpan(splits_out), gain_calc, inputs);
std::vector<DeviceSplitCandidate> result(1);
dh::safe_cuda(hipMemcpy(result.data(), splits_out.data().get(),
sizeof(DeviceSplitCandidate) * splits_out.size(),
hipMemcpyDeviceToHost));
return result.front();
}
void EvaluateLeftRightSplits(
ExpandEntry candidate, int left_nidx, int right_nidx, const RegTree& tree,
common::Span<ExpandEntry> pinned_candidates_out) {
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2);
GPUTrainingParam gpu_param(param);
auto left_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(),
left_nidx);
auto right_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(),
left_nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> left{
left_nidx,
{candidate.split.left_sum.GetGrad(),
candidate.split.left_sum.GetHess()},
gpu_param,
left_feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(left_nidx)};
EvaluateSplitInputs<GradientSumT> right{
right_nidx,
{candidate.split.right_sum.GetGrad(),
candidate.split.right_sum.GetHess()},
gpu_param,
right_feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(right_nidx)};
auto d_splits_out = dh::ToSpan(splits_out);
EvaluateSplits(d_splits_out, tree_evaluator.GetEvaluator<GPUTrainingParam>(), left, right);
dh::TemporaryArray<ExpandEntry> entries(2);
auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>();
auto d_entries = entries.data().get();
dh::LaunchN(device_id, 2, [=] __device__(size_t idx) {
auto split = d_splits_out[idx];
auto nidx = idx == 0 ? left_nidx : right_nidx;
float base_weight = evaluator.CalcWeight(
nidx, gpu_param, GradStats{split.left_sum + split.right_sum});
float left_weight =
evaluator.CalcWeight(nidx, gpu_param, GradStats{split.left_sum});
float right_weight = evaluator.CalcWeight(
nidx, gpu_param, GradStats{split.right_sum});
d_entries[idx] =
ExpandEntry{nidx, candidate.depth + 1, d_splits_out[idx],
base_weight, left_weight, right_weight};
});
dh::safe_cuda(hipMemcpyAsync(
pinned_candidates_out.data(), entries.data().get(),
sizeof(ExpandEntry) * entries.size(), hipMemcpyDeviceToHost));
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(device_id),
feature_groups->DeviceAccessor(device_id), gpair,
d_ridx, d_node_hist, histogram_rounding);
}
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
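// Worked illustration (a sketch, not from the original source): if one bin of
// the parent histogram holds {grad: 5.0, hess: 8.0} and the same bin of the
// already-built child holds {grad: 2.0, hess: 3.0}, the kernel above writes
// {grad: 3.0, hess: 5.0} for the sibling bin, so only one child per split ever
// needs a full histogram pass.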
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
void UpdatePosition(int nidx, RegTree* p_tree) {
RegTree::Node split_node = (*p_tree)[nidx];
auto split_type = p_tree->NodeSplitType(nidx);
auto d_matrix = page->GetDeviceAccessor(device_id);
auto node_cats = dh::ToSpan(node_categories);
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
bst_node_t new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
bool go_left = true;
if (split_type == FeatureType::kCategorical) {
go_left = common::Decision(node_cats, common::AsCat(cut_value));
} else {
go_left = cut_value <= split_node.SplitCond();
}
if (go_left) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(hipMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
hipMemcpyHostToDevice));
auto const& h_split_types = p_tree->GetSplitTypes();
auto const& categories = p_tree->GetSplitCategories();
auto const& categories_segments = p_tree->GetSplitCategoriesPtr();
dh::caching_device_vector<FeatureType> d_split_types;
dh::caching_device_vector<uint32_t> d_categories;
dh::caching_device_vector<RegTree::Segment> d_categories_segments;
if (!categories.empty()) {
dh::CopyToD(h_split_types, &d_split_types);
dh::CopyToD(categories, &d_categories);
dh::CopyToD(categories_segments, &d_categories_segments);
}
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_));
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes),
dh::ToSpan(d_split_types), dh::ToSpan(d_categories),
dh::ToSpan(d_categories_segments));
} else {
for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes),
dh::ToSpan(d_split_types), dh::ToSpan(d_categories),
dh::ToSpan(d_categories_segments));
}
}
}
void FinalisePositionInPage(EllpackPageImpl *page,
const common::Span<RegTree::Node> d_nodes,
common::Span<FeatureType const> d_feature_types,
common::Span<uint32_t const> categories,
common::Span<RegTree::Segment> categories_segments) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->FinalisePosition(
[=] __device__(size_t row_id, int position) {
// What happens if user prune the tree?
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(d_feature_types, position)) {
auto node_cats =
categories.subspan(categories_segments[position].beg,
categories_segments[position].size);
go_left = common::Decision(node_cats, common::AsCat(element));
} else {
go_left = element <= node.SplitCond();
}
if (go_left) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
void UpdatePredictionCache(common::Span<bst_float> out_preds_d) {
dh::safe_cuda(hipSetDevice(device_id));
auto d_ridx = row_partitioner->GetRows();
GPUTrainingParam param_d(param);
dh::TemporaryArray<GradientPair> device_node_sum_gradients(node_sum_gradients.size());
dh::safe_cuda(
hipMemcpyAsync(device_node_sum_gradients.data().get(), node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
hipMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = device_node_sum_gradients.data().get();
auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>();
dh::LaunchN(
device_id, out_preds_d.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = evaluator.CalcWeight(pos, param_d,
GradStats{d_node_sum_gradients[pos]});
out_preds_d[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
monitor.Stop("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const ExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
auto evaluator = tree_evaluator.GetEvaluator();
auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
auto base_weight = candidate.base_weight;
auto left_weight = candidate.left_weight * param.learning_rate;
auto right_weight = candidate.right_weight * param.learning_rate;
auto is_cat = candidate.split.is_cat;
if (is_cat) {
auto cat = common::AsCat(candidate.split.fvalue);
std::vector<uint32_t> split_cats(LBitField32::ComputeStorageSize(::max(cat+1, 1)), 0);
LBitField32 cats_bits(split_cats);
cats_bits.Set(cat);
dh::CopyToD(split_cats, &node_categories);
tree.ExpandCategorical(
candidate.nid, candidate.split.findex, split_cats,
candidate.split.dir == kLeftDir, base_weight, left_weight,
right_weight, candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(),
candidate.split.right_sum.GetHess());
} else {
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(),
candidate.split.right_sum.GetHess());
}
// Set up child constraints
auto left_child = tree[candidate.nid].LeftChild();
auto right_child = tree[candidate.nid].RightChild();
tree_evaluator.AddSplit(candidate.nid, left_child, right_child,
tree[candidate.nid].SplitIndex(), candidate.left_weight,
candidate.right_weight);
node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(
candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
ExpandEntry InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
GradientPair root_sum = thrust::reduce(
thrust::hip::par(alloc),
thrust::device_ptr<GradientPair const>(gpair.data()),
thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()));
rabit::Allreduce<rabit::op::Sum, float>(reinterpret_cast<float*>(&root_sum),
2);
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
node_sum_gradients[kRootNIdx] = root_sum;
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Generate first split
auto split = this->EvaluateRootSplit(root_sum);
dh::TemporaryArray<ExpandEntry> entries(1);
auto d_entries = entries.data().get();
auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>();
GPUTrainingParam gpu_param(param);
auto depth = p_tree->GetDepth(kRootNIdx);
dh::LaunchN(device_id, 1, [=] __device__(size_t idx) {
float left_weight = evaluator.CalcWeight(kRootNIdx, gpu_param,
GradStats{split.left_sum});
float right_weight = evaluator.CalcWeight(
kRootNIdx, gpu_param, GradStats{split.right_sum});
d_entries[0] =
ExpandEntry(kRootNIdx, depth, split,
weight, left_weight, right_weight);
});
ExpandEntry root_entry;
dh::safe_cuda(hipMemcpyAsync(
&root_entry, entries.data().get(),
sizeof(ExpandEntry) * entries.size(), hipMemcpyDeviceToHost));
return root_entry;
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
Driver driver(static_cast<TrainParam::TreeGrowPolicy>(param.grow_policy));
monitor.Start("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.Stop("Reset");
monitor.Start("InitRoot");
driver.Push({ this->InitRoot(p_tree, reducer) });
monitor.Stop("InitRoot");
auto num_leaves = 1;
// The set of leaves that can be expanded asynchronously
auto expand_set = driver.Pop();
while (!expand_set.empty()) {
auto new_candidates =
pinned.GetSpan<ExpandEntry>(expand_set.size() * 2, ExpandEntry());
for (auto i = 0ull; i < expand_set.size(); i++) {
auto candidate = expand_set.at(i);
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.Start("UpdatePosition");
this->UpdatePosition(candidate.nid, p_tree);
monitor.Stop("UpdatePosition");
monitor.Start("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.Stop("BuildHist");
monitor.Start("EvaluateSplits");
this->EvaluateLeftRightSplits(candidate, left_child_nidx,
right_child_nidx, *p_tree,
new_candidates.subspan(i * 2, 2));
monitor.Stop("EvaluateSplits");
} else {
// Set default
new_candidates[i * 2] = ExpandEntry();
new_candidates[i * 2 + 1] = ExpandEntry();
}
}
dh::safe_cuda(hipDeviceSynchronize());
driver.Push(new_candidates.begin(), new_candidates.end());
expand_set = driver.Pop();
}
monitor.Start("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat);
monitor.Stop("FinalisePosition");
}
};
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() = default;
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.UpdateAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.Start("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
}
dh::safe_cuda(hipGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.Stop("Update");
}
void InitDataOnce(DMatrix* dmat) {
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({device_}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
device_,
param_.max_bin,
generic_param_->gpu_page_size
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(hipSetDevice(device_));
info_->feature_types.SetDevice(device_);
maker.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->feature_types.ConstDeviceSpan(),
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_,
hist_maker_param_.deterministic_histogram,
batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.Stop("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree {}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.Start("InitData");
this->InitData(p_fmat);
monitor_.Stop("InitData");
gpair->SetDevice(device_);
maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
p_out_preds->SetDevice(device_);
maker->UpdatePredictionCache(p_out_preds->DeviceSpan());
monitor_.Stop("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
private:
bool initialised_ { false };
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_ { nullptr };
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
// The passed in args can be empty, if we simply purge the old maker without
// preserving parameters then we can't do Update on it.
TrainParam param;
if (float_maker_) {
param = float_maker_->param_;
} else if (double_maker_) {
param = double_maker_->param_;
}
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->param_ = param;
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->param_ = param;
double_maker_->Configure(args, tparam_);
}
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
FromJson(config.at("train_param"), &float_maker_->param_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
FromJson(config.at("train_param"), &double_maker_->param_);
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
out["train_param"] = ToJson(float_maker_->param_);
} else {
out["train_param"] = ToJson(double_maker_->param_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
| 8719130ab8370f87ba8e5fcddd53c8d282ca5b0b.cu | /*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/bitfield.h"
#include "../common/timer.h"
#include "../common/categorical.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "split_evaluator.h"
#include "constraints.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/driver.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
bool deterministic_histogram;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(deterministic_histogram).set_default(true).describe(
"Pre-round the gradient for obtaining deterministic gradient histogram.");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(device_id_, data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(device_id_, n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
std::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl* page;
common::Span<FeatureType const> feature_types;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
TrainParam param;
bool deterministic_histogram;
GradientSumT histogram_rounding;
dh::PinnedMemory pinned;
std::vector<cudaStream_t> streams{};
common::Monitor monitor;
TreeEvaluator tree_evaluator;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
std::unique_ptr<GradientBasedSampler> sampler;
std::unique_ptr<FeatureGroups> feature_groups;
// Storing split categories for last node.
dh::caching_device_vector<uint32_t> node_categories;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl* _page,
common::Span<FeatureType const> _feature_types,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features,
bool deterministic_histogram,
BatchParam _batch_param)
: device_id(_device_id),
page(_page),
feature_types{_feature_types},
param(std::move(_param)),
tree_evaluator(param, n_features, _device_id),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
deterministic_histogram{deterministic_histogram},
batch_param(_batch_param) {
sampler.reset(new GradientBasedSampler(
page, _n_rows, batch_param, param.subsample, param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(device_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
feature_groups.reset(new FeatureGroups(
page->Cuts(), page->is_dense, dh::MaxSharedMemoryOptin(device_id),
sizeof(GradientSumT)));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(cudaSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<cudaStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
auto const& info = dmat->Info();
this->column_sampler.Init(num_columns, info.feature_weigths.HostVector(),
param.colsample_bynode, param.colsample_bylevel,
param.colsample_bytree);
dh::safe_cuda(cudaSetDevice(device_id));
tree_evaluator = TreeEvaluator(param, dmat->Info().num_col_, device_id);
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
auto sample = sampler->Sample(dh_gpair->DeviceSpan(), dmat);
page = sample.page;
gpair = sample.gpair;
if (deterministic_histogram) {
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
} else {
histogram_rounding = GradientSumT{0.0, 0.0};
}
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, sample.sample_rows));
hist.Reset();
}
DeviceSplitCandidate EvaluateRootSplit(GradientPair root_sum) {
int nidx = RegTree::kRoot;
dh::TemporaryArray<DeviceSplitCandidate> splits_out(1);
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> inputs{
nidx,
{root_sum.GetGrad(), root_sum.GetHess()},
gpu_param,
feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(nidx)};
auto gain_calc = tree_evaluator.GetEvaluator<GPUTrainingParam>();
EvaluateSingleSplit(dh::ToSpan(splits_out), gain_calc, inputs);
std::vector<DeviceSplitCandidate> result(1);
dh::safe_cuda(cudaMemcpy(result.data(), splits_out.data().get(),
sizeof(DeviceSplitCandidate) * splits_out.size(),
cudaMemcpyDeviceToHost));
return result.front();
}
void EvaluateLeftRightSplits(
ExpandEntry candidate, int left_nidx, int right_nidx, const RegTree& tree,
common::Span<ExpandEntry> pinned_candidates_out) {
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2);
GPUTrainingParam gpu_param(param);
auto left_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(),
left_nidx);
auto right_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(),
left_nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> left{
left_nidx,
{candidate.split.left_sum.GetGrad(),
candidate.split.left_sum.GetHess()},
gpu_param,
left_feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(left_nidx)};
EvaluateSplitInputs<GradientSumT> right{
right_nidx,
{candidate.split.right_sum.GetGrad(),
candidate.split.right_sum.GetHess()},
gpu_param,
right_feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(right_nidx)};
auto d_splits_out = dh::ToSpan(splits_out);
EvaluateSplits(d_splits_out, tree_evaluator.GetEvaluator<GPUTrainingParam>(), left, right);
dh::TemporaryArray<ExpandEntry> entries(2);
auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>();
auto d_entries = entries.data().get();
dh::LaunchN(device_id, 2, [=] __device__(size_t idx) {
auto split = d_splits_out[idx];
auto nidx = idx == 0 ? left_nidx : right_nidx;
float base_weight = evaluator.CalcWeight(
nidx, gpu_param, GradStats{split.left_sum + split.right_sum});
float left_weight =
evaluator.CalcWeight(nidx, gpu_param, GradStats{split.left_sum});
float right_weight = evaluator.CalcWeight(
nidx, gpu_param, GradStats{split.right_sum});
d_entries[idx] =
ExpandEntry{nidx, candidate.depth + 1, d_splits_out[idx],
base_weight, left_weight, right_weight};
});
dh::safe_cuda(cudaMemcpyAsync(
pinned_candidates_out.data(), entries.data().get(),
sizeof(ExpandEntry) * entries.size(), cudaMemcpyDeviceToHost));
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(device_id),
feature_groups->DeviceAccessor(device_id), gpair,
d_ridx, d_node_hist, histogram_rounding);
}
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
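// Subtraction trick: a parent's histogram is the per-bin sum of its children's
// histograms, so the sibling of an explicitly built child can be derived as
// hist(parent) - hist(built child), avoiding a second pass over the rows.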
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
void UpdatePosition(int nidx, RegTree* p_tree) {
RegTree::Node split_node = (*p_tree)[nidx];
auto split_type = p_tree->NodeSplitType(nidx);
auto d_matrix = page->GetDeviceAccessor(device_id);
auto node_cats = dh::ToSpan(node_categories);
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
bst_node_t new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
bool go_left = true;
if (split_type == FeatureType::kCategorical) {
go_left = common::Decision(node_cats, common::AsCat(cut_value));
} else {
go_left = cut_value <= split_node.SplitCond();
}
if (go_left) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(cudaMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
cudaMemcpyHostToDevice));
auto const& h_split_types = p_tree->GetSplitTypes();
auto const& categories = p_tree->GetSplitCategories();
auto const& categories_segments = p_tree->GetSplitCategoriesPtr();
dh::caching_device_vector<FeatureType> d_split_types;
dh::caching_device_vector<uint32_t> d_categories;
dh::caching_device_vector<RegTree::Segment> d_categories_segments;
if (!categories.empty()) {
dh::CopyToD(h_split_types, &d_split_types);
dh::CopyToD(categories, &d_categories);
dh::CopyToD(categories_segments, &d_categories_segments);
}
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_));
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes),
dh::ToSpan(d_split_types), dh::ToSpan(d_categories),
dh::ToSpan(d_categories_segments));
} else {
for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes),
dh::ToSpan(d_split_types), dh::ToSpan(d_categories),
dh::ToSpan(d_categories_segments));
}
}
}
void FinalisePositionInPage(EllpackPageImpl *page,
const common::Span<RegTree::Node> d_nodes,
common::Span<FeatureType const> d_feature_types,
common::Span<uint32_t const> categories,
common::Span<RegTree::Segment> categories_segments) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->FinalisePosition(
[=] __device__(size_t row_id, int position) {
// What happens if the user prunes the tree?
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(d_feature_types, position)) {
auto node_cats =
categories.subspan(categories_segments[position].beg,
categories_segments[position].size);
go_left = common::Decision(node_cats, common::AsCat(element));
} else {
go_left = element <= node.SplitCond();
}
if (go_left) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
void UpdatePredictionCache(common::Span<bst_float> out_preds_d) {
dh::safe_cuda(cudaSetDevice(device_id));
auto d_ridx = row_partitioner->GetRows();
GPUTrainingParam param_d(param);
dh::TemporaryArray<GradientPair> device_node_sum_gradients(node_sum_gradients.size());
dh::safe_cuda(
cudaMemcpyAsync(device_node_sum_gradients.data().get(), node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
cudaMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = device_node_sum_gradients.data().get();
auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>();
dh::LaunchN(
device_id, out_preds_d.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = evaluator.CalcWeight(pos, param_d,
GradStats{d_node_sum_gradients[pos]});
out_preds_d[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
row_partitioner.reset();
}
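// Each cached prediction is advanced by learning_rate times the weight of the
// leaf its row finally landed in, mirroring the leaf values written into the
// tree; the row partitioner is released afterwards because the final positions
// are no longer needed on this device.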
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
monitor.Stop("AllReduce");
}
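// The histogram is reduced across workers in place; each GradientSumT bin is
// reinterpreted as consecutive values of its underlying type, so gradients and
// hessians are summed component-wise by a single AllReduceSum call.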
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const ExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other histogram
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
auto evaluator = tree_evaluator.GetEvaluator();
auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
auto base_weight = candidate.base_weight;
auto left_weight = candidate.left_weight * param.learning_rate;
auto right_weight = candidate.right_weight * param.learning_rate;
auto is_cat = candidate.split.is_cat;
if (is_cat) {
auto cat = common::AsCat(candidate.split.fvalue);
std::vector<uint32_t> split_cats(LBitField32::ComputeStorageSize(std::max(cat+1, 1)), 0);
LBitField32 cats_bits(split_cats);
cats_bits.Set(cat);
dh::CopyToD(split_cats, &node_categories);
tree.ExpandCategorical(
candidate.nid, candidate.split.findex, split_cats,
candidate.split.dir == kLeftDir, base_weight, left_weight,
right_weight, candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(),
candidate.split.right_sum.GetHess());
} else {
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(),
candidate.split.right_sum.GetHess());
}
// Set up child constraints
auto left_child = tree[candidate.nid].LeftChild();
auto right_child = tree[candidate.nid].RightChild();
tree_evaluator.AddSplit(candidate.nid, left_child, right_child,
tree[candidate.nid].SplitIndex(), candidate.left_weight,
candidate.right_weight);
node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(
candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
ExpandEntry InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
GradientPair root_sum = thrust::reduce(
thrust::cuda::par(alloc),
thrust::device_ptr<GradientPair const>(gpair.data()),
thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()));
rabit::Allreduce<rabit::op::Sum, float>(reinterpret_cast<float*>(&root_sum),
2);
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
node_sum_gradients[kRootNIdx] = root_sum;
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Generate first split
auto split = this->EvaluateRootSplit(root_sum);
dh::TemporaryArray<ExpandEntry> entries(1);
auto d_entries = entries.data().get();
auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>();
GPUTrainingParam gpu_param(param);
auto depth = p_tree->GetDepth(kRootNIdx);
dh::LaunchN(device_id, 1, [=] __device__(size_t idx) {
float left_weight = evaluator.CalcWeight(kRootNIdx, gpu_param,
GradStats{split.left_sum});
float right_weight = evaluator.CalcWeight(
kRootNIdx, gpu_param, GradStats{split.right_sum});
d_entries[0] =
ExpandEntry(kRootNIdx, depth, split,
weight, left_weight, right_weight);
});
ExpandEntry root_entry;
dh::safe_cuda(cudaMemcpyAsync(
&root_entry, entries.data().get(),
sizeof(ExpandEntry) * entries.size(), cudaMemcpyDeviceToHost));
return root_entry;
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
Driver driver(static_cast<TrainParam::TreeGrowPolicy>(param.grow_policy));
monitor.Start("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.Stop("Reset");
monitor.Start("InitRoot");
driver.Push({ this->InitRoot(p_tree, reducer) });
monitor.Stop("InitRoot");
auto num_leaves = 1;
// The set of leaves that can be expanded asynchronously
auto expand_set = driver.Pop();
while (!expand_set.empty()) {
auto new_candidates =
pinned.GetSpan<ExpandEntry>(expand_set.size() * 2, ExpandEntry());
for (auto i = 0ull; i < expand_set.size(); i++) {
auto candidate = expand_set.at(i);
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.Start("UpdatePosition");
this->UpdatePosition(candidate.nid, p_tree);
monitor.Stop("UpdatePosition");
monitor.Start("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.Stop("BuildHist");
monitor.Start("EvaluateSplits");
this->EvaluateLeftRightSplits(candidate, left_child_nidx,
right_child_nidx, *p_tree,
new_candidates.subspan(i * 2, 2));
monitor.Stop("EvaluateSplits");
} else {
// Set default
new_candidates[i * 2] = ExpandEntry();
new_candidates[i * 2 + 1] = ExpandEntry();
}
}
dh::safe_cuda(cudaDeviceSynchronize());
driver.Push(new_candidates.begin(), new_candidates.end());
expand_set = driver.Pop();
}
monitor.Start("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat);
monitor.Stop("FinalisePosition");
}
};
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() = default;
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.UpdateAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.Start("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
}
dh::safe_cuda(cudaGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.Stop("Update");
}
void InitDataOnce(DMatrix* dmat) {
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({device_}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
device_,
param_.max_bin,
generic_param_->gpu_page_size
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(cudaSetDevice(device_));
info_->feature_types.SetDevice(device_);
maker.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->feature_types.ConstDeviceSpan(),
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_,
hist_maker_param_.deterministic_histogram,
batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.Stop("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree {}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.Start("InitData");
this->InitData(p_fmat);
monitor_.Stop("InitData");
gpair->SetDevice(device_);
maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
p_out_preds->SetDevice(device_);
maker->UpdatePredictionCache(p_out_preds->DeviceSpan());
monitor_.Stop("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
private:
bool initialised_ { false };
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_ { nullptr };
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
// The passed-in args can be empty; if we simply purge the old maker without
// preserving its parameters, we can't call Update on it.
TrainParam param;
if (float_maker_) {
param = float_maker_->param_;
} else if (double_maker_) {
param = double_maker_->param_;
}
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->param_ = param;
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->param_ = param;
double_maker_->Configure(args, tparam_);
}
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
FromJson(config.at("train_param"), &float_maker_->param_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
FromJson(config.at("train_param"), &double_maker_->param_);
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
out["train_param"] = ToJson(float_maker_->param_);
} else {
out["train_param"] = ToJson(double_maker_->param_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
|
feaf8912821790d1b8635b1bec47b3834504ae3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* VolumeMeshRenderer.cu
*
* Copyright (C) 2012 by Universitaet Stuttgart (VIS).
* Alle Rechte vorbehalten.
*/
#ifndef MEGAMOLPROTEIN_VOLUMEMESHRENDERER_CU_INCLUDED
#define MEGAMOLPROTEIN_VOLUMEMESHRENDERER_CU_INCLUDED
#include "VolumeMeshRenderer.cuh"
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/device_ptr.h>
#include "helper_math.h"
#ifdef CUDA_NO_SM_11_ATOMIC_INTRINSICS
#error "Atomic intrinsics are missing (nvcc -arch=sm_11)"
#endif
#define GT_THREADS 128
/*
* Variables.
*/
//texture<float4, hipTextureType3D, hipReadModeElementType> volumeTex;
//texture<float, hipTextureType1D, hipReadModeElementType> volumeTex;
__device__ float* volumeTex;
__device__ int* neighborAtomTex;
texture<float, hipTextureType3D, hipReadModeElementType> aoVolumeTex;
__device__ __constant__ uint3 dVolumeSize;
hipError_t copyVolSizeToDevice( uint3 volSize) {
hipError_t error = hipMemcpyToSymbol( dVolumeSize, (void*)&volSize, sizeof(uint3));
hipDeviceSynchronize();
return error;
}
hipError_t copyVolSizeFromDevice( uint3 &volSize) {
volSize = make_uint3( 0, 0, 0);
hipError_t error = hipMemcpyFromSymbol( (void*)&volSize, dVolumeSize, sizeof(uint3));
hipDeviceSynchronize();
return error;
}
/*
* Voxel index and access functions.
*/
inline __device__
uint Index()
{
return __umul24(__umul24(blockIdx.y, gridDim.x) + blockIdx.x, blockDim.x) + threadIdx.x;
}
inline __device__
float VoxelValue(float3 index)
{
uint idx = index.z * dVolumeSize.x * dVolumeSize.y + index.y * dVolumeSize.x + index.x;
return volumeTex[idx];//tex1Dfetch(volumeTex, idx);
}
inline __device__
int NeighborAtom(float3 index)
{
uint idx = index.z * dVolumeSize.x * dVolumeSize.y + index.y * dVolumeSize.x + index.x;
return neighborAtomTex[idx];
}
inline __device__
float3 VoxelGradient(float3 index)
{
float3 result;
// Compute central difference.
result.x = VoxelValue(make_float3(index.x - 1.0f, index.y, index.z))
- VoxelValue(make_float3(index.x + 1.0f, index.y, index.z));
result.y = VoxelValue(make_float3(index.x, index.y - 1.0f, index.z))
- VoxelValue(make_float3(index.x, index.y + 1.0f, index.z));
result.z = VoxelValue(make_float3(index.x, index.y, index.z - 1.0f))
- VoxelValue(make_float3(index.x, index.y, index.z + 1.0f));
return result;
}
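// The differences are taken as f(x-1) - f(x+1), i.e. the negated central
// difference (up to a factor of 1/2), so the vector points from higher to
// lower density; GenerateTriangles normalizes the interpolated result and
// stores it as the vertex normal.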
/*
* Marching tetrahedrons.
*/
inline __device__
float3 CubeVertex0(uint index)
{
return make_float3(index % dVolumeSize.x, (index / dVolumeSize.x) % dVolumeSize.y,
index / (dVolumeSize.x * dVolumeSize.y));
}
__global__
void ClassifyCubes_kernel(uint* cubeStates, float thresholdValue, uint cubeCount)
{
const uint cubeIndex = min(Index(), cubeCount - 1);
const float3 cubeVertex0 = CubeVertex0(cubeIndex);
const float cubeVertexOffsetsL[8][3] = {
{0, 0, 0},
{1, 0, 0},
{1, 1, 0},
{0, 1, 0},
{0, 0, 1},
{1, 0, 1},
{1, 1, 1},
{0, 1, 1}
};
// Add "vertex states" of a cube (0: inactive, 1: active).
unsigned char cubeFlags = static_cast<uint>(VoxelValue(cubeVertex0) <= thresholdValue);
for (int cubeVertexIndex = 1; cubeVertexIndex < 8; ++cubeVertexIndex) {
const float3 cubeVertex = make_float3(
cubeVertex0.x + cubeVertexOffsetsL[cubeVertexIndex][0],
cubeVertex0.y + cubeVertexOffsetsL[cubeVertexIndex][1],
cubeVertex0.z + cubeVertexOffsetsL[cubeVertexIndex][2]);
cubeFlags |= static_cast<uint>(VoxelValue(cubeVertex) <= thresholdValue) * (1 << cubeVertexIndex);
}
// Reduce "vertex states" to a "cube state".
cubeStates[cubeIndex] = min(cubeFlags % 255, 1);
}
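// cubeFlags collects one bit per cube corner that lies below the threshold.
// The values 0 (no corner inside) and 255 (all corners inside) both mean the
// isosurface does not cross the cube; cubeFlags % 255 maps exactly those two
// cases to 0, so min(cubeFlags % 255, 1) marks a cube as active only when the
// surface passes through it.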
__global__
void CompactCubes_kernel(uint* cubeMap, uint* cubeOffsets, uint* cubeStates, uint cubeCount)
{
const uint cubeIndex = min(Index(), cubeCount - 1);
if (cubeStates[cubeIndex] != 0) {
// Map from active cubes list to cube index.
cubeMap[cubeOffsets[cubeIndex]] = cubeIndex;
}
}
__device__ __shared__ float3 cubeVertexOffsets[8];
__device__ __constant__ float cubeVertexOffsetsC[8][3] = {
{0, 0, 0},
{1, 0, 0},
{1, 1, 0},
{0, 1, 0},
{0, 0, 1},
{1, 0, 1},
{1, 1, 1},
{0, 1, 1}
};
inline __device__
void LoadCubeOffsets()
{
// Load cube vertex offsets into shared memory.
if (threadIdx.x < 8) {
cubeVertexOffsets[threadIdx.x] = make_float3(cubeVertexOffsetsC[threadIdx.x][0],
cubeVertexOffsetsC[threadIdx.x][1], cubeVertexOffsetsC[threadIdx.x][2]);
}
}
__device__ __shared__ uint tetrahedronsInACube[6][4];
__device__ __constant__ uint tetrahedronsInACubeC[6][4] = {
{0, 5, 1, 6},
{0, 1, 2, 6},
{0, 2, 3, 6},
{0, 3, 7, 6},
{0, 7, 4, 6},
{0, 4, 5, 6}
};
inline __device__
void LoadTetrahedronsInACube()
{
// Load tetrahedron vertex index to cube index map into shared memory.
if (threadIdx.x < 6) {
for (int i = 0; i < 4; ++i) {
tetrahedronsInACube[threadIdx.x][i] = tetrahedronsInACubeC[threadIdx.x][i];
}
}
}
__device__ __shared__ uint tetrahedronVertexCount[16];
__device__ __constant__ uint tetrahedronVertexCountC[16] = {
0, 3, 3, 6, 3, 6, 6, 3,
3, 6, 6, 3, 6, 3, 3, 0
};
inline __device__
void LoadTetrahedronVertexCount()
{
// Load tetrahedron vertex count into shared memory.
if (threadIdx.x < 16) {
tetrahedronVertexCount[threadIdx.x] = tetrahedronVertexCountC[threadIdx.x];
}
}
inline __device__
unsigned char TetrahedronFlags(float3 cubeVertex0, int tetrahedronIndex, float thresholdValue)
{
unsigned char flags = 0;
for (int tetrahedronVertexIndex = 0; tetrahedronVertexIndex < 4; ++tetrahedronVertexIndex) {
const float3 cubeVertexOffset = cubeVertexOffsets[tetrahedronsInACube[tetrahedronIndex][tetrahedronVertexIndex]];
if (VoxelValue(cubeVertex0 + cubeVertexOffset) <= thresholdValue) {
flags |= 1 << static_cast<unsigned char>(tetrahedronVertexIndex);
}
}
return flags;
}
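// The returned 4-bit mask has one bit per tetrahedron corner whose value lies
// below the threshold; it indexes the marching-tetrahedra lookup tables
// (vertex counts, edge flags and triangle lists) used by the kernels below.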
__global__
void ClassifyTetrahedronsInACube_kernel(uint* verticesPerTetrahedron, uint* cubeMap, float thresholdValue, uint activeCubeCount)
{
const uint activeCubeIndex = Index();
const float3 cubeVertex0 = CubeVertex0(cubeMap[activeCubeIndex]);
LoadCubeOffsets();
LoadTetrahedronsInACube();
LoadTetrahedronVertexCount();
__syncthreads();
// Prevent non-power of two writes.
if (activeCubeIndex >= activeCubeCount) {
return;
}
// Classify all tetrahedrons in a cube.
for (int tetrahedronIndex = 0; tetrahedronIndex < 6; ++tetrahedronIndex) {
// Compute tetrahedron flags.
unsigned char tetrahedronFlags = TetrahedronFlags(cubeVertex0, tetrahedronIndex, thresholdValue);
// Store number of vertices.
verticesPerTetrahedron[activeCubeIndex * 6 + tetrahedronIndex] = tetrahedronVertexCount[tetrahedronFlags];
}
}
__device__ __shared__ unsigned char tetrahedronEdgeFlags[16];
__device__ __constant__ unsigned char tetrahedronEdgeFlagsC[16] = {
0x00, 0x0d, 0x13, 0x1e, 0x26, 0x2b, 0x35, 0x38,
0x38, 0x35, 0x2b, 0x26, 0x1e, 0x13, 0x0d, 0x00
};
__device__ __shared__ char tetrahedronEdgeConnections[6][2];
__device__ __constant__ char tetrahedronEdgeConnectionsC[6][2] = {
{0, 1}, {1, 2}, {2, 0}, {0, 3}, {1, 3}, {2, 3}
};
inline __device__
void LoadTetrahedronEdgeFlagsAndConnections()
{
// Load tetrahedron edge flags into shared memory.
if (threadIdx.x < 16) {
tetrahedronEdgeFlags[threadIdx.x] = tetrahedronEdgeFlagsC[threadIdx.x];
}
// Load tetrahedron edge connection table into shared memory.
if (threadIdx.x < 6) {
tetrahedronEdgeConnections[threadIdx.x][0] = tetrahedronEdgeConnectionsC[threadIdx.x][0];
tetrahedronEdgeConnections[threadIdx.x][1] = tetrahedronEdgeConnectionsC[threadIdx.x][1];
}
}
__device__ __shared__ char tetrahedronTriangles[16][6];
__device__ __constant__ char tetrahedronTrianglesC[16][6] = {
{-1, -1, -1, -1, -1, -1},
{ 0, 3, 2, -1, -1, -1},
{ 0, 1, 4, -1, -1, -1},
{ 1, 4, 2, 2, 4, 3},
{ 1, 2, 5, -1, -1, -1},
{ 0, 3, 5, 0, 5, 1},
{ 0, 2, 5, 0, 5, 4},
{ 5, 4, 3, -1, -1, -1},
{ 3, 4, 5, -1, -1, -1},
{ 4, 5, 0, 5, 2, 0},
{ 1, 5, 0, 5, 3, 0},
{ 5, 2, 1, -1, -1, -1},
{ 3, 4, 2, 2, 4, 1},
{ 4, 1, 0, -1, -1, -1},
{ 2, 3, 0, -1, -1, -1},
{-1, -1, -1, -1, -1, -1}
};
inline __device__
void LoadTetrahedronTriangles()
{
// Load tetrahedron triangle table into shared memory.
if (threadIdx.x < 16) {
for (int i = 0; i < 6; ++i) {
tetrahedronTriangles[threadIdx.x][i] = tetrahedronTrianglesC[threadIdx.x][i];
}
}
}
__global__
void GenerateTriangles_kernel(float4* vertices, int* neighborAtom, float4* normals, float translateX, float translateY, float translateZ,
float scaleX, float scaleY, float scaleZ, uint* vertexOffsets, uint* cubeMap, float thresholdValue, uint tetrahedronCount)
{
const uint id = Index();
const uint activeCubeIndex = id / 6;
const int tetrahedronIndex = id % 6;
const float3 cubeVertex0 = CubeVertex0(cubeMap[activeCubeIndex]);
LoadCubeOffsets();
LoadTetrahedronsInACube();
LoadTetrahedronEdgeFlagsAndConnections();
LoadTetrahedronTriangles();
__syncthreads();
// Prevent non-power of two writes.
if (id >= tetrahedronCount) {
return;
}
unsigned char tetrahedronFlags = TetrahedronFlags(cubeVertex0, tetrahedronIndex, thresholdValue);
// Skip inactive tetrahedra.
if (tetrahedronFlags == 0x00 || tetrahedronFlags == 0x0F) {
return;
}
__shared__ float3 edgeVertex[6 * GT_THREADS];
// temporary storage for edge vertex neighbor atom
__shared__ int edgeVertexNeighborAtom[6 * GT_THREADS];
__shared__ float3 edgeNormal[6 * GT_THREADS];
// Find intersection of the surface with each edge.
for (int edgeIndex = 0; edgeIndex < 6; edgeIndex++) {
// Test if edge intersects with surface.
if (tetrahedronEdgeFlags[tetrahedronFlags] & (1 << static_cast<unsigned char>(edgeIndex))) {
// Interpolate vertex.
const float3 v0 = cubeVertex0 + cubeVertexOffsets[tetrahedronsInACube[tetrahedronIndex][tetrahedronEdgeConnections[edgeIndex][0]]];
const float3 v1 = cubeVertex0 + cubeVertexOffsets[tetrahedronsInACube[tetrahedronIndex][tetrahedronEdgeConnections[edgeIndex][1]]];
const float f0 = VoxelValue(v0);
const float interpolator = (thresholdValue - f0) / (VoxelValue(v1) - f0);
float3 vertex = lerp(make_float3(v0.x, v0.y, v0.z), make_float3(v1.x, v1.y, v1.z), interpolator);
edgeVertex[threadIdx.x * 6 + edgeIndex] = vertex;
// store nearest atom per edge vertex
edgeVertexNeighborAtom[threadIdx.x * 6 + edgeIndex] = (interpolator < 0.5) ? NeighborAtom(v0) : NeighborAtom(v1);
// Compute normal from gradient.
edgeNormal[threadIdx.x * 6 + edgeIndex] = normalize(lerp(VoxelGradient(v0), VoxelGradient(v1), interpolator));
}
}
// Write vertices.
for (int triangleIndex = 0; triangleIndex < 2; triangleIndex++) {
if (tetrahedronTriangles[tetrahedronFlags][3 * triangleIndex] >= 0) {
//int edgeIndex0 = threadIdx.x * 6 + tetrahedronTriangles[tetrahedronFlags][3 * triangleIndex + 0];
//int edgeIndex1 = threadIdx.x * 6 + tetrahedronTriangles[tetrahedronFlags][3 * triangleIndex + 1];
//int edgeIndex2 = threadIdx.x * 6 + tetrahedronTriangles[tetrahedronFlags][3 * triangleIndex + 2];
//float3 faceNormal = cross(edgeVertex[edgeIndex1] - edgeVertex[edgeIndex0], edgeVertex[edgeIndex2] - edgeVertex[edgeIndex0]);
for (int cornerIndex = 0; cornerIndex < 3; cornerIndex++) {
int edgeIndex = threadIdx.x * 6 + tetrahedronTriangles[tetrahedronFlags][3 * triangleIndex + cornerIndex];
uint vertexOffset = vertexOffsets[id] + 3 * triangleIndex + cornerIndex;
//vertices[vertexOffset] = make_float4(translateX + edgeVertex[edgeIndex].x * scaleX,
// translateY + edgeVertex[edgeIndex].y * scaleY,
// translateZ + edgeVertex[edgeIndex].z * scaleZ, 1.0f);
//vertices[vertexOffset] = make_float4( edgeVertex[edgeIndex].x / 128.0f,
// edgeVertex[edgeIndex].y / 128.0f ,
// edgeVertex[edgeIndex].z / 128.0f, 1.0f);
vertices[vertexOffset] = make_float4( edgeVertex[edgeIndex].x,
edgeVertex[edgeIndex].y, edgeVertex[edgeIndex].z, 1.0f);
// store nearest atom per output vertex
neighborAtom[vertexOffset] = edgeVertexNeighborAtom[edgeIndex];
//normals[vertexOffset] = make_float4(faceNormal.x, faceNormal.y, faceNormal.z, 0.0f);
normals[vertexOffset] = make_float4(edgeNormal[edgeIndex].x,
edgeNormal[edgeIndex].y, edgeNormal[edgeIndex].z, 0.0f);
}
}
}
}
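// Each active tetrahedron emits one or two triangles according to its case in
// the triangle table. Edge vertices are placed by inverse linear interpolation
// of the field along the edge, t = (threshold - f0) / (f1 - f0); the nearest
// atom id is copied from whichever edge endpoint the vertex is closer to
// (t < 0.5 picks v0), and normals are interpolated from the voxel gradients.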
/*
* Mesh scanning.
*/
__global__
void MeshReset_kernel(uint* eqList, uint* refList, uint tetrahedronCount, uint* vertexOffsets, float* triangleAO)
{
const uint id = Index();
// Prevent non-power of two writes.
if (id >= tetrahedronCount) {
return;
}
// TODO: fix this! -> MeshScan should work correctly for different AO factors in one tetrahedron!
// Set the ambient occlusion values for all triangles to the same value (average of all individual triangle AO factors)
float aoValue = 0.0;
for (uint vertexOffset = vertexOffsets[id]; vertexOffset < vertexOffsets[id + 1]; vertexOffset += 3) {
aoValue += triangleAO[vertexOffset/3];
}
aoValue /= float(( vertexOffsets[id + 1] - vertexOffsets[id]) / 3);
for (uint vertexOffset = vertexOffsets[id]; vertexOffset < vertexOffsets[id + 1]; vertexOffset += 3) {
triangleAO[vertexOffset/3] = aoValue;
}
eqList[id] = id;
refList[id] = id;
}
__device__ __shared__ unsigned char tetrahedronFaceMask[4];
__device__ __constant__ unsigned char tetrahedronFaceMaskC[4] = {
0x07, 0x0b, 0x0d, 0x0e
};
// This macro greatly simplifies the adjacent neighbour lookup by computing an
// offset from the cube's index (not the tetrahedron's global index) using
// cube offsets and a local tetrahedron index.
#define TFN(t, cX, cY, cZ) {t, cX, cY, cZ}
__device__ __shared__ int tetrahedronFaceNeighbours[6][4][4];
__device__ __constant__ int tetrahedronFaceNeighboursC[6][4][4] = {
{TFN(2, 0, -1, 0), TFN(5, 0, 0, 0), TFN(1, 0, 0, 0), TFN(4, 1, 0, 0)},
{TFN(5, 0, 0, -1), TFN(0, 0, 0, 0), TFN(2, 0, 0, 0), TFN(3, 1, 0, 0)},
{TFN(4, 0, 0, -1), TFN(1, 0, 0, 0), TFN(3, 0, 0, 0), TFN(0, 0, 1, 0)},
{TFN(1, -1, 0, 0), TFN(2, 0, 0, 0), TFN(4, 0, 0, 0), TFN(5, 0, 1, 0)},
{TFN(0, -1, 0, 0), TFN(3, 0, 0, 0), TFN(5, 0, 0, 0), TFN(2, 0, 0, 1)},
{TFN(3, 0, -1, 0), TFN(4, 0, 0, 0), TFN(0, 0, 0, 0), TFN(1, 0, 0, 1)}
};
#undef TFN
inline __device__
void LoadTetrahedronNeighbours()
{
// Load tetrahedron face masks into shared memory.
if (threadIdx.x < 4) {
tetrahedronFaceMask[threadIdx.x] = tetrahedronFaceMaskC[threadIdx.x];
}
// Load tetrahedron face neighbours table into shared memory.
if (threadIdx.x < 6) {
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
tetrahedronFaceNeighbours[threadIdx.x][i][j] = tetrahedronFaceNeighboursC[threadIdx.x][i][j];
}
}
}
}
inline __device__
float AOValue(float3 position, float3 normal) {
//TODO: double check this scale * 4 hack.
float3 scale = make_float3(1.0f / dVolumeSize.x,
1.0f / dVolumeSize.y, 1.0f / dVolumeSize.z);
float x = position.x * scale.x + normal.x * scale.x;
float y = position.y * scale.y + normal.y * scale.y;
float z = position.z * scale.z + normal.z * scale.z;
return tex3D(aoVolumeTex, x, y, z);
//float aofac = 0.0f;
//for ( uint i = 0; i < 10; i++ ) {
// x = position.x * scale.x + normal.x * scale.x * ( 1.0f / 10.0f);
// y = position.y * scale.y + normal.y * scale.y * ( 1.0f / 10.0f);
// z = position.z * scale.z + normal.z * scale.z * ( 1.0f / 10.0f);
// aofac = max( aofac, tex3D(aoVolumeTex, x, y, z));
//}
//return aofac;
}
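// Samples the normalized 3D AO texture at the vertex position nudged roughly
// one voxel along the normal (both converted to texture coordinates via the
// reciprocal volume size), so the lookup happens slightly off the surface.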
__global__
void MeshScan_kernel(float4* vertices, float4* normals, uint* vertexOffsets, float* triangleAO, float aoThreshold, uint* eqList, uint* refList,
bool* modified, uint* cubeStates, uint* cubeOffsets, uint* cubeMap, float thresholdValue, uint tetrahedronCount)
{
const uint id = Index();
const uint activeCubeIndex = id / 6;
const int tetrahedronIndex = id % 6;
const float3 cubeVertex0 = CubeVertex0(cubeMap[activeCubeIndex]);
const uint label1 = eqList[id];
uint label2 = ~0;
LoadCubeOffsets();
LoadTetrahedronsInACube();
LoadTetrahedronNeighbours();
__syncthreads();
// Prevent non-power of two writes.
if (id >= tetrahedronCount) {
return;
}
// Search for minimum label among neighbours.
unsigned char tetrahedronFlags = TetrahedronFlags(cubeVertex0, tetrahedronIndex, thresholdValue);
const uint volumeCubeSize = dVolumeSize.x * dVolumeSize.y * dVolumeSize.z;
for(int faceIndex = 0; faceIndex < 4; faceIndex++) {
unsigned char tetrahedronFaceFlags = tetrahedronFlags & tetrahedronFaceMask[faceIndex];
// Test if this face is connected to a neighbour (using identity).
if (tetrahedronFaceFlags != 0 && tetrahedronFaceFlags != tetrahedronFaceMask[faceIndex]) {
uint tfn = tetrahedronFaceNeighbours[tetrahedronIndex][faceIndex][0]
+ (tetrahedronFaceNeighbours[tetrahedronIndex][faceIndex][1]
+ (tetrahedronFaceNeighbours[tetrahedronIndex][faceIndex][2] * dVolumeSize.x)
+ (tetrahedronFaceNeighbours[tetrahedronIndex][faceIndex][3] * dVolumeSize.y * dVolumeSize.x)) * 6;
int neighbourId = cubeMap[activeCubeIndex] * 6 + tfn;
int neighbourCubeId = neighbourId / 6;
// Test if cube is active.
if (neighbourCubeId >= 0 && neighbourCubeId < volumeCubeSize && cubeStates[neighbourCubeId] == 1) {
neighbourId = cubeOffsets[neighbourCubeId] * 6 + neighbourId % 6;
// For each triangle.
bool aoState1 = true;
for (uint vertexOffset = vertexOffsets[id]; vertexOffset < vertexOffsets[id + 1]; vertexOffset += 3) {
//float3 v0 = make_float3(vertices[vertexOffset].x, vertices[vertexOffset].y, vertices[vertexOffset].z);
//float3 v1 = make_float3(vertices[vertexOffset + 1].x, vertices[vertexOffset + 1].y, vertices[vertexOffset + 1].z);
//float3 v2 = make_float3(vertices[vertexOffset + 2].x, vertices[vertexOffset + 2].y, vertices[vertexOffset + 2].z);
//float3 n0 = make_float3(normals[vertexOffset].x, normals[vertexOffset].y, normals[vertexOffset].z);
//float3 n1 = make_float3(normals[vertexOffset + 1].x, normals[vertexOffset + 1].y, normals[vertexOffset + 1].z);
//float3 n2 = make_float3(normals[vertexOffset + 2].x, normals[vertexOffset + 2].y, normals[vertexOffset + 2].z);
//float aoValue = (AOValue(v0, n0) + AOValue(v1, n1) + AOValue(v2, n2)) / 3.0f;
float aoValue = triangleAO[vertexOffset/3];
if (aoValue > aoThreshold) {
aoState1 = false;
//break;
}
}
// For each neighbour triangle.
bool aoState2 = true;
for (uint vertexOffset = vertexOffsets[neighbourId]; vertexOffset < vertexOffsets[neighbourId + 1]; vertexOffset += 3) {
//float3 v0 = make_float3(vertices[vertexOffset].x, vertices[vertexOffset].y, vertices[vertexOffset].z);
//float3 v1 = make_float3(vertices[vertexOffset + 1].x, vertices[vertexOffset + 1].y, vertices[vertexOffset + 1].z);
//float3 v2 = make_float3(vertices[vertexOffset + 2].x, vertices[vertexOffset + 2].y, vertices[vertexOffset + 2].z);
//float3 n0 = make_float3(normals[vertexOffset].x, normals[vertexOffset].y, normals[vertexOffset].z);
//float3 n1 = make_float3(normals[vertexOffset + 1].x, normals[vertexOffset + 1].y, normals[vertexOffset + 1].z);
//float3 n2 = make_float3(normals[vertexOffset + 2].x, normals[vertexOffset + 2].y, normals[vertexOffset + 2].z);
//float aoValue = (AOValue(v0, n0) + AOValue(v1, n1) + AOValue(v2, n2)) / 3.0f;
float aoValue = triangleAO[vertexOffset/3];
if (aoValue > aoThreshold) {
aoState2 = false;
//break;
}
}
// Test for AO-Shading connectedness.
if (aoState1 == aoState2) {
// Store connectedness.
uint neighbourLabel = eqList[neighbourId];
if (neighbourLabel < label2) {
label2 = neighbourLabel;
}
}
}
}
}
if (label2 < label1) {
// Write out minimum (atomic, thus no synchronization).
atomicMin(&refList[label1], label2);
*modified = true;
}
}
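// One relabeling pass of a connected-component labeling over tetrahedra:
// face-adjacent tetrahedra are treated as connected when their triangles fall
// on the same side of the AO threshold, the smallest neighbouring label is
// recorded via atomicMin on refList, and *modified signals that (presumably)
// another scan/analysis/labeling round is needed before the labels converge.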
__global__
void MeshAnalysis_kernel(uint* eqList, uint* refList, uint tetrahedronCount)
{
const uint id = Index();
// Prevent non-power of two writes.
if (id >= tetrahedronCount) {
return;
}
// Test if thread is the "label owner".
if (eqList[id] == id) {
uint label = id;
uint ref = refList[label];
// Expand equivalence chain.
while (ref != label) {
label = ref;
ref = refList[label];
}
refList[id] = ref;
}
}
__global__
void MeshLabeling_kernel(uint* eqList, uint* refList, uint tetrahedronCount)
{
const uint id = Index();
// Prevent non-power of two writes.
if (id >= tetrahedronCount) {
return;
}
eqList[id] = refList[eqList[id]];
}
/*
* Mesh centroid handling.
*/
__global__
void CentroidMap_kernel(uint* vertexLabels, uint* vertexOffsets, uint* verticesPerTetrahedron, uint* eqList, uint tetrahedronCount)
{
const uint id = Index();
// Prevent non-power of two access.
if (id >= tetrahedronCount) {
return;
}
const uint label = eqList[id];
const uint offset = vertexOffsets[id];
const uint nextOffset = offset + verticesPerTetrahedron[id];
for (uint index = offset; index < nextOffset; ++index) {
vertexLabels[index] = label;
}
}
__global__
void CentroidFinalize_kernel(float4* centroids, float4* centroidSums, uint* centroidCounts, uint centroidCount)
{
const uint id = Index();
// Prevent non-power of two writes.
if (id >= centroidCount) {
return;
}
centroids[id].x = centroidSums[id].x / centroidCounts[id];
centroids[id].y = centroidSums[id].y / centroidCounts[id];
centroids[id].z = centroidSums[id].z / centroidCounts[id];
centroids[id].w = 1.0f;
}
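// Turns per-label vertex sums into centroids by a componentwise divide by the
// per-label vertex count; the sums and counts are assumed to be produced
// beforehand on the host side, e.g. by a key-based reduction over the vertex
// labels (thrust::reduce_by_key is already included above).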
__global__
void ColorizeByCentroid_kernel(float4* colors, float4* centroidColors, uint* centroidLabels, uint centroidCount, uint* vertexLabels, uint vertexCount)
{
const uint id = Index();
// Prevent non-power of two writes.
if (id >= vertexCount) {
return;
}
for (int i = 0; i < centroidCount; ++i) {
if (vertexLabels[id] == centroidLabels[i]) {
colors[id] = centroidColors[i];
return;
}
}
}
__global__
void ColorizeByAO_kernel(float4* colors, float* triaAO, float aoThreshold, uint vertexCount)
{
const uint id = Index();
// Prevent non-power of two writes.
if (id >= vertexCount) {
return;
}
if( triaAO[id/3] > aoThreshold )
colors[id] = lerp( colors[id], make_float4( 0.0, 1.0, 0.25, 1.0), 0.5);
else
colors[id] = lerp( colors[id], make_float4( 1.0, 0.25, 0.0, 1.0), 0.5);
}
__global__
void ComputeTriangleAreas_kernel(float4 *pos, float *area, unsigned int maxTria)
{
unsigned int zOffset = blockIdx.z * gridDim.x * gridDim.y;
unsigned int blockId = (blockIdx.y * gridDim.x) + blockIdx.x;
unsigned int i = (zOffset + blockId) * blockDim.x + threadIdx.x;
// prevent overrunning of array boundary
if (i >= maxTria)
return;
// get all three triangle vertices
float4 v0 = pos[3*i];
float4 v1 = pos[3*i+1];
float4 v2 = pos[3*i+2];
// compute edge lengths
float a = length( v0 - v1);
float b = length( v0 - v2);
float c = length( v1 - v2);
// compute area (Heron's formula)
float rad = ( a + b + c) * ( a + b - c) * ( b + c - a) * ( c + a - b);
// make sure radicand is not negative
rad = rad > 0.0f ? rad : 0.0f;
area[i] = 0.25f * sqrt( rad);
}
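// Heron's formula: with s = (a + b + c) / 2, area = sqrt(s(s-a)(s-b)(s-c)).
// Since (a+b+c)(a+b-c)(b+c-a)(c+a-b) == 16 * s(s-a)(s-b)(s-c), the expression
// 0.25f * sqrt(rad) above computes the same area without forming s; clamping
// the radicand at zero guards against round-off on degenerate (near-collinear)
// triangles.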
///*
// * Utility functions.
// */
//dim3 Grid(const uint size, const int threadsPerBlock) {
// //TODO: remove hardcoded hardware capabilities :(
// // see: http://code.google.com/p/thrust/source/browse/thrust/detail/backend/cuda/arch.inl
// // and http://code.google.com/p/thrust/source/browse/thrust/detail/backend/cuda/detail/safe_scan.inl
// // for refactoring.
// // Get maximum grid size of CUDA device.
// //hipDevice_t device;
// //hipDeviceGet(&device, 0);
// //hipDeviceProp_t deviceProps;
// //hipGetDeviceProperties(&deviceProps, device);
// //this->gridSize = dim3(deviceProps.maxGridSize[0],
// // deviceProps.maxGridSize[1],
// // deviceProps.maxGridSize[2]);
// const dim3 maxGridSize(65535, 65535, 0);
// const int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
// dim3 grid(blocksPerGrid, 1, 1);
// // Test if grid needs to be extended to 2D.
// while (grid.x > maxGridSize.x) {
// grid.x /= 2;
// grid.y *= 2;
// }
// return grid;
//}
__global__
void SetVol_kernel(float* vol)
{
volumeTex = vol;
}
__global__
void SetNeighborAtomVol_kernel(int* vol)
{
neighborAtomTex = vol;
}
/*
* Wrappers.
*/
extern "C"
hipError_t BindVolumeTexture(float* textureArray) {
//return hipBindTextureToArray(volumeTex, textureArray);
//return hipBindTexture(0, volumeTex, textureArray, hipCreateChannelDesc<float>());
hipLaunchKernelGGL(( SetVol_kernel), dim3(Grid(1, 1)), dim3(1), 0, 0, textureArray);
return hipGetLastError();
}
extern "C"
hipError_t BindNeighborAtomTexture(int* textureArray) {
hipLaunchKernelGGL(( SetNeighborAtomVol_kernel), dim3(Grid(1, 1)), dim3(1), 0, 0, textureArray);
return hipGetLastError();
}
extern "C"
hipError_t UnbindVolumeTexture()
{
return hipSuccess;//hipUnbindTexture(volumeTex);
}
extern "C"
hipError_t BindAOVolumeTexture(hipArray* textureArray)
{
// set texture parameters
aoVolumeTex.normalized = 1;
aoVolumeTex.filterMode = hipFilterModeLinear;
aoVolumeTex.addressMode[0] = hipAddressModeClamp;
aoVolumeTex.addressMode[1] = hipAddressModeClamp;
aoVolumeTex.addressMode[2] = hipAddressModeClamp;
// bind array to 3D texture
return hipBindTextureToArray(aoVolumeTex, textureArray, hipCreateChannelDesc<float>());
}
extern "C"
hipError_t UnbindAOVolumeTexture()
{
return hipUnbindTexture(aoVolumeTex);
}
extern "C"
hipError_t ClassifyCubes(uint* cubeStates, float thresholdValue, uint cubeCount)
{
const int threads = 256;
hipLaunchKernelGGL(( ClassifyCubes_kernel), dim3(Grid(cubeCount, threads)), dim3(threads), 0, 0, cubeStates, thresholdValue, cubeCount);
return hipGetLastError();
}
extern "C"
hipError_t ScanCubes(uint* cubeOffsets, uint* cubeStates, uint cubeCount)
{
uint* cubeStatesLast = cubeStates + cubeCount;
thrust::exclusive_scan(thrust::device_ptr<uint>(cubeStates),
thrust::device_ptr<uint>(cubeStatesLast),
thrust::device_ptr<uint>(cubeOffsets));
return hipGetLastError();
}
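// The exclusive scan converts the 0/1 cube states into compaction offsets,
// e.g. states {0,1,1,0,1} become offsets {0,0,1,2,2}; each active cube's
// offset is its slot in the compacted cubeMap written by CompactCubes, and the
// last offset plus the last state gives the total number of active cubes.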
extern "C"
hipError_t CompactCubes(uint* cubeMap, uint* cubeOffsets, uint* cubeStates, uint cubeCount)
{
const int threads = 256;
hipLaunchKernelGGL(( CompactCubes_kernel), dim3(Grid(cubeCount, threads)), dim3(threads), 0, 0, cubeMap, cubeOffsets, cubeStates, cubeCount);
return hipGetLastError();
}
extern "C"
hipError_t ClassifyTetrahedronsInACube(uint* verticesPerTetrahedron, uint* cubeMap, float thresholdValue, uint activeCubeCount)
{
const int threads = 64;
hipLaunchKernelGGL(( ClassifyTetrahedronsInACube_kernel), dim3(Grid(activeCubeCount, threads)), dim3(threads), 0, 0, verticesPerTetrahedron, cubeMap, thresholdValue, activeCubeCount);
return hipGetLastError();
}
extern "C"
hipError_t ScanTetrahedrons(uint* vertexOffsets, uint* verticesPerTetrahedron, uint tetrahedronCount)
{
uint* verticesPerTetrahedronLast = verticesPerTetrahedron + tetrahedronCount;
thrust::exclusive_scan(thrust::device_ptr<uint>(verticesPerTetrahedron),
thrust::device_ptr<uint>(verticesPerTetrahedronLast),
thrust::device_ptr<uint>(vertexOffsets));
return hipGetLastError();
}
extern "C"
hipError_t GenerateTriangles(float4* vertices, int* neighborAtoms, float4* normals, float translateX, float translateY, float translateZ,
float scaleX, float scaleY, float scaleZ, uint* vertexOffsets, uint* cubeMap, float thresholdValue, uint tetrahedronCount)
{
const int threads = GT_THREADS;
hipLaunchKernelGGL(( GenerateTriangles_kernel), dim3(Grid(tetrahedronCount, threads)), dim3(threads), 0, 0, vertices, neighborAtoms, normals, translateX, translateY, translateZ,
scaleX, scaleY, scaleZ, vertexOffsets, cubeMap, thresholdValue, tetrahedronCount);
return hipGetLastError();
}
extern "C"
hipError_t MeshReset(uint* eqList, uint* refList, uint tetrahedronCount, uint* vertexOffsets, float* triangleAO)
{
const int threads = 128;
hipLaunchKernelGGL(( MeshReset_kernel), dim3(Grid(tetrahedronCount, threads)), dim3(threads), 0, 0, eqList, refList, tetrahedronCount, vertexOffsets, triangleAO);
return hipGetLastError();
}
extern "C"
hipError_t MeshScan(float4* vertices, float4* normals, uint* vertexOffsets, float* triangleAO, float aoThreshold, uint* eqList, uint* refList, bool* modified, uint* cubeStates, uint* cubeOffsets, uint* cubeMap, float thresholdValue, uint tetrahedronCount)
{
const int threads = 128;
hipLaunchKernelGGL(( MeshScan_kernel), dim3(Grid(tetrahedronCount, threads)), dim3(threads), 0, 0, vertices, normals, vertexOffsets, triangleAO, aoThreshold, eqList, refList, modified, cubeStates, cubeOffsets, cubeMap, thresholdValue, tetrahedronCount);
return hipGetLastError();
}
extern "C"
hipError_t MeshAnalysis(uint* eqList, uint* refList, uint tetrahedronCount)
{
const int threads = 128;
hipLaunchKernelGGL(( MeshAnalysis_kernel), dim3(Grid(tetrahedronCount, threads)), dim3(threads), 0, 0, eqList, refList, tetrahedronCount);
return hipGetLastError();
}
extern "C"
hipError_t MeshLabeling(uint* eqList, uint* refList, uint tetrahedronCount)
{
const int threads = 128;
hipLaunchKernelGGL(( MeshLabeling_kernel), dim3(Grid(tetrahedronCount, threads)), dim3(threads), 0, 0, eqList, refList, tetrahedronCount);
return hipGetLastError();
}
extern "C"
hipError_t CentroidMap(uint* vertexLabels, uint* vertexOffsets, uint* verticesPerTetrahedron, uint* eqList, uint tetrahedronCount)
{
const int threads = 128;
hipLaunchKernelGGL(( CentroidMap_kernel), dim3(Grid(tetrahedronCount, threads)), dim3(threads), 0, 0, vertexLabels, vertexOffsets, verticesPerTetrahedron, eqList, tetrahedronCount);
return hipGetLastError();
}
extern "C"
hipError_t CentroidFinalize(float4* centroids, float4* centroidSums, uint* centroidCounts, uint centroidCount)
{
const int threads = 128;
hipLaunchKernelGGL(( CentroidFinalize_kernel), dim3(Grid(centroidCount, threads)), dim3(threads), 0, 0, centroids, centroidSums, centroidCounts, centroidCount);
return hipGetLastError();
}
extern "C"
hipError_t ColorizeByCentroid(float4* colors, float4* centroidColors, uint* centroidLabels, uint centroidCount, uint* vertexLabels, uint vertexCount)
{
const int threads = 128;
hipLaunchKernelGGL(( ColorizeByCentroid_kernel), dim3(Grid(vertexCount, threads)), dim3(threads), 0, 0, colors, centroidColors, centroidLabels, centroidCount, vertexLabels, vertexCount);
return hipGetLastError();
}
extern "C"
hipError_t ColorizeByAO(float4* colors, float* triangleAO, float aoThreashold, uint vertexCount)
{
const int threads = 128;
hipLaunchKernelGGL(( ColorizeByAO_kernel), dim3(Grid(vertexCount, threads)), dim3(threads), 0, 0, colors, triangleAO, aoThreashold, vertexCount);
return hipGetLastError();
}
extern "C"
hipError_t ComputeSurfaceArea(float4* verts, float* areas, unsigned int triaCount)
{
// do nothing for zero triangles
if (triaCount <= 0)
return hipSuccess;
// compute area for each triangle
int threads = 256;
dim3 grid(int(ceil(float(triaCount) / float(threads))), 1, 1);
// get around maximum grid size of 65535 in each dimension
while(grid.x > 65535) {
grid.x = (unsigned int) (ceil(float(grid.x) / 2.0f));
grid.y *= 2;
}
while(grid.y > 65535) {
grid.y = (unsigned int) (ceil(float(grid.y) / 2.0f));
grid.z *= 2;
}
hipLaunchKernelGGL(( ComputeTriangleAreas_kernel), dim3(grid), dim3(threads), 0, 0, verts, areas, triaCount);
return hipGetLastError();
}
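// The launch is folded from 1-D into a 2-D/3-D grid solely to stay under the
// 65535-block limit per grid dimension on the targeted devices; the kernel
// rebuilds a flat thread index from blockIdx, and the maxTria bound check
// makes the resulting overallocation of blocks harmless.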
__global__
void ComputeCentroidArea_kernel(float* centroidAreas, uint2* featureStartEnd, float* triaAreas, uint* vertexLabels, uint* centroidLabels, uint triaCount, uint centroidCount)
{
const uint id = Index();
// Prevent overrunning of array
if (id >= triaCount) {
return;
}
uint labelId = vertexLabels[id*3];
for (int i = 0; i < centroidCount; ++i) {
if ( labelId == centroidLabels[i]) {
atomicAdd( &centroidAreas[i], triaAreas[id]);
if( id > 0 ) {
// check if the label of the previous triangle is different
if( vertexLabels[(id-1)*3] != labelId ) {
featureStartEnd[i].x = id;
featureStartEnd[i-1].y = id-1;
}
} else {
featureStartEnd[0].x = 0;
featureStartEnd[centroidCount-1].y = triaCount-1;
}
}
}
}
extern "C"
hipError_t ComputeCentroidArea(float* centroidAreas, uint2* featureStartEnd, float* triaAreas, uint* vertexLabels, uint* centroidLabels, uint triaCount, uint centroidCount)
{
// do nothing for zero triangles
if (triaCount <= 0)
return hipSuccess;
// do nothing for zero centroids
if (centroidCount <= 0)
return hipSuccess;
// launch one thread per triangle
int threads = 256;
dim3 grid(int(ceil(float(triaCount) / float(threads))), 1, 1);
// get around maximum grid size of 65535 in each dimension
while(grid.x > 65535) {
grid.x = (unsigned int) (ceil(float(grid.x) / 2.0f));
grid.y *= 2;
}
while(grid.y > 65535) {
grid.y = (unsigned int) (ceil(float(grid.y) / 2.0f));
grid.z *= 2;
}
hipError_t err = hipMemset( centroidAreas, 0, centroidCount * sizeof(float));
if( err != hipSuccess ) return err;
hipLaunchKernelGGL(( ComputeCentroidArea_kernel), dim3(grid), dim3(threads), 0, 0, centroidAreas, featureStartEnd, triaAreas, vertexLabels, centroidLabels, triaCount, centroidCount);
return hipGetLastError();
}
__global__
void ComputeTriangleAO_kernel(float4 *vertices, float4* normals, float *ao, unsigned int maxTria)
{
unsigned int zOffset = blockIdx.z * gridDim.x * gridDim.y;
unsigned int blockId = (blockIdx.y * gridDim.x) + blockIdx.x;
unsigned int i = (zOffset + blockId) * blockDim.x + threadIdx.x;
// prevent overrunning of array boundary
if (i >= maxTria)
return;
// get all three triangle vertices and normals
float3 v0 = make_float3(vertices[3*i+0].x, vertices[3*i+0].y, vertices[3*i+0].z);
float3 v1 = make_float3(vertices[3*i+1].x, vertices[3*i+1].y, vertices[3*i+1].z);
float3 v2 = make_float3(vertices[3*i+2].x, vertices[3*i+2].y, vertices[3*i+2].z);
float3 n0 = make_float3(normals[3*i+0].x, normals[3*i+0].y, normals[3*i+0].z);
float3 n1 = make_float3(normals[3*i+1].x, normals[3*i+1].y, normals[3*i+1].z);
float3 n2 = make_float3(normals[3*i+2].x, normals[3*i+2].y, normals[3*i+2].z);
ao[i] = (AOValue(v0, n0) + AOValue(v1, n1) + AOValue(v2, n2)) / 3.0f;
}
extern "C"
hipError_t ComputeTriangleAO(float4* verts, float4* normals, float* ao, unsigned int triaCount)
{
// do nothing for zero triangles
if (triaCount <= 0)
return hipSuccess;
// compute ambient occlusion for each triangle
int threads = 256;
dim3 grid(int(ceil(float(triaCount) / float(threads))), 1, 1);
// get around maximum grid size of 65535 in each dimension
while(grid.x > 65535) {
grid.x = (unsigned int) (ceil(float(grid.x) / 2.0f));
grid.y *= 2;
}
while(grid.y > 65535) {
grid.y = (unsigned int) (ceil(float(grid.y) / 2.0f));
grid.z *= 2;
}
hipLaunchKernelGGL(( ComputeTriangleAO_kernel), dim3(grid), dim3(threads), 0, 0, verts, normals, ao, triaCount);
return hipGetLastError();
}
__global__
void RemoveSmallSegments_kernel(float* centroidAreas, float* triangleAO, uint* vertexLabels, uint* centroidLabels, float areaThreshold, float aoThreshold,
uint triaCount, uint centroidCount, bool* segmentsRemoved)
{
const uint id = Index();
// Prevent overrunning of array
if (id >= triaCount) {
return;
}
for (int i = 0; i < centroidCount; ++i) {
if (vertexLabels[id*3] == centroidLabels[i]) {
// check centroid area
if( centroidAreas[i] < areaThreshold ) {
// "flip" ambient occlusion factor
if( triangleAO[id] <= aoThreshold ) {
triangleAO[id] = aoThreshold + 1.0f;
} else {
triangleAO[id] = 0.0f;
}
// mark as modified
*segmentsRemoved = true;
}
}
}
}
extern "C"
hipError_t RemoveSmallSegments(float* centroidAreas, float* triangleAO, uint* vertexLabels, uint* centroidLabels, float areaThreshold, float aoThreshold,
uint triaCount, uint centroidCount, bool* segmentsRemoved)
{
// do nothing for zero triangles
if (triaCount <= 0)
return hipSuccess;
// do nothing for zero centroids
if (centroidCount <= 0)
return hipSuccess;
// launch one thread per triangle
int threads = 256;
dim3 grid(int(ceil(float(triaCount) / float(threads))), 1, 1);
// get around maximum grid size of 65535 in each dimension
while(grid.x > 65535) {
grid.x = (unsigned int) (ceil(float(grid.x) / 2.0f));
grid.y *= 2;
}
while(grid.y > 65535) {
grid.y = (unsigned int) (ceil(float(grid.y) / 2.0f));
grid.z *= 2;
}
hipLaunchKernelGGL(( RemoveSmallSegments_kernel), dim3(grid), dim3(threads), 0, 0, centroidAreas, triangleAO, vertexLabels, centroidLabels, areaThreshold, aoThreshold, triaCount, centroidCount, segmentsRemoved);
return hipGetLastError();
}
__global__
void ComputeTriangleBBox_kernel(float* fBBoxMinX, float* fBBoxMinY, float* fBBoxMinZ,
float* fBBoxMaxX, float* fBBoxMaxY, float* fBBoxMaxZ, uint* triangleLabels,
float4* verts, uint* vertexLabels, uint triaCount)
{
const uint id = Index();
// Prevent overrunning of array
if (id >= triaCount) {
return;
}
// get min/max x/y/z of all three triangle vertices (i.e. triangle bbox)
float4 v0 = verts[3*id];
float4 v1 = verts[3*id+1];
float4 v2 = verts[3*id+2];
float3 minVec = make_float3(
min( min( v0.x, v1.x), v2.x),
min( min( v0.y, v1.y), v2.y),
min( min( v0.z, v1.z), v2.z));
float3 maxVec = make_float3(
max( max( v0.x, v1.x), v2.x),
max( max( v0.y, v1.y), v2.y),
max( max( v0.z, v1.z), v2.z));
// write result
fBBoxMinX[id] = minVec.x;
fBBoxMinY[id] = minVec.y;
fBBoxMinZ[id] = minVec.z;
fBBoxMaxX[id] = maxVec.x;
fBBoxMaxY[id] = maxVec.y;
fBBoxMaxZ[id] = maxVec.z;
triangleLabels[id] = vertexLabels[3*id];
}
extern "C"
hipError_t ComputeTriangleBBox(float* fBBoxMinX, float* fBBoxMinY, float* fBBoxMinZ,
float* fBBoxMaxX, float* fBBoxMaxY, float* fBBoxMaxZ, uint* triangleLabels,
float4* verts, uint* vertexLabels, uint triaCount)
{
// do nothing for zero triangles
if (triaCount <= 0)
return hipSuccess;
// compute bounding box for each triangle
int threads = 256;
dim3 grid(int(ceil(float(triaCount) / float(threads))), 1, 1);
// get around maximum grid size of 65535 in each dimension
while(grid.x > 65535) {
grid.x = (unsigned int) (ceil(float(grid.x) / 2.0f));
grid.y *= 2;
}
while(grid.y > 65535) {
grid.y = (unsigned int) (ceil(float(grid.y) / 2.0f));
grid.z *= 2;
}
hipLaunchKernelGGL(( ComputeTriangleBBox_kernel), dim3(grid), dim3(threads), 0, 0, fBBoxMinX, fBBoxMinY, fBBoxMinZ,
fBBoxMaxX, fBBoxMaxY, fBBoxMaxZ, triangleLabels, verts, vertexLabels, triaCount);
return hipGetLastError();
}
__global__
void WritePrevTetraLabel_kernel( int2* labelPair, uint* cubeStatesOld, uint* cubeOffsetsOld, //uint* cubeMapOld,
uint* verticesPerTetrahedronOld, uint* eqListOld, uint* cubeMap, uint* verticesPerTetrahedron,
uint* eqList, uint tetrahedronCount)
{
const uint activeTetraIdx = Index();
const uint activeCubeIdx = activeTetraIdx / 6;
// Prevent non-power of two writes.
if( activeTetraIdx >= tetrahedronCount ) {
return;
}
const uint globalCubeIdx = cubeMap[activeCubeIdx];
const uint tetraIdx = activeTetraIdx % 6;
//const uint globalTetraIdx = globalCubeIdx * 6 + tetraIdx;
uint cnt1 = verticesPerTetrahedron[activeTetraIdx];
// check if cube was active in previous time step
if( cubeStatesOld[globalCubeIdx] > 0 && cnt1 > 0 ) {
// get the index of the current cube in the previous time step
uint activeCubeIdxOld = cubeOffsetsOld[globalCubeIdx];
// Test if the mapped global index is equal --> TEST PASSED!
//if( cubeMapOld[activeCubeIdxOld] != globalCubeIdx ) {
// printf( "Greetings from thread %i!\n", activeTetraIdx);
//}
uint cnt0 = verticesPerTetrahedronOld[activeCubeIdxOld * 6 + tetraIdx];
if( cnt0 > 0 ) {
labelPair[activeTetraIdx].x = eqList[activeTetraIdx];
labelPair[activeTetraIdx].y = eqListOld[activeCubeIdxOld * 6 + tetraIdx];
} else {
labelPair[activeTetraIdx].x = -1;
labelPair[activeTetraIdx].y = -1;
}
} else {
// cube was not active previously
labelPair[activeTetraIdx].x = -1;
labelPair[activeTetraIdx].y = -1;
}
}
extern "C"
hipError_t WritePrevTetraLabel( int2* labelPair, uint* cubeStatesOld, uint* cubeOffsetsOld, //uint* cubeMapOld,
uint* verticesPerTetrahedronOld, uint* eqListOld, uint* cubeMap, uint* verticesPerTetrahedron,
uint* eqList, uint tetrahedronCount)
{
const int threads = GT_THREADS;
hipLaunchKernelGGL(( WritePrevTetraLabel_kernel), dim3(Grid(tetrahedronCount, threads)), dim3(threads), 0, 0, labelPair, cubeStatesOld, cubeOffsetsOld, //cubeMapOld,
verticesPerTetrahedronOld, eqListOld, cubeMap, verticesPerTetrahedron, eqList, tetrahedronCount);
return hipGetLastError();
}
__global__
void WriteTriangleVertexIndexList_kernel( uint* featureVertexIdx, uint* featureVertexCnt, uint* featureVertexStartIdx, uint* featureVertexIdxOut, uint triaVertexCnt, uint vertexCnt)
{
// vertex index
const uint vertexIdx = Index();
// check bounds
if( vertexIdx >= vertexCnt ) {
return;
}
// the number of duplications for vertex 'vertexIdx'
const uint fvCnt = featureVertexCnt[vertexIdx];
// the start index of the duplicates for vertex 'vertexIdx'
const uint fvStartIdx = featureVertexStartIdx[vertexIdx];
// temp variable for original vertex index
uint origIdx;
// loop over all duplicates
for( uint i = 0; i < fvCnt; i++ ) {
// get original vertex index
origIdx = featureVertexIdx[fvStartIdx + i];
// write new vertex index
featureVertexIdxOut[origIdx] = vertexIdx;
}
}
extern "C"
hipError_t WriteTriangleVertexIndexList( uint* featureVertexIdx, uint* featureVertexCnt, uint* featureVertexStartIdx, uint* featureVertexIdxOut, uint triaVertexCnt, uint vertexCnt) {
const int threads = GT_THREADS;
hipLaunchKernelGGL(( WriteTriangleVertexIndexList_kernel), dim3(Grid(vertexCnt, threads)), dim3(threads), 0, 0, featureVertexIdx, featureVertexCnt, featureVertexStartIdx, featureVertexIdxOut, triaVertexCnt, vertexCnt);
return hipGetLastError();
}
__global__
void WriteTriangleEdgeList_kernel( uint* featureVertexIdxOut, uint triaCnt, uint2 *featureEdges)
{
// feature index
const uint triangleIdx = Index() * 3;
// check bounds
if( triangleIdx >= (triaCnt * 3)) {
return;
}
// get the three vertex indices of the current triangle
uint idx0 = featureVertexIdxOut[triangleIdx];
uint idx1 = featureVertexIdxOut[triangleIdx+1];
uint idx2 = featureVertexIdxOut[triangleIdx+2];
// write three edges of the current feature triangle (vertex indices sorted)
featureEdges[triangleIdx] = make_uint2( min( idx0, idx1), max( idx0, idx1));
featureEdges[triangleIdx+1] = make_uint2( min( idx0, idx2), max( idx0, idx2));
featureEdges[triangleIdx+2] = make_uint2( min( idx1, idx2), max( idx1, idx2));
}
extern "C"
hipError_t WriteTriangleEdgeList( uint* featureVertexIdxOut, uint triaCnt, uint2 *featureEdges) {
const int threads = GT_THREADS;
hipLaunchKernelGGL(( WriteTriangleEdgeList_kernel), dim3(Grid(triaCnt, threads)), dim3(threads), 0, 0, featureVertexIdxOut, triaCnt, featureEdges);
return hipGetLastError();
}
#endif
| feaf8912821790d1b8635b1bec47b3834504ae3b.cu | /*
* VolumeMeshRenderer.cu
*
* Copyright (C) 2012 by Universitaet Stuttgart (VIS).
* Alle Rechte vorbehalten.
*/
#ifndef MEGAMOLPROTEIN_VOLUMEMESHRENDERER_CU_INCLUDED
#define MEGAMOLPROTEIN_VOLUMEMESHRENDERER_CU_INCLUDED
#include "VolumeMeshRenderer.cuh"
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/device_ptr.h>
#include "helper_math.h"
#ifdef CUDA_NO_SM_11_ATOMIC_INTRINSICS
#error "Atomic intrinsics are missing (nvcc -arch=sm_11)"
#endif
#define GT_THREADS 128
/*
* Variables.
*/
//texture<float4, cudaTextureType3D, cudaReadModeElementType> volumeTex;
//texture<float, cudaTextureType1D, cudaReadModeElementType> volumeTex;
__device__ float* volumeTex;
__device__ int* neighborAtomTex;
texture<float, cudaTextureType3D, cudaReadModeElementType> aoVolumeTex;
__device__ __constant__ uint3 dVolumeSize;
cudaError_t copyVolSizeToDevice( uint3 volSize) {
cudaError_t error = cudaMemcpyToSymbol( dVolumeSize, (void*)&volSize, sizeof(uint3));
cudaDeviceSynchronize();
return error;
}
cudaError_t copyVolSizeFromDevice( uint3 &volSize) {
volSize = make_uint3( 0, 0, 0);
cudaError_t error = cudaMemcpyFromSymbol( (void*)&volSize, dVolumeSize, sizeof(uint3));
cudaDeviceSynchronize();
return error;
}
/*
* Voxel index and access functions.
*/
inline __device__
uint Index()
{
return __umul24(__umul24(blockIdx.y, gridDim.x) + blockIdx.x, blockDim.x) + threadIdx.x;
}
inline __device__
float VoxelValue(float3 index)
{
uint idx = index.z * dVolumeSize.x * dVolumeSize.y + index.y * dVolumeSize.x + index.x;
return volumeTex[idx];//tex1Dfetch(volumeTex, idx);
}
inline __device__
int NeighborAtom(float3 index)
{
uint idx = index.z * dVolumeSize.x * dVolumeSize.y + index.y * dVolumeSize.x + index.x;
return neighborAtomTex[idx];
}
inline __device__
float3 VoxelGradient(float3 index)
{
float3 result;
// Compute central difference.
result.x = VoxelValue(make_float3(index.x - 1.0f, index.y, index.z))
- VoxelValue(make_float3(index.x + 1.0f, index.y, index.z));
result.y = VoxelValue(make_float3(index.x, index.y - 1.0f, index.z))
- VoxelValue(make_float3(index.x, index.y + 1.0f, index.z));
result.z = VoxelValue(make_float3(index.x, index.y, index.z - 1.0f))
- VoxelValue(make_float3(index.x, index.y, index.z + 1.0f));
return result;
}
/*
* Marching tetrahedrons.
*/
inline __device__
float3 CubeVertex0(uint index)
{
return make_float3(index % dVolumeSize.x, (index / dVolumeSize.x) % dVolumeSize.y,
index / (dVolumeSize.x * dVolumeSize.y));
}
__global__
void ClassifyCubes_kernel(uint* cubeStates, float thresholdValue, uint cubeCount)
{
const uint cubeIndex = min(Index(), cubeCount - 1);
const float3 cubeVertex0 = CubeVertex0(cubeIndex);
const float cubeVertexOffsetsL[8][3] = {
{0, 0, 0},
{1, 0, 0},
{1, 1, 0},
{0, 1, 0},
{0, 0, 1},
{1, 0, 1},
{1, 1, 1},
{0, 1, 1}
};
// Add "vertex states" of a cube (0: inactive, 1: active).
unsigned char cubeFlags = static_cast<uint>(VoxelValue(cubeVertex0) <= thresholdValue);
for (int cubeVertexIndex = 1; cubeVertexIndex < 8; ++cubeVertexIndex) {
const float3 cubeVertex = make_float3(
cubeVertex0.x + cubeVertexOffsetsL[cubeVertexIndex][0],
cubeVertex0.y + cubeVertexOffsetsL[cubeVertexIndex][1],
cubeVertex0.z + cubeVertexOffsetsL[cubeVertexIndex][2]);
cubeFlags |= static_cast<uint>(VoxelValue(cubeVertex) <= thresholdValue) * (1 << cubeVertexIndex);
}
// Reduce "vertex states" to a "cube state".
cubeStates[cubeIndex] = min(cubeFlags % 255, 1);
}
__global__
void CompactCubes_kernel(uint* cubeMap, uint* cubeOffsets, uint* cubeStates, uint cubeCount)
{
const uint cubeIndex = min(Index(), cubeCount - 1);
if (cubeStates[cubeIndex] != 0) {
// Map from active cubes list to cube index.
cubeMap[cubeOffsets[cubeIndex]] = cubeIndex;
}
}
__device__ __shared__ float3 cubeVertexOffsets[8];
__device__ __constant__ float cubeVertexOffsetsC[8][3] = {
{0, 0, 0},
{1, 0, 0},
{1, 1, 0},
{0, 1, 0},
{0, 0, 1},
{1, 0, 1},
{1, 1, 1},
{0, 1, 1}
};
inline __device__
void LoadCubeOffsets()
{
// Load cube vertex offsets into shared memory.
if (threadIdx.x < 8) {
cubeVertexOffsets[threadIdx.x] = make_float3(cubeVertexOffsetsC[threadIdx.x][0],
cubeVertexOffsetsC[threadIdx.x][1], cubeVertexOffsetsC[threadIdx.x][2]);
}
}
__device__ __shared__ uint tetrahedronsInACube[6][4];
__device__ __constant__ uint tetrahedronsInACubeC[6][4] = {
{0, 5, 1, 6},
{0, 1, 2, 6},
{0, 2, 3, 6},
{0, 3, 7, 6},
{0, 7, 4, 6},
{0, 4, 5, 6}
};
inline __device__
void LoadTetrahedronsInACube()
{
// Load tetrahedron vertex index to cube index map into shared memory.
if (threadIdx.x < 6) {
for (int i = 0; i < 4; ++i) {
tetrahedronsInACube[threadIdx.x][i] = tetrahedronsInACubeC[threadIdx.x][i];
}
}
}
__device__ __shared__ uint tetrahedronVertexCount[16];
__device__ __constant__ uint tetrahedronVertexCountC[16] = {
0, 3, 3, 6, 3, 6, 6, 3,
3, 6, 6, 3, 6, 3, 3, 0
};
inline __device__
void LoadTetrahedronVertexCount()
{
// Load tetrahedron vertex count into shared memory.
if (threadIdx.x < 16) {
tetrahedronVertexCount[threadIdx.x] = tetrahedronVertexCountC[threadIdx.x];
}
}
inline __device__
unsigned char TetrahedronFlags(float3 cubeVertex0, int tetrahedronIndex, float thresholdValue)
{
unsigned char flags = 0;
for (int tetrahedronVertexIndex = 0; tetrahedronVertexIndex < 4; ++tetrahedronVertexIndex) {
const float3 cubeVertexOffset = cubeVertexOffsets[tetrahedronsInACube[tetrahedronIndex][tetrahedronVertexIndex]];
if (VoxelValue(cubeVertex0 + cubeVertexOffset) <= thresholdValue) {
flags |= 1 << static_cast<unsigned char>(tetrahedronVertexIndex);
}
}
return flags;
}
__global__
void ClassifyTetrahedronsInACube_kernel(uint* verticesPerTetrahedron, uint* cubeMap, float thresholdValue, uint activeCubeCount)
{
const uint activeCubeIndex = Index();
const float3 cubeVertex0 = CubeVertex0(cubeMap[activeCubeIndex]);
LoadCubeOffsets();
LoadTetrahedronsInACube();
LoadTetrahedronVertexCount();
__syncthreads();
// Prevent non-power of two writes.
if (activeCubeIndex >= activeCubeCount) {
return;
}
// Classify all tetrahedrons in a cube.
for (int tetrahedronIndex = 0; tetrahedronIndex < 6; ++tetrahedronIndex) {
// Compute tetrahedron flags.
unsigned char tetrahedronFlags = TetrahedronFlags(cubeVertex0, tetrahedronIndex, thresholdValue);
// Store number of vertices.
verticesPerTetrahedron[activeCubeIndex * 6 + tetrahedronIndex] = tetrahedronVertexCount[tetrahedronFlags];
}
}
__device__ __shared__ unsigned char tetrahedronEdgeFlags[16];
__device__ __constant__ unsigned char tetrahedronEdgeFlagsC[16] = {
0x00, 0x0d, 0x13, 0x1e, 0x26, 0x2b, 0x35, 0x38,
0x38, 0x35, 0x2b, 0x26, 0x1e, 0x13, 0x0d, 0x00
};
__device__ __shared__ char tetrahedronEdgeConnections[6][2];
__device__ __constant__ char tetrahedronEdgeConnectionsC[6][2] = {
{0, 1}, {1, 2}, {2, 0}, {0, 3}, {1, 3}, {2, 3}
};
inline __device__
void LoadTetrahedronEdgeFlagsAndConnections()
{
// Load tetrahedron edge flags into shared memory.
if (threadIdx.x < 16) {
tetrahedronEdgeFlags[threadIdx.x] = tetrahedronEdgeFlagsC[threadIdx.x];
}
// Load tetrahedron edge connection table into shared memory.
if (threadIdx.x < 6) {
tetrahedronEdgeConnections[threadIdx.x][0] = tetrahedronEdgeConnectionsC[threadIdx.x][0];
tetrahedronEdgeConnections[threadIdx.x][1] = tetrahedronEdgeConnectionsC[threadIdx.x][1];
}
}
__device__ __shared__ char tetrahedronTriangles[16][6];
__device__ __constant__ char tetrahedronTrianglesC[16][6] = {
{-1, -1, -1, -1, -1, -1},
{ 0, 3, 2, -1, -1, -1},
{ 0, 1, 4, -1, -1, -1},
{ 1, 4, 2, 2, 4, 3},
{ 1, 2, 5, -1, -1, -1},
{ 0, 3, 5, 0, 5, 1},
{ 0, 2, 5, 0, 5, 4},
{ 5, 4, 3, -1, -1, -1},
{ 3, 4, 5, -1, -1, -1},
{ 4, 5, 0, 5, 2, 0},
{ 1, 5, 0, 5, 3, 0},
{ 5, 2, 1, -1, -1, -1},
{ 3, 4, 2, 2, 4, 1},
{ 4, 1, 0, -1, -1, -1},
{ 2, 3, 0, -1, -1, -1},
{-1, -1, -1, -1, -1, -1}
};
inline __device__
void LoadTetrahedronTriangles()
{
// Load tetrahedron triangle table into shared memory.
if (threadIdx.x < 16) {
for (int i = 0; i < 6; ++i) {
tetrahedronTriangles[threadIdx.x][i] = tetrahedronTrianglesC[threadIdx.x][i];
}
}
}
__global__
void GenerateTriangles_kernel(float4* vertices, int* neighborAtom, float4* normals, float translateX, float translateY, float translateZ,
float scaleX, float scaleY, float scaleZ, uint* vertexOffsets, uint* cubeMap, float thresholdValue, uint tetrahedronCount)
{
const uint id = Index();
const uint activeCubeIndex = id / 6;
const int tetrahedronIndex = id % 6;
const float3 cubeVertex0 = CubeVertex0(cubeMap[activeCubeIndex]);
LoadCubeOffsets();
LoadTetrahedronsInACube();
LoadTetrahedronEdgeFlagsAndConnections();
LoadTetrahedronTriangles();
__syncthreads();
// Prevent non-power of two writes.
if (id >= tetrahedronCount) {
return;
}
unsigned char tetrahedronFlags = TetrahedronFlags(cubeVertex0, tetrahedronIndex, thresholdValue);
    // Skip inactive tetrahedrons.
if (tetrahedronFlags == 0x00 || tetrahedronFlags == 0x0F) {
return;
}
__shared__ float3 edgeVertex[6 * GT_THREADS];
// temporary storage for edge vertex neighbor atom
__shared__ int edgeVertexNeighborAtom[6 * GT_THREADS];
__shared__ float3 edgeNormal[6 * GT_THREADS];
// Find intersection of the surface with each edge.
for (int edgeIndex = 0; edgeIndex < 6; edgeIndex++) {
// Test if edge intersects with surface.
if (tetrahedronEdgeFlags[tetrahedronFlags] & (1 << static_cast<unsigned char>(edgeIndex))) {
// Interpolate vertex.
const float3 v0 = cubeVertex0 + cubeVertexOffsets[tetrahedronsInACube[tetrahedronIndex][tetrahedronEdgeConnections[edgeIndex][0]]];
const float3 v1 = cubeVertex0 + cubeVertexOffsets[tetrahedronsInACube[tetrahedronIndex][tetrahedronEdgeConnections[edgeIndex][1]]];
const float f0 = VoxelValue(v0);
const float interpolator = (thresholdValue - f0) / (VoxelValue(v1) - f0);
float3 vertex = lerp(make_float3(v0.x, v0.y, v0.z), make_float3(v1.x, v1.y, v1.z), interpolator);
edgeVertex[threadIdx.x * 6 + edgeIndex] = vertex;
// store nearest atom per edge vertex
edgeVertexNeighborAtom[threadIdx.x * 6 + edgeIndex] = (interpolator < 0.5) ? NeighborAtom(v0) : NeighborAtom(v1);
// Compute normal from gradient.
edgeNormal[threadIdx.x * 6 + edgeIndex] = normalize(lerp(VoxelGradient(v0), VoxelGradient(v1), interpolator));
}
}
// Write vertices.
for (int triangleIndex = 0; triangleIndex < 2; triangleIndex++) {
if (tetrahedronTriangles[tetrahedronFlags][3 * triangleIndex] >= 0) {
//int edgeIndex0 = threadIdx.x * 6 + tetrahedronTriangles[tetrahedronFlags][3 * triangleIndex + 0];
//int edgeIndex1 = threadIdx.x * 6 + tetrahedronTriangles[tetrahedronFlags][3 * triangleIndex + 1];
//int edgeIndex2 = threadIdx.x * 6 + tetrahedronTriangles[tetrahedronFlags][3 * triangleIndex + 2];
//float3 faceNormal = cross(edgeVertex[edgeIndex1] - edgeVertex[edgeIndex0], edgeVertex[edgeIndex2] - edgeVertex[edgeIndex0]);
for (int cornerIndex = 0; cornerIndex < 3; cornerIndex++) {
int edgeIndex = threadIdx.x * 6 + tetrahedronTriangles[tetrahedronFlags][3 * triangleIndex + cornerIndex];
uint vertexOffset = vertexOffsets[id] + 3 * triangleIndex + cornerIndex;
//vertices[vertexOffset] = make_float4(translateX + edgeVertex[edgeIndex].x * scaleX,
// translateY + edgeVertex[edgeIndex].y * scaleY,
// translateZ + edgeVertex[edgeIndex].z * scaleZ, 1.0f);
//vertices[vertexOffset] = make_float4( edgeVertex[edgeIndex].x / 128.0f,
// edgeVertex[edgeIndex].y / 128.0f ,
// edgeVertex[edgeIndex].z / 128.0f, 1.0f);
vertices[vertexOffset] = make_float4( edgeVertex[edgeIndex].x,
edgeVertex[edgeIndex].y, edgeVertex[edgeIndex].z, 1.0f);
// store nearest atom per output vertex
neighborAtom[vertexOffset] = edgeVertexNeighborAtom[edgeIndex];
//normals[vertexOffset] = make_float4(faceNormal.x, faceNormal.y, faceNormal.z, 0.0f);
normals[vertexOffset] = make_float4(edgeNormal[edgeIndex].x,
edgeNormal[edgeIndex].y, edgeNormal[edgeIndex].z, 0.0f);
}
}
}
}
/*
* Mesh scanning.
*/
__global__
void MeshReset_kernel(uint* eqList, uint* refList, uint tetrahedronCount, uint* vertexOffsets, float* triangleAO)
{
const uint id = Index();
// Prevent non-power of two writes.
if (id >= tetrahedronCount) {
return;
}
// TODO: fix this! -> MeshScan should work correctly for different AO factors in one tetrahedron!
// Set the ambient occlusion values for all triangles to the same value (average of all individual triangle AO factors)
float aoValue = 0.0;
for (uint vertexOffset = vertexOffsets[id]; vertexOffset < vertexOffsets[id + 1]; vertexOffset += 3) {
aoValue += triangleAO[vertexOffset/3];
}
aoValue /= float(( vertexOffsets[id + 1] - vertexOffsets[id]) / 3);
for (uint vertexOffset = vertexOffsets[id]; vertexOffset < vertexOffsets[id + 1]; vertexOffset += 3) {
triangleAO[vertexOffset/3] = aoValue;
}
eqList[id] = id;
refList[id] = id;
}
__device__ __shared__ unsigned char tetrahedronFaceMask[4];
__device__ __constant__ unsigned char tetrahedronFaceMaskC[4] = {
0x07, 0x0b, 0x0d, 0x0e
};
// This macro greatly simplifies the adjacent neighbour lookup by computing an
// offset from the cube's index (not the tetrahedron's global index) using
// cube offsets and a local tetrahedron index.
#define TFN(t, cX, cY, cZ) {t, cX, cY, cZ}
__device__ __shared__ int tetrahedronFaceNeighbours[6][4][4];
__device__ __constant__ int tetrahedronFaceNeighboursC[6][4][4] = {
{TFN(2, 0, -1, 0), TFN(5, 0, 0, 0), TFN(1, 0, 0, 0), TFN(4, 1, 0, 0)},
{TFN(5, 0, 0, -1), TFN(0, 0, 0, 0), TFN(2, 0, 0, 0), TFN(3, 1, 0, 0)},
{TFN(4, 0, 0, -1), TFN(1, 0, 0, 0), TFN(3, 0, 0, 0), TFN(0, 0, 1, 0)},
{TFN(1, -1, 0, 0), TFN(2, 0, 0, 0), TFN(4, 0, 0, 0), TFN(5, 0, 1, 0)},
{TFN(0, -1, 0, 0), TFN(3, 0, 0, 0), TFN(5, 0, 0, 0), TFN(2, 0, 0, 1)},
{TFN(3, 0, -1, 0), TFN(4, 0, 0, 0), TFN(0, 0, 0, 0), TFN(1, 0, 0, 1)}
};
#undef TFN
inline __device__
void LoadTetrahedronNeighbours()
{
// Load tetrahedron face masks into shared memory.
if (threadIdx.x < 4) {
tetrahedronFaceMask[threadIdx.x] = tetrahedronFaceMaskC[threadIdx.x];
}
// Load tetrahedron face neighbours table into shared memory.
if (threadIdx.x < 6) {
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
tetrahedronFaceNeighbours[threadIdx.x][i][j] = tetrahedronFaceNeighboursC[threadIdx.x][i][j];
}
}
}
}
inline __device__
float AOValue(float3 position, float3 normal) {
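    // Sample the precomputed ambient occlusion volume one voxel along the surface
    // normal; 'position' is given in voxel coordinates and scaled to the normalized
    // texture coordinates expected by aoVolumeTex.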
//TODO: double check this scale * 4 hack.
float3 scale = make_float3(1.0f / dVolumeSize.x,
1.0f / dVolumeSize.y, 1.0f / dVolumeSize.z);
float x = position.x * scale.x + normal.x * scale.x;
float y = position.y * scale.y + normal.y * scale.y;
float z = position.z * scale.z + normal.z * scale.z;
return tex3D(aoVolumeTex, x, y, z);
//float aofac = 0.0f;
//for ( uint i = 0; i < 10; i++ ) {
// x = position.x * scale.x + normal.x * scale.x * ( 1.0f / 10.0f);
// y = position.y * scale.y + normal.y * scale.y * ( 1.0f / 10.0f);
// z = position.z * scale.z + normal.z * scale.z * ( 1.0f / 10.0f);
// aofac = max( aofac, tex3D(aoVolumeTex, x, y, z));
//}
//return aofac;
}
__global__
void MeshScan_kernel(float4* vertices, float4* normals, uint* vertexOffsets, float* triangleAO, float aoThreshold, uint* eqList, uint* refList,
bool* modified, uint* cubeStates, uint* cubeOffsets, uint* cubeMap, float thresholdValue, uint tetrahedronCount)
{
const uint id = Index();
const uint activeCubeIndex = id / 6;
const int tetrahedronIndex = id % 6;
const float3 cubeVertex0 = CubeVertex0(cubeMap[activeCubeIndex]);
const uint label1 = eqList[id];
uint label2 = ~0;
LoadCubeOffsets();
LoadTetrahedronsInACube();
LoadTetrahedronNeighbours();
__syncthreads();
// Prevent non-power of two writes.
if (id >= tetrahedronCount) {
return;
}
// Search for minimum label among neighbours.
unsigned char tetrahedronFlags = TetrahedronFlags(cubeVertex0, tetrahedronIndex, thresholdValue);
const uint volumeCubeSize = dVolumeSize.x * dVolumeSize.y * dVolumeSize.z;
for(int faceIndex = 0; faceIndex < 4; faceIndex++) {
unsigned char tetrahedronFaceFlags = tetrahedronFlags & tetrahedronFaceMask[faceIndex];
        // Test if this face is connected to a neighbour (using identity).
if (tetrahedronFaceFlags != 0 && tetrahedronFaceFlags != tetrahedronFaceMask[faceIndex]) {
uint tfn = tetrahedronFaceNeighbours[tetrahedronIndex][faceIndex][0]
+ (tetrahedronFaceNeighbours[tetrahedronIndex][faceIndex][1]
+ (tetrahedronFaceNeighbours[tetrahedronIndex][faceIndex][2] * dVolumeSize.x)
+ (tetrahedronFaceNeighbours[tetrahedronIndex][faceIndex][3] * dVolumeSize.y * dVolumeSize.x)) * 6;
int neighbourId = cubeMap[activeCubeIndex] * 6 + tfn;
int neighbourCubeId = neighbourId / 6;
// Test if cube is active.
if (neighbourCubeId >= 0 && neighbourCubeId < volumeCubeSize && cubeStates[neighbourCubeId] == 1) {
neighbourId = cubeOffsets[neighbourCubeId] * 6 + neighbourId % 6;
// For each triangle.
bool aoState1 = true;
for (uint vertexOffset = vertexOffsets[id]; vertexOffset < vertexOffsets[id + 1]; vertexOffset += 3) {
//float3 v0 = make_float3(vertices[vertexOffset].x, vertices[vertexOffset].y, vertices[vertexOffset].z);
//float3 v1 = make_float3(vertices[vertexOffset + 1].x, vertices[vertexOffset + 1].y, vertices[vertexOffset + 1].z);
//float3 v2 = make_float3(vertices[vertexOffset + 2].x, vertices[vertexOffset + 2].y, vertices[vertexOffset + 2].z);
//float3 n0 = make_float3(normals[vertexOffset].x, normals[vertexOffset].y, normals[vertexOffset].z);
//float3 n1 = make_float3(normals[vertexOffset + 1].x, normals[vertexOffset + 1].y, normals[vertexOffset + 1].z);
//float3 n2 = make_float3(normals[vertexOffset + 2].x, normals[vertexOffset + 2].y, normals[vertexOffset + 2].z);
//float aoValue = (AOValue(v0, n0) + AOValue(v1, n1) + AOValue(v2, n2)) / 3.0f;
float aoValue = triangleAO[vertexOffset/3];
if (aoValue > aoThreshold) {
aoState1 = false;
//break;
}
}
// For each neighbour triangle.
bool aoState2 = true;
for (uint vertexOffset = vertexOffsets[neighbourId]; vertexOffset < vertexOffsets[neighbourId + 1]; vertexOffset += 3) {
//float3 v0 = make_float3(vertices[vertexOffset].x, vertices[vertexOffset].y, vertices[vertexOffset].z);
//float3 v1 = make_float3(vertices[vertexOffset + 1].x, vertices[vertexOffset + 1].y, vertices[vertexOffset + 1].z);
//float3 v2 = make_float3(vertices[vertexOffset + 2].x, vertices[vertexOffset + 2].y, vertices[vertexOffset + 2].z);
//float3 n0 = make_float3(normals[vertexOffset].x, normals[vertexOffset].y, normals[vertexOffset].z);
//float3 n1 = make_float3(normals[vertexOffset + 1].x, normals[vertexOffset + 1].y, normals[vertexOffset + 1].z);
//float3 n2 = make_float3(normals[vertexOffset + 2].x, normals[vertexOffset + 2].y, normals[vertexOffset + 2].z);
//float aoValue = (AOValue(v0, n0) + AOValue(v1, n1) + AOValue(v2, n2)) / 3.0f;
float aoValue = triangleAO[vertexOffset/3];
if (aoValue > aoThreshold) {
aoState2 = false;
//break;
}
}
// Test for AO-Shading connectedness.
if (aoState1 == aoState2) {
// Store connectedness.
uint neighbourLabel = eqList[neighbourId];
if (neighbourLabel < label2) {
label2 = neighbourLabel;
}
}
}
}
}
if (label2 < label1) {
// Write out minimum (atomic, thus no synchronization).
atomicMin(&refList[label1], label2);
*modified = true;
}
}
__global__
void MeshAnalysis_kernel(uint* eqList, uint* refList, uint tetrahedronCount)
{
const uint id = Index();
// Prevent non-power of two writes.
if (id >= tetrahedronCount) {
return;
}
// Test if thread is the "label owner".
if (eqList[id] == id) {
uint label = id;
uint ref = refList[label];
// Expand equivalence chain.
while (ref != label) {
label = ref;
ref = refList[label];
}
refList[id] = ref;
}
}
__global__
void MeshLabeling_kernel(uint* eqList, uint* refList, uint tetrahedronCount)
{
const uint id = Index();
// Prevent non-power of two writes.
if (id >= tetrahedronCount) {
return;
}
eqList[id] = refList[eqList[id]];
}
/*
* Mesh centroid handling.
*/
__global__
void CentroidMap_kernel(uint* vertexLabels, uint* vertexOffsets, uint* verticesPerTetrahedron, uint* eqList, uint tetrahedronCount)
{
const uint id = Index();
// Prevent non-power of two access.
if (id >= tetrahedronCount) {
return;
}
const uint label = eqList[id];
const uint offset = vertexOffsets[id];
const uint nextOffset = offset + verticesPerTetrahedron[id];
for (uint index = offset; index < nextOffset; ++index) {
vertexLabels[index] = label;
}
}
__global__
void CentroidFinalize_kernel(float4* centroids, float4* centroidSums, uint* centroidCounts, uint centroidCount)
{
const uint id = Index();
// Prevent non-power of two writes.
if (id >= centroidCount) {
return;
}
centroids[id].x = centroidSums[id].x / centroidCounts[id];
centroids[id].y = centroidSums[id].y / centroidCounts[id];
centroids[id].z = centroidSums[id].z / centroidCounts[id];
centroids[id].w = 1.0f;
}
__global__
void ColorizeByCentroid_kernel(float4* colors, float4* centroidColors, uint* centroidLabels, uint centroidCount, uint* vertexLabels, uint vertexCount)
{
const uint id = Index();
// Prevent non-power of two writes.
if (id >= vertexCount) {
return;
}
for (int i = 0; i < centroidCount; ++i) {
if (vertexLabels[id] == centroidLabels[i]) {
colors[id] = centroidColors[i];
return;
}
}
}
__global__
void ColorizeByAO_kernel(float4* colors, float* triaAO, float aoThreshold, uint vertexCount)
{
const uint id = Index();
// Prevent non-power of two writes.
if (id >= vertexCount) {
return;
}
if( triaAO[id/3] > aoThreshold )
colors[id] = lerp( colors[id], make_float4( 0.0, 1.0, 0.25, 1.0), 0.5);
else
colors[id] = lerp( colors[id], make_float4( 1.0, 0.25, 0.0, 1.0), 0.5);
}
__global__
void ComputeTriangleAreas_kernel(float4 *pos, float *area, unsigned int maxTria)
{
unsigned int zOffset = blockIdx.z * gridDim.x * gridDim.y;
unsigned int blockId = (blockIdx.y * gridDim.x) + blockIdx.x;
unsigned int i = zOffset + (blockId * blockDim.x) + threadIdx.x;
// prevent overrunning of array boundary
if (i >= maxTria)
return;
// get all three triangle vertices
float4 v0 = pos[3*i];
float4 v1 = pos[3*i+1];
float4 v2 = pos[3*i+2];
// compute edge lengths
float a = length( v0 - v1);
float b = length( v0 - v2);
float c = length( v1 - v2);
// compute area (Heron's formula)
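    // (a+b+c)(a+b-c)(b+c-a)(c+a-b) equals 16*s*(s-a)*(s-b)*(s-c) with s the
    // semi-perimeter, so the area below is 0.25f * sqrt(rad).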
float rad = ( a + b + c) * ( a + b - c) * ( b + c - a) * ( c + a - b);
// make sure radicand is not negative
rad = rad > 0.0f ? rad : 0.0f;
area[i] = 0.25f * sqrt( rad);
}
///*
// * Utility functions.
// */
//dim3 Grid(const uint size, const int threadsPerBlock) {
// //TODO: remove hardcoded hardware capabilities :(
// // see: http://code.google.com/p/thrust/source/browse/thrust/detail/backend/cuda/arch.inl
// // and http://code.google.com/p/thrust/source/browse/thrust/detail/backend/cuda/detail/safe_scan.inl
// // for refactoring.
// // Get maximum grid size of CUDA device.
// //CUdevice device;
// //cuDeviceGet(&device, 0);
// //CUdevprop deviceProps;
// //cuDeviceGetProperties(&deviceProps, device);
// //this->gridSize = dim3(deviceProps.maxGridSize[0],
// // deviceProps.maxGridSize[1],
// // deviceProps.maxGridSize[2]);
// const dim3 maxGridSize(65535, 65535, 0);
// const int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
// dim3 grid(blocksPerGrid, 1, 1);
// // Test if grid needs to be extended to 2D.
// while (grid.x > maxGridSize.x) {
// grid.x /= 2;
// grid.y *= 2;
// }
// return grid;
//}
__global__
void SetVol_kernel(float* vol)
{
volumeTex = vol;
}
__global__
void SetNeighborAtomVol_kernel(int* vol)
{
neighborAtomTex = vol;
}
/*
* Wrappers.
*/
extern "C"
cudaError BindVolumeTexture(float* textureArray) {
//return cudaBindTextureToArray(volumeTex, textureArray);
//return cudaBindTexture(0, volumeTex, textureArray, cudaCreateChannelDesc<float>());
SetVol_kernel<<<Grid(1, 1), 1>>>(textureArray);
return cudaGetLastError();
}
extern "C"
cudaError BindNeighborAtomTexture(int* textureArray) {
SetNeighborAtomVol_kernel<<<Grid(1, 1), 1>>>(textureArray);
return cudaGetLastError();
}
extern "C"
cudaError UnbindVolumeTexture()
{
return cudaSuccess;//cudaUnbindTexture(volumeTex);
}
extern "C"
cudaError BindAOVolumeTexture(cudaArray* textureArray)
{
// set texture parameters
aoVolumeTex.normalized = 1;
aoVolumeTex.filterMode = cudaFilterModeLinear;
aoVolumeTex.addressMode[0] = cudaAddressModeClamp;
aoVolumeTex.addressMode[1] = cudaAddressModeClamp;
aoVolumeTex.addressMode[2] = cudaAddressModeClamp;
// bind array to 3D texture
return cudaBindTextureToArray(aoVolumeTex, textureArray, cudaCreateChannelDesc<float>());
}
extern "C"
cudaError UnbindAOVolumeTexture()
{
return cudaUnbindTexture(aoVolumeTex);
}
extern "C"
cudaError ClassifyCubes(uint* cubeStates, float thresholdValue, uint cubeCount)
{
const int threads = 256;
ClassifyCubes_kernel<<<Grid(cubeCount, threads), threads>>>(cubeStates, thresholdValue, cubeCount);
return cudaGetLastError();
}
extern "C"
cudaError ScanCubes(uint* cubeOffsets, uint* cubeStates, uint cubeCount)
{
uint* cubeStatesLast = cubeStates + cubeCount;
thrust::exclusive_scan(thrust::device_ptr<uint>(cubeStates),
thrust::device_ptr<uint>(cubeStatesLast),
thrust::device_ptr<uint>(cubeOffsets));
return cudaGetLastError();
}
extern "C"
cudaError CompactCubes(uint* cubeMap, uint* cubeOffsets, uint* cubeStates, uint cubeCount)
{
const int threads = 256;
CompactCubes_kernel<<<Grid(cubeCount, threads), threads>>>(cubeMap, cubeOffsets, cubeStates, cubeCount);
return cudaGetLastError();
}
extern "C"
cudaError ClassifyTetrahedronsInACube(uint* verticesPerTetrahedron, uint* cubeMap, float thresholdValue, uint activeCubeCount)
{
const int threads = 64;
ClassifyTetrahedronsInACube_kernel<<<Grid(activeCubeCount, threads), threads>>>(verticesPerTetrahedron, cubeMap, thresholdValue, activeCubeCount);
return cudaGetLastError();
}
extern "C"
cudaError ScanTetrahedrons(uint* vertexOffsets, uint* verticesPerTetrahedron, uint tetrahedronCount)
{
uint* verticesPerTetrahedronLast = verticesPerTetrahedron + tetrahedronCount;
thrust::exclusive_scan(thrust::device_ptr<uint>(verticesPerTetrahedron),
thrust::device_ptr<uint>(verticesPerTetrahedronLast),
thrust::device_ptr<uint>(vertexOffsets));
return cudaGetLastError();
}
extern "C"
cudaError GenerateTriangles(float4* vertices, int* neighborAtoms, float4* normals, float translateX, float translateY, float translateZ,
float scaleX, float scaleY, float scaleZ, uint* vertexOffsets, uint* cubeMap, float thresholdValue, uint tetrahedronCount)
{
const int threads = GT_THREADS;
GenerateTriangles_kernel<<<Grid(tetrahedronCount, threads), threads>>>(vertices, neighborAtoms, normals, translateX, translateY, translateZ,
scaleX, scaleY, scaleZ, vertexOffsets, cubeMap, thresholdValue, tetrahedronCount);
return cudaGetLastError();
}
extern "C"
cudaError MeshReset(uint* eqList, uint* refList, uint tetrahedronCount, uint* vertexOffsets, float* triangleAO)
{
const int threads = 128;
MeshReset_kernel<<<Grid(tetrahedronCount, threads), threads>>>(eqList, refList, tetrahedronCount, vertexOffsets, triangleAO);
return cudaGetLastError();
}
extern "C"
cudaError MeshScan(float4* vertices, float4* normals, uint* vertexOffsets, float* triangleAO, float aoThreshold, uint* eqList, uint* refList, bool* modified, uint* cubeStates, uint* cubeOffsets, uint* cubeMap, float thresholdValue, uint tetrahedronCount)
{
const int threads = 128;
MeshScan_kernel<<<Grid(tetrahedronCount, threads), threads>>>(vertices, normals, vertexOffsets, triangleAO, aoThreshold, eqList, refList, modified, cubeStates, cubeOffsets, cubeMap, thresholdValue, tetrahedronCount);
return cudaGetLastError();
}
extern "C"
cudaError MeshAnalysis(uint* eqList, uint* refList, uint tetrahedronCount)
{
const int threads = 128;
MeshAnalysis_kernel<<<Grid(tetrahedronCount, threads), threads>>>(eqList, refList, tetrahedronCount);
return cudaGetLastError();
}
extern "C"
cudaError MeshLabeling(uint* eqList, uint* refList, uint tetrahedronCount)
{
const int threads = 128;
MeshLabeling_kernel<<<Grid(tetrahedronCount, threads), threads>>>(eqList, refList, tetrahedronCount);
return cudaGetLastError();
}
extern "C"
cudaError CentroidMap(uint* vertexLabels, uint* vertexOffsets, uint* verticesPerTetrahedron, uint* eqList, uint tetrahedronCount)
{
const int threads = 128;
CentroidMap_kernel<<<Grid(tetrahedronCount, threads), threads>>>(vertexLabels, vertexOffsets, verticesPerTetrahedron, eqList, tetrahedronCount);
return cudaGetLastError();
}
extern "C"
cudaError CentroidFinalize(float4* centroids, float4* centroidSums, uint* centroidCounts, uint centroidCount)
{
const int threads = 128;
CentroidFinalize_kernel<<<Grid(centroidCount, threads), threads>>>(centroids, centroidSums, centroidCounts, centroidCount);
return cudaGetLastError();
}
extern "C"
cudaError ColorizeByCentroid(float4* colors, float4* centroidColors, uint* centroidLabels, uint centroidCount, uint* vertexLabels, uint vertexCount)
{
const int threads = 128;
ColorizeByCentroid_kernel<<<Grid(vertexCount, threads), threads>>>(colors, centroidColors, centroidLabels, centroidCount, vertexLabels, vertexCount);
return cudaGetLastError();
}
extern "C"
cudaError ColorizeByAO(float4* colors, float* triangleAO, float aoThreashold, uint vertexCount)
{
const int threads = 128;
ColorizeByAO_kernel<<<Grid(vertexCount, threads), threads>>>(colors, triangleAO, aoThreashold, vertexCount);
return cudaGetLastError();
}
extern "C"
cudaError ComputeSurfaceArea(float4* verts, float* areas, unsigned int triaCount)
{
// do nothing for zero triangles
if (triaCount <= 0)
return cudaSuccess;
// compute area for each triangle
int threads = 256;
dim3 grid(int(ceil(float(triaCount) / float(threads))), 1, 1);
// get around maximum grid size of 65535 in each dimension
while(grid.x > 65535) {
grid.x = (unsigned int) (ceil(float(grid.x) / 2.0f));
grid.y *= 2;
}
while(grid.y > 65535) {
grid.y = (unsigned int) (ceil(float(grid.y) / 2.0f));
grid.z *= 2;
}
ComputeTriangleAreas_kernel<<<grid, threads>>>(verts, areas, triaCount);
return cudaGetLastError();
}
__global__
void ComputeCentroidArea_kernel(float* centroidAreas, uint2* featureStartEnd, float* triaAreas, uint* vertexLabels, uint* centroidLabels, uint triaCount, uint centroidCount)
{
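    // Atomically accumulate each triangle's area into its feature (centroid) and
    // record per-feature start/end triangle indices by detecting label changes;
    // this assumes triangles of the same feature are stored contiguously.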
const uint id = Index();
// Prevent overrunning of array
if (id >= triaCount) {
return;
}
uint labelId = vertexLabels[id*3];
for (int i = 0; i < centroidCount; ++i) {
if ( labelId == centroidLabels[i]) {
atomicAdd( ¢roidAreas[i], triaAreas[id]);
if( id > 0 ) {
// check if the label of the previous triangle is different
if( vertexLabels[(id-1)*3] != labelId ) {
featureStartEnd[i].x = id;
featureStartEnd[i-1].y = id-1;
}
} else {
featureStartEnd[0].x = 0;
featureStartEnd[centroidCount-1].y = triaCount-1;
}
}
}
}
extern "C"
cudaError ComputeCentroidArea(float* centroidAreas, uint2* featureStartEnd, float* triaAreas, uint* vertexLabels, uint* centroidLabels, uint triaCount, uint centroidCount)
{
// do nothing for zero triangles
if (triaCount <= 0)
return cudaSuccess;
// do nothing for zero centroids
if (centroidCount <= 0)
return cudaSuccess;
    // launch one thread per triangle
int threads = 256;
dim3 grid(int(ceil(float(triaCount) / float(threads))), 1, 1);
// get around maximum grid size of 65535 in each dimension
while(grid.x > 65535) {
grid.x = (unsigned int) (ceil(float(grid.x) / 2.0f));
grid.y *= 2;
}
while(grid.y > 65535) {
grid.y = (unsigned int) (ceil(float(grid.y) / 2.0f));
grid.z *= 2;
}
cudaError err = cudaMemset( centroidAreas, 0, centroidCount * sizeof(float));
if( err != cudaSuccess ) return err;
ComputeCentroidArea_kernel<<<grid, threads>>>(centroidAreas, featureStartEnd, triaAreas, vertexLabels, centroidLabels, triaCount, centroidCount);
return cudaGetLastError();
}
__global__
void ComputeTriangleAO_kernel(float4 *vertices, float4* normals, float *ao, unsigned int maxTria)
{
unsigned int zOffset = blockIdx.z * gridDim.x * gridDim.y;
unsigned int blockId = (blockIdx.y * gridDim.x) + blockIdx.x;
unsigned int i = zOffset + (blockId * blockDim.x) + threadIdx.x;
// prevent overrunning of array boundary
if (i >= maxTria)
return;
// get all three triangle vertices and normals
float3 v0 = make_float3(vertices[3*i+0].x, vertices[3*i+0].y, vertices[3*i+0].z);
float3 v1 = make_float3(vertices[3*i+1].x, vertices[3*i+1].y, vertices[3*i+1].z);
float3 v2 = make_float3(vertices[3*i+2].x, vertices[3*i+2].y, vertices[3*i+2].z);
float3 n0 = make_float3(normals[3*i+0].x, normals[3*i+0].y, normals[3*i+0].z);
float3 n1 = make_float3(normals[3*i+1].x, normals[3*i+1].y, normals[3*i+1].z);
float3 n2 = make_float3(normals[3*i+2].x, normals[3*i+2].y, normals[3*i+2].z);
ao[i] = (AOValue(v0, n0) + AOValue(v1, n1) + AOValue(v2, n2)) / 3.0f;
}
extern "C"
cudaError ComputeTriangleAO(float4* verts, float4* normals, float* ao, unsigned int triaCount)
{
// do nothing for zero triangles
if (triaCount <= 0)
return cudaSuccess;
    // compute ambient occlusion for each triangle
int threads = 256;
dim3 grid(int(ceil(float(triaCount) / float(threads))), 1, 1);
// get around maximum grid size of 65535 in each dimension
while(grid.x > 65535) {
grid.x = (unsigned int) (ceil(float(grid.x) / 2.0f));
grid.y *= 2;
}
while(grid.y > 65535) {
grid.y = (unsigned int) (ceil(float(grid.y) / 2.0f));
grid.z *= 2;
}
ComputeTriangleAO_kernel<<<grid, threads>>>(verts, normals, ao, triaCount);
return cudaGetLastError();
}
__global__
void RemoveSmallSegments_kernel(float* centroidAreas, float* triangleAO, uint* vertexLabels, uint* centroidLabels, float areaThreshold, float aoThreshold,
uint triaCount, uint centroidCount, bool* segmentsRemoved)
{
const uint id = Index();
// Prevent overrunning of array
if (id >= triaCount) {
return;
}
for (int i = 0; i < centroidCount; ++i) {
if (vertexLabels[id*3] == centroidLabels[i]) {
// check centroid area
if( centroidAreas[i] < areaThreshold ) {
// "flip" ambient occlusion factor
if( triangleAO[id] <= aoThreshold ) {
triangleAO[id] = aoThreshold + 1.0f;
} else {
triangleAO[id] = 0.0f;
}
// mark as modified
*segmentsRemoved = true;
}
}
}
}
extern "C"
cudaError RemoveSmallSegments(float* centroidAreas, float* triangleAO, uint* vertexLabels, uint* centroidLabels, float areaThreshold, float aoThreshold,
uint triaCount, uint centroidCount, bool* segmentsRemoved)
{
// do nothing for zero triangles
if (triaCount <= 0)
return cudaSuccess;
// do nothing for zero centroids
if (centroidCount <= 0)
return cudaSuccess;
    // launch one thread per triangle
int threads = 256;
dim3 grid(int(ceil(float(triaCount) / float(threads))), 1, 1);
// get around maximum grid size of 65535 in each dimension
while(grid.x > 65535) {
grid.x = (unsigned int) (ceil(float(grid.x) / 2.0f));
grid.y *= 2;
}
while(grid.y > 65535) {
grid.y = (unsigned int) (ceil(float(grid.y) / 2.0f));
grid.z *= 2;
}
RemoveSmallSegments_kernel<<<grid, threads>>>(centroidAreas, triangleAO, vertexLabels, centroidLabels, areaThreshold, aoThreshold, triaCount, centroidCount, segmentsRemoved);
return cudaGetLastError();
}
__global__
void ComputeTriangleBBox_kernel(float* fBBoxMinX, float* fBBoxMinY, float* fBBoxMinZ,
float* fBBoxMaxX, float* fBBoxMaxY, float* fBBoxMaxZ, uint* triangleLabels,
float4* verts, uint* vertexLabels, uint triaCount)
{
const uint id = Index();
// Prevent overrunning of array
if (id >= triaCount) {
return;
}
// get min/max x/y/z of all three triangle vertices (i.e. triangle bbox)
float4 v0 = verts[3*id];
float4 v1 = verts[3*id+1];
float4 v2 = verts[3*id+2];
float3 minVec = make_float3(
min( min( v0.x, v1.x), v2.x),
min( min( v0.y, v1.y), v2.y),
min( min( v0.z, v1.z), v2.z));
float3 maxVec = make_float3(
max( max( v0.x, v1.x), v2.x),
max( max( v0.y, v1.y), v2.y),
max( max( v0.z, v1.z), v2.z));
// write result
fBBoxMinX[id] = minVec.x;
fBBoxMinY[id] = minVec.y;
fBBoxMinZ[id] = minVec.z;
fBBoxMaxX[id] = maxVec.x;
fBBoxMaxY[id] = maxVec.y;
fBBoxMaxZ[id] = maxVec.z;
triangleLabels[id] = vertexLabels[3*id];
}
extern "C"
cudaError ComputeTriangleBBox(float* fBBoxMinX, float* fBBoxMinY, float* fBBoxMinZ,
float* fBBoxMaxX, float* fBBoxMaxY, float* fBBoxMaxZ, uint* triangleLabels,
float4* verts, uint* vertexLabels, uint triaCount)
{
// do nothing for zero triangles
if (triaCount <= 0)
return cudaSuccess;
    // compute the bounding box for each triangle
int threads = 256;
dim3 grid(int(ceil(float(triaCount) / float(threads))), 1, 1);
// get around maximum grid size of 65535 in each dimension
while(grid.x > 65535) {
grid.x = (unsigned int) (ceil(float(grid.x) / 2.0f));
grid.y *= 2;
}
while(grid.y > 65535) {
grid.y = (unsigned int) (ceil(float(grid.y) / 2.0f));
grid.z *= 2;
}
ComputeTriangleBBox_kernel<<<grid, threads>>>( fBBoxMinX, fBBoxMinY, fBBoxMinZ,
fBBoxMaxX, fBBoxMaxY, fBBoxMaxZ, triangleLabels, verts, vertexLabels, triaCount);
return cudaGetLastError();
}
__global__
void WritePrevTetraLabel_kernel( int2* labelPair, uint* cubeStatesOld, uint* cubeOffsetsOld, //uint* cubeMapOld,
uint* verticesPerTetrahedronOld, uint* eqListOld, uint* cubeMap, uint* verticesPerTetrahedron,
uint* eqList, uint tetrahedronCount)
{
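    // For each tetrahedron that is active in both the current and the previous time
    // step, store its current and previous segment labels as a pair; otherwise write
    // (-1,-1). Presumably used to match segments across consecutive time steps.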
const uint activeTetraIdx = Index();
const uint activeCubeIdx = activeTetraIdx / 6;
// Prevent non-power of two writes.
if( activeTetraIdx >= tetrahedronCount ) {
return;
}
const uint globalCubeIdx = cubeMap[activeCubeIdx];
const uint tetraIdx = activeTetraIdx % 6;
//const uint globalTetraIdx = globalCubeIdx * 6 + tetraIdx;
uint cnt1 = verticesPerTetrahedron[activeTetraIdx];
// check if cube was active in previous time step
if( cubeStatesOld[globalCubeIdx] > 0 && cnt1 > 0 ) {
// get the index of the current cube in the previous time step
uint activeCubeIdxOld = cubeOffsetsOld[globalCubeIdx];
        // Test if the mapped global index is equal --> TEST PASSED!
//if( cubeMapOld[activeCubeIdxOld] != globalCubeIdx ) {
// printf( "Greetings from thread %i!\n", activeTetraIdx);
//}
uint cnt0 = verticesPerTetrahedronOld[activeCubeIdxOld * 6 + tetraIdx];
if( cnt0 > 0 ) {
labelPair[activeTetraIdx].x = eqList[activeTetraIdx];
labelPair[activeTetraIdx].y = eqListOld[activeCubeIdxOld * 6 + tetraIdx];
} else {
labelPair[activeTetraIdx].x = -1;
labelPair[activeTetraIdx].y = -1;
}
} else {
// cube was not active previously
labelPair[activeTetraIdx].x = -1;
labelPair[activeTetraIdx].y = -1;
}
}
extern "C"
cudaError WritePrevTetraLabel( int2* labelPair, uint* cubeStatesOld, uint* cubeOffsetsOld, //uint* cubeMapOld,
uint* verticesPerTetrahedronOld, uint* eqListOld, uint* cubeMap, uint* verticesPerTetrahedron,
uint* eqList, uint tetrahedronCount)
{
const int threads = GT_THREADS;
WritePrevTetraLabel_kernel<<<Grid(tetrahedronCount, threads), threads>>>( labelPair, cubeStatesOld, cubeOffsetsOld, //cubeMapOld,
verticesPerTetrahedronOld, eqListOld, cubeMap, verticesPerTetrahedron, eqList, tetrahedronCount);
return cudaGetLastError();
}
__global__
void WriteTriangleVertexIndexList_kernel( uint* featureVertexIdx, uint* featureVertexCnt, uint* featureVertexStartIdx, uint* featureVertexIdxOut, uint triaVertexCnt, uint vertexCnt)
{
// vertex index
const uint vertexIdx = Index();
// check bounds
if( vertexIdx >= vertexCnt ) {
return;
}
// the number of duplications for vertex 'vertexIdx'
const uint fvCnt = featureVertexCnt[vertexIdx];
// the start index of the duplicates for vertex 'vertexIdx'
const uint fvStartIdx = featureVertexStartIdx[vertexIdx];
// temp variable for original vertex index
uint origIdx;
// loop over all duplicates
for( uint i = 0; i < fvCnt; i++ ) {
// get original vertex index
origIdx = featureVertexIdx[fvStartIdx + i];
// write new vertex index
featureVertexIdxOut[origIdx] = vertexIdx;
}
}
extern "C"
cudaError WriteTriangleVertexIndexList( uint* featureVertexIdx, uint* featureVertexCnt, uint* featureVertexStartIdx, uint* featureVertexIdxOut, uint triaVertexCnt, uint vertexCnt) {
const int threads = GT_THREADS;
WriteTriangleVertexIndexList_kernel<<<Grid(vertexCnt, threads), threads>>>( featureVertexIdx, featureVertexCnt, featureVertexStartIdx, featureVertexIdxOut, triaVertexCnt, vertexCnt);
return cudaGetLastError();
}
__global__
void WriteTriangleEdgeList_kernel( uint* featureVertexIdxOut, uint triaCnt, uint2 *featureEdges)
{
// feature index
const uint triangleIdx = Index() * 3;
// check bounds
if( triangleIdx >= (triaCnt * 3)) {
return;
}
// get the three vertex indices of the current triangle
uint idx0 = featureVertexIdxOut[triangleIdx];
uint idx1 = featureVertexIdxOut[triangleIdx+1];
uint idx2 = featureVertexIdxOut[triangleIdx+2];
// write three edges of the current feature triangle (vertex indices sorted)
featureEdges[triangleIdx] = make_uint2( min( idx0, idx1), max( idx0, idx1));
featureEdges[triangleIdx+1] = make_uint2( min( idx0, idx2), max( idx0, idx2));
featureEdges[triangleIdx+2] = make_uint2( min( idx1, idx2), max( idx1, idx2));
}
extern "C"
cudaError WriteTriangleEdgeList( uint* featureVertexIdxOut, uint triaCnt, uint2 *featureEdges) {
const int threads = GT_THREADS;
WriteTriangleEdgeList_kernel<<<Grid(triaCnt, threads), threads>>>( featureVertexIdxOut, triaCnt, featureEdges);
return cudaGetLastError();
}
#endif
|
51adad6a586fdfa55c6f54aea8c57fcc5e98a935.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// IDE indexing
#ifdef __JETBRAINS_IDE__
#define __host__
#define __device__
#define __shared__
#define __constant__
#define __global__
#define __HIPCC__
#include <hip/device_functions.h>
#include <__clang_cuda_builtin_vars.h>
#include <__clang_cuda_intrinsics.h>
#include <__clang_cuda_math_forward_declares.h>
#include <__clang_cuda_complex_builtins.h>
#include <__clang_cuda_cmath.h>
#endif
#include <stdint.h>
#include <memory.h>
#include <stdio.h>
#include <time.h>
#include <thread>
#include <vector>
#include <mutex>
#include <chrono>
#define RANDOM_MULTIPLIER 0x5DEECE66DULL
#define RANDOM_ADDEND 0xBULL
#define RANDOM_MASK ((1ULL << 48) - 1)
#define RANDOM_SCALE 1
#ifndef FLOOR_LEVEL
#define FLOOR_LEVEL 63
#endif
#ifndef WANTED_CACTUS_HEIGHT
#define WANTED_CACTUS_HEIGHT 8
#endif
// Random::next(bits)
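// 48-bit linear congruential generator using the multiplier/addend of java.util.Random.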
__device__ inline uint32_t random_next(uint64_t *random, int32_t bits) {
*random = (*random * RANDOM_MULTIPLIER + RANDOM_ADDEND) & RANDOM_MASK;
return (uint32_t)(*random >> (48 - bits));
}
// new Random(seed)
#define get_random(seed) ((uint64_t)((seed ^ RANDOM_MULTIPLIER) & RANDOM_MASK))
#define get_random_unseeded(state) ((uint64_t) ((state) * RANDOM_SCALE))
__device__ int32_t next_int_unknown(uint64_t *seed, int16_t bound) {
if ((bound & -bound) == bound) {
*seed = (*seed * RANDOM_MULTIPLIER + RANDOM_ADDEND) & RANDOM_MASK;
return (int32_t)((bound * (*seed >> 17)) >> 31);
}
int32_t bits, value;
do {
*seed = (*seed * RANDOM_MULTIPLIER + RANDOM_ADDEND) & RANDOM_MASK;
bits = *seed >> 17;
value = bits % bound;
} while (bits - value + (bound - 1) < 0);
return value;
}
// Random::nextInt(bound)
__device__ inline uint32_t random_next_int(uint64_t *random) {
return random_next(random, 31) % 3;
}
#define TOTAL_WORK_SIZE (1LL << 48)
#ifndef WORK_UNIT_SIZE
#define WORK_UNIT_SIZE (1LL << 23)
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 256
#endif
__device__ inline int8_t extract(int32_t heightMap[], int32_t i) {
return (int8_t)(heightMap[i >> 2] >> ((i & 0b11) << 3));
}
__device__ inline void increase(int32_t heightMap[], int32_t i) {
heightMap[i >> 2] += 1 << ((i & 0b11) << 3);
}
__global__ void crack(uint64_t seed_offset, int32_t *num_seeds, uint64_t *seeds) {
uint64_t originalSeed = blockIdx.x * blockDim.x + threadIdx.x + seed_offset;
uint64_t seed = originalSeed;
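    // 32x32 column height map initialized to the floor level; four 8-bit heights
    // are packed into each int32 (see extract/increase above).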
int32_t heightMap[256];
for (int32_t temp = 0; temp < 256; temp++) {
heightMap[temp] = FLOOR_LEVEL | FLOOR_LEVEL << 8 | FLOOR_LEVEL << 16 | FLOOR_LEVEL << 24;
}
int16_t currentHighestPos = 0;
int16_t terrainHeight;
int16_t initialPosX, initialPosY, initialPosZ;
int16_t posX, posY, posZ;
int16_t offset, posMap;
int16_t i, a, j;
for (i = 0; i < 10; i++) {
// Keep, most threads finish early this way
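        // Prune: assuming each remaining iteration can add at most 9 blocks of height,
        // bail out if the wanted cactus height is no longer reachable.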
if (WANTED_CACTUS_HEIGHT - extract(heightMap, currentHighestPos) + FLOOR_LEVEL > 9 * (10 - i))
return;
initialPosX = random_next(&seed, 4) + 8;
initialPosZ = random_next(&seed, 4) + 8;
terrainHeight = (extract(heightMap, initialPosX + initialPosZ * 32) + 1) * 2;
initialPosY = next_int_unknown(&seed, terrainHeight);
for (a = 0; a < 10; a++) {
posX = initialPosX + random_next(&seed, 3) - random_next(&seed, 3);
posY = initialPosY + random_next(&seed, 2) - random_next(&seed, 2);
posZ = initialPosZ + random_next(&seed, 3) - random_next(&seed, 3);
posMap = posX + posZ * 32;
// Keep
if (posY <= extract(heightMap, posMap) && posY >= 0)
continue;
offset = 1 + next_int_unknown(&seed, random_next_int(&seed) + 1);
for (j = 0; j < offset; j++) {
if ((posY + j - 1) > extract(heightMap, posMap) || posY < 0) continue;
if ((posY + j) <= extract(heightMap, (posX + 1) + posZ * 32) && posY >= 0) continue;
if ((posY + j) <= extract(heightMap, posX + (posZ - 1) * 32) && posY >= 0) continue;
if ((posY + j) <= extract(heightMap, (posX - 1) + posZ * 32) && posY >= 0) continue;
if ((posY + j) <= extract(heightMap, posX + (posZ + 1) * 32) && posY >= 0) continue;
increase(heightMap, posMap);
if (extract(heightMap, currentHighestPos) < extract(heightMap, posMap)) {
currentHighestPos = posMap;
}
}
}
if (extract(heightMap, currentHighestPos) - FLOOR_LEVEL >= WANTED_CACTUS_HEIGHT) {
int32_t index = atomicAdd(num_seeds, 1);
seeds[index] = originalSeed;
return;
}
}
}
#ifndef GPU_COUNT
#define GPU_COUNT 1
#endif
struct GPU_Node {
int GPU;
int* num_seeds;
uint64_t* seeds;
};
void setup_gpu_node(GPU_Node* node, int32_t gpu) {
hipSetDevice(gpu);
node->GPU = gpu;
hipMallocManaged(&node->num_seeds, sizeof(*node->num_seeds));
hipMallocManaged(&node->seeds, (1LL << 10)); // approx 1kb
}
#ifndef OFFSET
#define OFFSET 0
#endif
GPU_Node nodes[GPU_COUNT];
int32_t processed[GPU_COUNT];
uint64_t offset = OFFSET;
uint64_t count = 0;
std::mutex info_lock;
void gpu_manager(int32_t gpu_index) {
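    // One manager thread per GPU: claim the next work unit of seeds, launch the
    // search kernel, then append any matching seeds to this GPU's output file.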
std::string fileName = "kaktoos_seeds" + std::to_string(gpu_index) + ".txt";
FILE *out_file = fopen(fileName.c_str(), "w");
hipSetDevice(gpu_index);
while (offset < TOTAL_WORK_SIZE) {
*nodes[gpu_index].num_seeds = 0;
hipLaunchKernelGGL(( crack), dim3(WORK_UNIT_SIZE / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, offset, nodes[gpu_index].num_seeds, nodes[gpu_index].seeds);
info_lock.lock();
offset += WORK_UNIT_SIZE;
info_lock.unlock();
hipDeviceSynchronize();
for (int32_t i = 0, e = *nodes[gpu_index].num_seeds; i < e; i++) {
fprintf(out_file, "%lld, %lld\n", (long long int)nodes[gpu_index].seeds[i], (long long int)offset - WORK_UNIT_SIZE);
}
fflush(out_file);
info_lock.lock();
count += *nodes[gpu_index].num_seeds;
info_lock.unlock();
}
fclose(out_file);
}
int main() {
printf("Searching %lld total seeds...\n", TOTAL_WORK_SIZE);
std::thread threads[GPU_COUNT];
time_t startTime = time(NULL), currentTime;
for(int32_t i = 0; i < GPU_COUNT; i++) {
setup_gpu_node(&nodes[i], i);
threads[i] = std::thread(gpu_manager, i);
}
using namespace std::chrono_literals;
while (offset < TOTAL_WORK_SIZE) {
time(¤tTime);
int timeElapsed = (int)(currentTime - startTime);
double speed = (double)(offset - OFFSET) / (double)timeElapsed / 1000000.0;
printf("Searched %lld seeds, offset: %lld found %lld matches. Time elapsed: %ds. Speed: %.2fm seeds/s. %f%%\n",
(long long int)(offset - OFFSET), (long long int)offset, (long long int)count, timeElapsed, speed, (double)offset / TOTAL_WORK_SIZE * 100);
std::this_thread::sleep_for(0.5s);
}
}
| 51adad6a586fdfa55c6f54aea8c57fcc5e98a935.cu | // IDE indexing
#ifdef __JETBRAINS_IDE__
#define __host__
#define __device__
#define __shared__
#define __constant__
#define __global__
#define __CUDACC__
#include <device_functions.h>
#include <__clang_cuda_builtin_vars.h>
#include <__clang_cuda_intrinsics.h>
#include <__clang_cuda_math_forward_declares.h>
#include <__clang_cuda_complex_builtins.h>
#include <__clang_cuda_cmath.h>
#endif
#include <stdint.h>
#include <memory.h>
#include <stdio.h>
#include <time.h>
#include <thread>
#include <vector>
#include <mutex>
#include <chrono>
#define RANDOM_MULTIPLIER 0x5DEECE66DULL
#define RANDOM_ADDEND 0xBULL
#define RANDOM_MASK ((1ULL << 48) - 1)
#define RANDOM_SCALE 1
#ifndef FLOOR_LEVEL
#define FLOOR_LEVEL 63
#endif
#ifndef WANTED_CACTUS_HEIGHT
#define WANTED_CACTUS_HEIGHT 8
#endif
// Random::next(bits)
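// 48-bit linear congruential generator using the multiplier/addend of java.util.Random.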
__device__ inline uint32_t random_next(uint64_t *random, int32_t bits) {
*random = (*random * RANDOM_MULTIPLIER + RANDOM_ADDEND) & RANDOM_MASK;
return (uint32_t)(*random >> (48 - bits));
}
// new Random(seed)
#define get_random(seed) ((uint64_t)((seed ^ RANDOM_MULTIPLIER) & RANDOM_MASK))
#define get_random_unseeded(state) ((uint64_t) ((state) * RANDOM_SCALE))
__device__ int32_t next_int_unknown(uint64_t *seed, int16_t bound) {
if ((bound & -bound) == bound) {
*seed = (*seed * RANDOM_MULTIPLIER + RANDOM_ADDEND) & RANDOM_MASK;
return (int32_t)((bound * (*seed >> 17)) >> 31);
}
int32_t bits, value;
do {
*seed = (*seed * RANDOM_MULTIPLIER + RANDOM_ADDEND) & RANDOM_MASK;
bits = *seed >> 17;
value = bits % bound;
} while (bits - value + (bound - 1) < 0);
return value;
}
// Random::nextInt(bound)
__device__ inline uint32_t random_next_int(uint64_t *random) {
return random_next(random, 31) % 3;
}
#define TOTAL_WORK_SIZE (1LL << 48)
#ifndef WORK_UNIT_SIZE
#define WORK_UNIT_SIZE (1LL << 23)
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 256
#endif
__device__ inline int8_t extract(int32_t heightMap[], int32_t i) {
return (int8_t)(heightMap[i >> 2] >> ((i & 0b11) << 3));
}
__device__ inline void increase(int32_t heightMap[], int32_t i) {
heightMap[i >> 2] += 1 << ((i & 0b11) << 3);
}
__global__ void crack(uint64_t seed_offset, int32_t *num_seeds, uint64_t *seeds) {
uint64_t originalSeed = blockIdx.x * blockDim.x + threadIdx.x + seed_offset;
uint64_t seed = originalSeed;
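    // 32x32 column height map initialized to the floor level; four 8-bit heights
    // are packed into each int32 (see extract/increase above).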
int32_t heightMap[256];
for (int32_t temp = 0; temp < 256; temp++) {
heightMap[temp] = FLOOR_LEVEL | FLOOR_LEVEL << 8 | FLOOR_LEVEL << 16 | FLOOR_LEVEL << 24;
}
int16_t currentHighestPos = 0;
int16_t terrainHeight;
int16_t initialPosX, initialPosY, initialPosZ;
int16_t posX, posY, posZ;
int16_t offset, posMap;
int16_t i, a, j;
for (i = 0; i < 10; i++) {
// Keep, most threads finish early this way
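        // Prune: assuming each remaining iteration can add at most 9 blocks of height,
        // bail out if the wanted cactus height is no longer reachable.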
if (WANTED_CACTUS_HEIGHT - extract(heightMap, currentHighestPos) + FLOOR_LEVEL > 9 * (10 - i))
return;
initialPosX = random_next(&seed, 4) + 8;
initialPosZ = random_next(&seed, 4) + 8;
terrainHeight = (extract(heightMap, initialPosX + initialPosZ * 32) + 1) * 2;
initialPosY = next_int_unknown(&seed, terrainHeight);
for (a = 0; a < 10; a++) {
posX = initialPosX + random_next(&seed, 3) - random_next(&seed, 3);
posY = initialPosY + random_next(&seed, 2) - random_next(&seed, 2);
posZ = initialPosZ + random_next(&seed, 3) - random_next(&seed, 3);
posMap = posX + posZ * 32;
// Keep
if (posY <= extract(heightMap, posMap) && posY >= 0)
continue;
offset = 1 + next_int_unknown(&seed, random_next_int(&seed) + 1);
for (j = 0; j < offset; j++) {
if ((posY + j - 1) > extract(heightMap, posMap) || posY < 0) continue;
if ((posY + j) <= extract(heightMap, (posX + 1) + posZ * 32) && posY >= 0) continue;
if ((posY + j) <= extract(heightMap, posX + (posZ - 1) * 32) && posY >= 0) continue;
if ((posY + j) <= extract(heightMap, (posX - 1) + posZ * 32) && posY >= 0) continue;
if ((posY + j) <= extract(heightMap, posX + (posZ + 1) * 32) && posY >= 0) continue;
increase(heightMap, posMap);
if (extract(heightMap, currentHighestPos) < extract(heightMap, posMap)) {
currentHighestPos = posMap;
}
}
}
if (extract(heightMap, currentHighestPos) - FLOOR_LEVEL >= WANTED_CACTUS_HEIGHT) {
int32_t index = atomicAdd(num_seeds, 1);
seeds[index] = originalSeed;
return;
}
}
}
#ifndef GPU_COUNT
#define GPU_COUNT 1
#endif
struct GPU_Node {
int GPU;
int* num_seeds;
uint64_t* seeds;
};
void setup_gpu_node(GPU_Node* node, int32_t gpu) {
cudaSetDevice(gpu);
node->GPU = gpu;
cudaMallocManaged(&node->num_seeds, sizeof(*node->num_seeds));
cudaMallocManaged(&node->seeds, (1LL << 10)); // approx 1kb
}
#ifndef OFFSET
#define OFFSET 0
#endif
GPU_Node nodes[GPU_COUNT];
int32_t processed[GPU_COUNT];
uint64_t offset = OFFSET;
uint64_t count = 0;
std::mutex info_lock;
void gpu_manager(int32_t gpu_index) {
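    // One manager thread per GPU: claim the next work unit of seeds, launch the
    // search kernel, then append any matching seeds to this GPU's output file.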
std::string fileName = "kaktoos_seeds" + std::to_string(gpu_index) + ".txt";
FILE *out_file = fopen(fileName.c_str(), "w");
cudaSetDevice(gpu_index);
while (offset < TOTAL_WORK_SIZE) {
*nodes[gpu_index].num_seeds = 0;
crack<<<WORK_UNIT_SIZE / BLOCK_SIZE, BLOCK_SIZE, 0>>> (offset, nodes[gpu_index].num_seeds, nodes[gpu_index].seeds);
info_lock.lock();
offset += WORK_UNIT_SIZE;
info_lock.unlock();
cudaDeviceSynchronize();
for (int32_t i = 0, e = *nodes[gpu_index].num_seeds; i < e; i++) {
fprintf(out_file, "%lld, %lld\n", (long long int)nodes[gpu_index].seeds[i], (long long int)offset - WORK_UNIT_SIZE);
}
fflush(out_file);
info_lock.lock();
count += *nodes[gpu_index].num_seeds;
info_lock.unlock();
}
fclose(out_file);
}
int main() {
printf("Searching %lld total seeds...\n", TOTAL_WORK_SIZE);
std::thread threads[GPU_COUNT];
time_t startTime = time(NULL), currentTime;
for(int32_t i = 0; i < GPU_COUNT; i++) {
setup_gpu_node(&nodes[i], i);
threads[i] = std::thread(gpu_manager, i);
}
using namespace std::chrono_literals;
while (offset < TOTAL_WORK_SIZE) {
time(¤tTime);
int timeElapsed = (int)(currentTime - startTime);
double speed = (double)(offset - OFFSET) / (double)timeElapsed / 1000000.0;
printf("Searched %lld seeds, offset: %lld found %lld matches. Time elapsed: %ds. Speed: %.2fm seeds/s. %f%%\n",
(long long int)(offset - OFFSET), (long long int)offset, (long long int)count, timeElapsed, speed, (double)offset / TOTAL_WORK_SIZE * 100);
std::this_thread::sleep_for(0.5s);
}
}
|
4d54a1409c8bb87b2afb7a001a8ff81bf6cfd4c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/resize_layer.hpp"
#include "caffe/util/util_img.hpp"
namespace caffe {
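// Nearest-neighbor upsampling: each output pixel (rh, rw) copies the input pixel at
// (rh / resize_ratio, rw / resize_ratio) of the same (n, c) slice.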
template <typename Dtype>
__global__ void ResizeForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int resized_height, const int resized_width,
const Dtype resize_ratio, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int rw = index % resized_width;
int rh = (index / resized_width) % resized_height;
int c = (index / resized_width / resized_height) % channels;
int n = index / resized_width / resized_height / channels;
int h = int(rh / resize_ratio);
int w = int(rw / resize_ratio);
bottom_data += (n * channels + c) * height * width;
top_data[index] = bottom_data[h * width + w];
}
}
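// Nearest-neighbour forward mapping: e.g. with resize_ratio = 2, output pixel (rh, rw) = (5, 7) copies input pixel (h, w) = (2, 3).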
template <typename Dtype>
void ResizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
switch (this->layer_param_.resize_param().type())
{
case ResizeParameter_Type_BILINEAR:
ResizeBlob_gpu(bottom[0],top[0] );
break;
case ResizeParameter_Type_NEAREST:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ResizeForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, resized_height_, resized_width_, resize_ratio_, top_data);
CUDA_POST_KERNEL_CHECK;
break;
default:
LOG(FATAL) << "Unknown resize type.";
}
}
template <typename Dtype>
__global__ void ResizeBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int resized_height, const int resized_width,
const Dtype resize_ratio, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int hstart = int(h * resize_ratio);
int wstart = int(w * resize_ratio);
int hend = int(hstart + resize_ratio);
int wend = int(wstart + resize_ratio);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, resized_height);
wend = min(wend, resized_width);
top_diff += (n * channels + c) * resized_height * resized_width;
for (int rh = hstart; rh < hend; ++rh) {
for (int rw = wstart; rw < wend; ++rw) {
bottom_diff[index] += top_diff[rh * resized_width + rw];
}
}
}
}
template <typename Dtype>
__global__ void kernel_ResizeBackward(const int nthreads, const Dtype* top_diff, const int top_step,
Dtype* bottom_diff, const int bottom_step,
const Dtype* loc1,const Dtype* weight1, const Dtype* loc2, const Dtype* weight2,
const Dtype* loc3,const Dtype* weight3, const Dtype* loc4, const Dtype* weight4)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int bottom_diff_offset = bottom_step*index;
int top_diff_offset = top_step*index;
for (int idx = 0; idx < top_step; ++idx)
{
bottom_diff[bottom_diff_offset + int(loc1[idx])] += top_diff[top_diff_offset+idx]*weight1[idx];
bottom_diff[bottom_diff_offset + int(loc2[idx])] += top_diff[top_diff_offset+idx]*weight2[idx];
bottom_diff[bottom_diff_offset + int(loc3[idx])] += top_diff[top_diff_offset+idx]*weight3[idx];
bottom_diff[bottom_diff_offset + int(loc4[idx])] += top_diff[top_diff_offset+idx]*weight4[idx];
}
}
}
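// Bilinear backward pass: each top gradient is scattered to the four source pixels recorded in loc1..loc4,
// weighted by the bilinear coefficients weight1..weight4 produced by GetBiLinearResizeMatRules_gpu.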
template <typename Dtype>
void ResizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const Dtype* loc1 = this->locs_[0]->gpu_data();
const Dtype* weight1 = this->locs_[0]->gpu_diff();
const Dtype* loc2 = this->locs_[1]->gpu_data();
const Dtype* weight2 = this->locs_[1]->gpu_diff();
const Dtype* loc3 = this->locs_[2]->gpu_data();
const Dtype* weight3 = this->locs_[2]->gpu_diff();
const Dtype* loc4 = this->locs_[3]->gpu_data();
const Dtype* weight4 = this->locs_[3]->gpu_diff();
const int top_step = top[0]->offset(0,1);
const int bottom_step = bottom[0]->offset(0,1);
int loop_n = this->out_num_ * this->out_channels_;
switch (this->layer_param_.resize_param().type())
{
case ResizeParameter_Type_BILINEAR:
caffe::GetBiLinearResizeMatRules_gpu( bottom[0]->height(),bottom[0]->width(),
top[0]->height(), top[0]->width(),
this->locs_[0]->mutable_gpu_data(), this->locs_[0]->mutable_gpu_diff(),
this->locs_[1]->mutable_gpu_data(), this->locs_[1]->mutable_gpu_diff(),
this->locs_[2]->mutable_gpu_data(), this->locs_[2]->mutable_gpu_diff(),
this->locs_[3]->mutable_gpu_data(), this->locs_[3]->mutable_gpu_diff() );
hipLaunchKernelGGL(( kernel_ResizeBackward<Dtype>) , dim3(CAFFE_GET_BLOCKS(loop_n)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
loop_n, top_diff, top_step,
bottom_diff, bottom_step,
loc1,weight1, loc2, weight2,
loc3,weight3,loc4, weight4);
CUDA_POST_KERNEL_CHECK;
break;
case ResizeParameter_Type_NEAREST:
hipLaunchKernelGGL(( ResizeBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), channels_,
height_, width_, resized_height_, resized_width_, resize_ratio_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
break;
default:
LOG(FATAL) << "Unknown resize type.";
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ResizeLayer);
} // namespace caffe
| 4d54a1409c8bb87b2afb7a001a8ff81bf6cfd4c5.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/resize_layer.hpp"
#include "caffe/util/util_img.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ResizeForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int resized_height, const int resized_width,
const Dtype resize_ratio, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int rw = index % resized_width;
int rh = (index / resized_width) % resized_height;
int c = (index / resized_width / resized_height) % channels;
int n = index / resized_width / resized_height / channels;
int h = int(rh / resize_ratio);
int w = int(rw / resize_ratio);
bottom_data += (n * channels + c) * height * width;
top_data[index] = bottom_data[h * width + w];
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
switch (this->layer_param_.resize_param().type())
{
case ResizeParameter_Type_BILINEAR:
ResizeBlob_gpu(bottom[0],top[0] );
break;
case ResizeParameter_Type_NEAREST:
// NOLINT_NEXT_LINE(whitespace/operators)
ResizeForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, resized_height_, resized_width_, resize_ratio_, top_data);
CUDA_POST_KERNEL_CHECK;
break;
default:
LOG(FATAL) << "Unknown resize type.";
}
}
template <typename Dtype>
__global__ void ResizeBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int resized_height, const int resized_width,
const Dtype resize_ratio, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int hstart = int(h * resize_ratio);
int wstart = int(w * resize_ratio);
int hend = int(hstart + resize_ratio);
int wend = int(wstart + resize_ratio);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, resized_height);
wend = min(wend, resized_width);
top_diff += (n * channels + c) * resized_height * resized_width;
for (int rh = hstart; rh < hend; ++rh) {
for (int rw = wstart; rw < wend; ++rw) {
bottom_diff[index] += top_diff[rh * resized_width + rw];
}
}
}
}
template <typename Dtype>
__global__ void kernel_ResizeBackward(const int nthreads, const Dtype* top_diff, const int top_step,
Dtype* bottom_diff, const int bottom_step,
const Dtype* loc1,const Dtype* weight1, const Dtype* loc2, const Dtype* weight2,
const Dtype* loc3,const Dtype* weight3, const Dtype* loc4, const Dtype* weight4)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int bottom_diff_offset = bottom_step*index;
int top_diff_offset = top_step*index;
for (int idx = 0; idx < top_step; ++idx)
{
bottom_diff[bottom_diff_offset + int(loc1[idx])] += top_diff[top_diff_offset+idx]*weight1[idx];
bottom_diff[bottom_diff_offset + int(loc2[idx])] += top_diff[top_diff_offset+idx]*weight2[idx];
bottom_diff[bottom_diff_offset + int(loc3[idx])] += top_diff[top_diff_offset+idx]*weight3[idx];
bottom_diff[bottom_diff_offset + int(loc4[idx])] += top_diff[top_diff_offset+idx]*weight4[idx];
}
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const Dtype* loc1 = this->locs_[0]->gpu_data();
const Dtype* weight1 = this->locs_[0]->gpu_diff();
const Dtype* loc2 = this->locs_[1]->gpu_data();
const Dtype* weight2 = this->locs_[1]->gpu_diff();
const Dtype* loc3 = this->locs_[2]->gpu_data();
const Dtype* weight3 = this->locs_[2]->gpu_diff();
const Dtype* loc4 = this->locs_[3]->gpu_data();
const Dtype* weight4 = this->locs_[3]->gpu_diff();
const int top_step = top[0]->offset(0,1);
const int bottom_step = bottom[0]->offset(0,1);
int loop_n = this->out_num_ * this->out_channels_;
switch (this->layer_param_.resize_param().type())
{
case ResizeParameter_Type_BILINEAR:
caffe::GetBiLinearResizeMatRules_gpu( bottom[0]->height(),bottom[0]->width(),
top[0]->height(), top[0]->width(),
this->locs_[0]->mutable_gpu_data(), this->locs_[0]->mutable_gpu_diff(),
this->locs_[1]->mutable_gpu_data(), this->locs_[1]->mutable_gpu_diff(),
this->locs_[2]->mutable_gpu_data(), this->locs_[2]->mutable_gpu_diff(),
this->locs_[3]->mutable_gpu_data(), this->locs_[3]->mutable_gpu_diff() );
kernel_ResizeBackward<Dtype> <<<CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>>(
loop_n, top_diff, top_step,
bottom_diff, bottom_step,
loc1,weight1, loc2, weight2,
loc3,weight3,loc4, weight4);
CUDA_POST_KERNEL_CHECK;
break;
case ResizeParameter_Type_NEAREST:
ResizeBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), channels_,
height_, width_, resized_height_, resized_width_, resize_ratio_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
break;
default:
LOG(FATAL) << "Unknown resize type.";
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ResizeLayer);
} // namespace caffe
|
22c2ede62e78a534ea8aa4d9d398d71af1866f7d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @brief
* log
*
* @copyright
* Copyright (c) 2020 Fangjun Kuang ([email protected])
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <gtest/gtest.h>
#include "k2/csrc/log.h"
namespace k2 {
TEST(Log, Cpu) {
K2_LOG(DEBUG) << "Debug message";
K2_LOG(INFO) << "Info message";
K2_LOG(WARNING) << "Warning message";
K2_LOG(ERROR) << "Error message";
K2_DLOG(INFO) << "This is printed only in debug mode";
int32_t a = 10;
int32_t b = 20;
int32_t c = 30;
int32_t d = a;
K2_DCHECK_EQ(a, d) << "This is checked only in debug mode";
K2_CHECK(a == d) << "failed";
K2_CHECK_EQ(a, d) << "failed";
K2_CHECK_NE(a, b) << "failed";
K2_CHECK_LE(a, b) << "failed";
K2_CHECK_LT(a, b) << "failed";
K2_CHECK_GE(c, b) << "failed";
K2_CHECK_GT(c, b) << "failed";
}
__global__ void DummyKernel(int32_t *b, int32_t a) {
K2_DLOG(INFO) << "In kernel";
K2_DCHECK_LT(*b, a); // enabled only in debug mode
*b += 1;
K2_CHECK_EQ(*b, a);
K2_DLOG(DEBUG) << "Done";
}
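// DummyKernel's device-side checks are written to pass: *b starts at 10, the kernel is launched with a = 11,
// and after the increment *b == a, so the host-side comparison in the test below also succeeds.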
TEST(Log, Cuda) {
K2_LOG(INFO) << "Test log for cuda";
int32_t a = 10;
int32_t *b = nullptr;
auto ret = hipMalloc(&b, sizeof(a));
K2_CHECK_EQ(ret, hipSuccess) << "Failed to allocate memory";
ret = hipMemcpy(b, &a, sizeof(a), hipMemcpyHostToDevice);
K2_CHECK_CUDA_ERROR(ret) << "Failed to copy memory to gpu";
hipLaunchKernelGGL(( DummyKernel), dim3(1), dim3(1), 0, 0, b, a + 1);
int32_t c = 0;
ret = hipMemcpy(&c, b, sizeof(a), hipMemcpyDeviceToHost);
K2_CHECK_CUDA_ERROR(ret) << "Failed to copy memory from gpu";
K2_CHECK_EQ(a + 1, c) << "Error in the kernel!";
ret = hipFree(b);
K2_CHECK_CUDA_ERROR(ret) << "Failed to free gpu memory";
}
TEST(LogDeathTest, NegativeCases) {
ASSERT_DEATH(K2_LOG(FATAL) << "This will crash the program", "");
int32_t a = 10;
int32_t b = 20;
int32_t c = a;
ASSERT_DEATH(K2_CHECK_EQ(a, b), "");
ASSERT_DEATH(K2_CHECK_NE(a, c), "");
ASSERT_DEATH(K2_CHECK_LE(b, a), "");
ASSERT_DEATH(K2_CHECK_LT(b, a), "");
ASSERT_DEATH(K2_CHECK_GE(a, b), "");
ASSERT_DEATH(K2_CHECK_GT(a, b), "");
auto ret = hipErrorMemoryAllocation;
ASSERT_DEATH(K2_CHECK_CUDA_ERROR(ret), "");
ret = hipErrorAssert;
ASSERT_DEATH(K2_CHECK_CUDA_ERROR(ret), "");
// NOTE: normally we do not need to
// check if NDEBUG is defined in order
// to use K2_DCHECK_*. ASSERT_DEATH
// expects that the statement will make
// the program crash and this is only
// possible for the debug build,
// so we have to add a guard here.
#if !defined(NDEBUG)
K2_LOG(INFO) << "Check for debug build";
ASSERT_DEATH(K2_DLOG(FATAL) << "This will crash the program", "");
ASSERT_DEATH(K2_DCHECK_EQ(a, b), "");
ASSERT_DEATH(K2_DCHECK_NE(a, c), "");
ASSERT_DEATH(K2_DCHECK_LE(b, a), "");
ASSERT_DEATH(K2_DCHECK_LT(b, a), "");
ASSERT_DEATH(K2_DCHECK_GE(a, b), "");
ASSERT_DEATH(K2_DCHECK_GT(a, b), "");
ret = hipErrorInitializationError;
ASSERT_DEATH(K2_DCHECK_CUDA_ERROR(ret), "");
#endif
}
} // namespace k2
| 22c2ede62e78a534ea8aa4d9d398d71af1866f7d.cu | /**
* @brief
* log
*
* @copyright
* Copyright (c) 2020 Fangjun Kuang ([email protected])
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <gtest/gtest.h>
#include "k2/csrc/log.h"
namespace k2 {
TEST(Log, Cpu) {
K2_LOG(DEBUG) << "Debug message";
K2_LOG(INFO) << "Info message";
K2_LOG(WARNING) << "Warning message";
K2_LOG(ERROR) << "Error message";
K2_DLOG(INFO) << "This is printed only in debug mode";
int32_t a = 10;
int32_t b = 20;
int32_t c = 30;
int32_t d = a;
K2_DCHECK_EQ(a, d) << "This is checked only in debug mode";
K2_CHECK(a == d) << "failed";
K2_CHECK_EQ(a, d) << "failed";
K2_CHECK_NE(a, b) << "failed";
K2_CHECK_LE(a, b) << "failed";
K2_CHECK_LT(a, b) << "failed";
K2_CHECK_GE(c, b) << "failed";
K2_CHECK_GT(c, b) << "failed";
}
__global__ void DummyKernel(int32_t *b, int32_t a) {
K2_DLOG(INFO) << "In kernel";
K2_DCHECK_LT(*b, a); // enabled only in debug mode
*b += 1;
K2_CHECK_EQ(*b, a);
K2_DLOG(DEBUG) << "Done";
}
TEST(Log, Cuda) {
K2_LOG(INFO) << "Test log for cuda";
int32_t a = 10;
int32_t *b = nullptr;
auto ret = cudaMalloc(&b, sizeof(a));
K2_CHECK_EQ(ret, cudaSuccess) << "Failed to allocate memory";
ret = cudaMemcpy(b, &a, sizeof(a), cudaMemcpyHostToDevice);
K2_CHECK_CUDA_ERROR(ret) << "Failed to copy memory to gpu";
DummyKernel<<<1, 1>>>(b, a + 1);
int32_t c = 0;
ret = cudaMemcpy(&c, b, sizeof(a), cudaMemcpyDeviceToHost);
K2_CHECK_CUDA_ERROR(ret) << "Failed to copy memory from gpu";
K2_CHECK_EQ(a + 1, c) << "Error in the kernel!";
ret = cudaFree(b);
K2_CHECK_CUDA_ERROR(ret) << "Failed to free gpu memory";
}
TEST(LogDeathTest, NegativeCases) {
ASSERT_DEATH(K2_LOG(FATAL) << "This will crash the program", "");
int32_t a = 10;
int32_t b = 20;
int32_t c = a;
ASSERT_DEATH(K2_CHECK_EQ(a, b), "");
ASSERT_DEATH(K2_CHECK_NE(a, c), "");
ASSERT_DEATH(K2_CHECK_LE(b, a), "");
ASSERT_DEATH(K2_CHECK_LT(b, a), "");
ASSERT_DEATH(K2_CHECK_GE(a, b), "");
ASSERT_DEATH(K2_CHECK_GT(a, b), "");
auto ret = cudaErrorMemoryAllocation;
ASSERT_DEATH(K2_CHECK_CUDA_ERROR(ret), "");
ret = cudaErrorAssert;
ASSERT_DEATH(K2_CHECK_CUDA_ERROR(ret), "");
// NOTE: normally we do not need to
// check if NDEBUG is defined in order
// to use K2_DCHECK_*. ASSERT_DEATH
// expects that the statement will make
// the program crash and this is only
// possible for the debug build,
// so we have to add a guard here.
#if !defined(NDEBUG)
K2_LOG(INFO) << "Check for debug build";
ASSERT_DEATH(K2_DLOG(FATAL) << "This will crash the program", "");
ASSERT_DEATH(K2_DCHECK_EQ(a, b), "");
ASSERT_DEATH(K2_DCHECK_NE(a, c), "");
ASSERT_DEATH(K2_DCHECK_LE(b, a), "");
ASSERT_DEATH(K2_DCHECK_LT(b, a), "");
ASSERT_DEATH(K2_DCHECK_GE(a, b), "");
ASSERT_DEATH(K2_DCHECK_GT(a, b), "");
ret = cudaErrorInitializationError;
ASSERT_DEATH(K2_DCHECK_CUDA_ERROR(ret), "");
#endif
}
} // namespace k2
|
1eb340617550ea290b27f74254e29c96c196c11d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <algorithm>
#include <cstdlib>
#include <cstdio>
#include <stdexcept>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <hipcub/hipcub.hpp>
void setup_rng(hiprandGenerator_t& gen, uint64_t seed) {
// create the rng
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
// set the rng seed
hiprandSetPseudoRandomGeneratorSeed(gen, seed);
}
struct range_op: public thrust::unary_function<int,int> {
const unsigned int range;
const int min_val;
__host__ __device__
range_op(unsigned int _range, int _min_val):
range(_range), min_val(_min_val) {}
__host__ __device__
int operator()(int x) {
unsigned int y = (unsigned int)x;
return ((int) (y % range)) + min_val;
}
};
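// Example: map_to_range(0, 5, ...) below folds every random 32-bit sample into {0, 1, 2, 3, 4} via the modulo above.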
void map_to_range(int min_val, int max_val, int* a, size_t n) {
// functor to perform the map
int range = max_val-min_val;
thrust::device_ptr<int> a_ptr = thrust::device_pointer_cast(a);
thrust::transform(a_ptr,a_ptr+n,a_ptr,range_op(range,min_val));
}
int main() {
using std::cout;
using std::endl;
// settings
size_t num_items = 20;
// allocate memory on device
int* keys = NULL;
int* vals = NULL;
hipMalloc(&keys,num_items*sizeof(*keys));
hipMalloc(&vals,num_items*sizeof(*vals));
// setup random number generator
hiprandGenerator_t gen;
setup_rng(gen,1010ULL);
// generate random numbers & map to a range
hiprandGenerate(gen, (unsigned int*)keys, num_items);
map_to_range(0,5,keys,num_items);
// sequence the values
thrust::device_ptr<int> vals_ptr = thrust::device_pointer_cast(vals);
thrust::sequence(vals_ptr,vals_ptr+num_items);
// prepare for cub
/// allocate output buffers
int* keys_alt = NULL;
int* vals_alt = NULL;
hipMalloc(&keys_alt,num_items*sizeof(*keys_alt));
hipMalloc(&vals_alt,num_items*sizeof(*vals_alt));
/// create double buffers
cub::DoubleBuffer<int> keys_dbuf(keys,keys_alt);
cub::DoubleBuffer<int> vals_dbuf(vals,vals_alt);
/// allocate temporary storage
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, keys_dbuf,
vals_dbuf, num_items);
hipMalloc(&d_temp_storage, temp_storage_bytes);
// sort with cub
hipcub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, keys_dbuf,
vals_dbuf, num_items);
//thrust::sort(thrust::device,a,a+n);
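// After SortPairs, the DoubleBuffers' Current() pointers reference whichever buffer holds the sorted keys/values.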
// create device ptr and inspect
thrust::device_ptr<int> keys_ptr = thrust::device_pointer_cast(keys_dbuf.Current());
vals_ptr = thrust::device_pointer_cast(vals_dbuf.Current());
for (size_t i=0; i<num_items; i++) {
cout << i << ": key = " << keys_ptr[i] << ", val = " << vals_ptr[i] << endl;
}
// run length encode
/// compacted keys
int* keys_cmp = NULL; hipMalloc(&keys_cmp,num_items*sizeof(*keys_cmp));
/// count of keys
int* keys_cnt = NULL; hipMalloc(&keys_cnt,num_items*sizeof(*keys_cnt));
/// number of computed segments
int* num_segments = NULL; hipMalloc(&num_segments,sizeof(int));
/// temporary storage
void* rle_temp_storage = NULL;
size_t rle_storage_bytes = 0;
hipcub::DeviceRunLengthEncode::Encode(rle_temp_storage, rle_storage_bytes, keys_dbuf.Current(),
keys_cmp, keys_cnt, num_segments, num_items);
hipMalloc(&rle_temp_storage,rle_storage_bytes);
/// perform rle
hipcub::DeviceRunLengthEncode::Encode(rle_temp_storage, rle_storage_bytes, keys_dbuf.Current(),
keys_cmp, keys_cnt, num_segments, num_items);
// look at results
cout << "run length encode..." << endl;
thrust::device_ptr<int> cmp_ptr = thrust::device_pointer_cast(keys_cmp);
thrust::device_ptr<int> cnt_ptr = thrust::device_pointer_cast(keys_cnt);
thrust::device_ptr<int> seg_ptr = thrust::device_pointer_cast(num_segments);
for (size_t i = 0; i < seg_ptr[0]; i++) {
cout << i << ": key = " << cmp_ptr[i] << ", cnt = " << cnt_ptr[i] << endl;
}
return 0;
}
| 1eb340617550ea290b27f74254e29c96c196c11d.cu | #include <iostream>
#include <algorithm>
#include <cstdlib>
#include <cstdio>
#include <stdexcept>
#include <cuda.h>
#include <curand.h>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <cub/cub.cuh>
void setup_rng(curandGenerator_t& gen, uint64_t seed) {
// create the rng
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
// set the rng seed
curandSetPseudoRandomGeneratorSeed(gen, seed);
}
struct range_op: public thrust::unary_function<int,int> {
const unsigned int range;
const int min_val;
__host__ __device__
range_op(unsigned int _range, int _min_val):
range(_range), min_val(_min_val) {}
__host__ __device__
int operator()(int x) {
unsigned int y = (unsigned int)x;
return ((int) (y % range)) + min_val;
}
};
void map_to_range(int min_val, int max_val, int* a, size_t n) {
// functor to perform the map
int range = max_val-min_val;
thrust::device_ptr<int> a_ptr = thrust::device_pointer_cast(a);
thrust::transform(a_ptr,a_ptr+n,a_ptr,range_op(range,min_val));
}
int main() {
using std::cout;
using std::endl;
// settings
size_t num_items = 20;
// allocate memory on device
int* keys = NULL;
int* vals = NULL;
cudaMalloc(&keys,num_items*sizeof(*keys));
cudaMalloc(&vals,num_items*sizeof(*vals));
// setup random number generator
curandGenerator_t gen;
setup_rng(gen,1010ULL);
// generate random numbers & map to a range
curandGenerate(gen, (unsigned int*)keys, num_items);
map_to_range(0,5,keys,num_items);
// sequence the values
thrust::device_ptr<int> vals_ptr = thrust::device_pointer_cast(vals);
thrust::sequence(vals_ptr,vals_ptr+num_items);
// prepare for cub
/// allocate output buffers
int* keys_alt = NULL;
int* vals_alt = NULL;
cudaMalloc(&keys_alt,num_items*sizeof(*keys_alt));
cudaMalloc(&vals_alt,num_items*sizeof(*vals_alt));
/// create double buffers
cub::DoubleBuffer<int> keys_dbuf(keys,keys_alt);
cub::DoubleBuffer<int> vals_dbuf(vals,vals_alt);
/// allocate temporary storage
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, keys_dbuf,
vals_dbuf, num_items);
cudaMalloc(&d_temp_storage, temp_storage_bytes);
// sort with cub
cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, keys_dbuf,
vals_dbuf, num_items);
//thrust::sort(thrust::device,a,a+n);
// create device ptr and inspect
thrust::device_ptr<int> keys_ptr = thrust::device_pointer_cast(keys_dbuf.Current());
vals_ptr = thrust::device_pointer_cast(vals_dbuf.Current());
for (size_t i=0; i<num_items; i++) {
cout << i << ": key = " << keys_ptr[i] << ", val = " << vals_ptr[i] << endl;
}
// run length encode
/// compacted keys
int* keys_cmp = NULL; cudaMalloc(&keys_cmp,num_items*sizeof(*keys_cmp));
/// count of keys
int* keys_cnt = NULL; cudaMalloc(&keys_cnt,num_items*sizeof(*keys_cnt));
/// number of computed segments
int* num_segments = NULL; cudaMalloc(&num_segments,sizeof(int));
/// temporary storage
void* rle_temp_storage = NULL;
size_t rle_storage_bytes = 0;
cub::DeviceRunLengthEncode::Encode(rle_temp_storage, rle_storage_bytes, keys_dbuf.Current(),
keys_cmp, keys_cnt, num_segments, num_items);
cudaMalloc(&rle_temp_storage,rle_storage_bytes);
/// perform rle
cub::DeviceRunLengthEncode::Encode(rle_temp_storage, rle_storage_bytes, keys_dbuf.Current(),
keys_cmp, keys_cnt, num_segments, num_items);
// look at results
cout << "run length encode..." << endl;
thrust::device_ptr<int> cmp_ptr = thrust::device_pointer_cast(keys_cmp);
thrust::device_ptr<int> cnt_ptr = thrust::device_pointer_cast(keys_cnt);
thrust::device_ptr<int> seg_ptr = thrust::device_pointer_cast(num_segments);
for (size_t i = 0; i < seg_ptr[0]; i++) {
cout << i << ": key = " << cmp_ptr[i] << ", cnt = " << cnt_ptr[i] << endl;
}
return 0;
}
|
4d0689022ad9e2da7c4ccaccdb42713d0252159c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <math.h> // sqrt/ceil are used on the host in checkrange()
__global__ void kernel() {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t n = tid;
uint32_t sum = 0;
uint32_t prod = 1;
while(n != 0){
uint32_t digit = n % 10;
n /= 10;
sum += digit;
prod *= digit;
}
if(sum*prod == tid) printf("%u\n", tid);
return;
}
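// A thread id is printed when its digit sum times its digit product equals the id itself,
// e.g. 144: (1 + 4 + 4) * (1 * 4 * 4) = 9 * 16 = 144.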
void checkrange(uint32_t range){
double dim = sqrt(range);
printf("Checking %u for sum-product numbers\n", range);
uint32_t blocks = (uint32_t) ceil(range/(dim));
printf("dim: %d, blocks: %d\n", (uint32_t) dim, blocks);
hipLaunchKernelGGL(( kernel), dim3((uint32_t)dim), dim3(blocks), 0, 0, );
hipError_t rc = hipDeviceSynchronize();
rc = hipGetLastError();
printf("Last error : %s\n", hipGetErrorString(rc));
}
int main() {
// main iteration
checkrange(1024);
checkrange(1048576); // max range regarding the device capabilities (1024 threads/blocks)
return 0;
} | 4d0689022ad9e2da7c4ccaccdb42713d0252159c.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <stdint.h>
#include <math.h> // sqrt/ceil are used on the host in checkrange()
__global__ void kernel() {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t n = tid;
uint32_t sum = 0;
uint32_t prod = 1;
while(n != 0){
uint32_t digit = n % 10;
n /= 10;
sum += digit;
prod *= digit;
}
if(sum*prod == tid) printf("%u\n", tid);
return;
}
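// A thread id is printed when its digit sum times its digit product equals the id itself,
// e.g. 144: (1 + 4 + 4) * (1 * 4 * 4) = 9 * 16 = 144.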
void checkrange(uint32_t range){
double dim = sqrt(range);
printf("Checking %u for sum-product numbers\n", range);
uint32_t blocks = (uint32_t) ceil(range/(dim));
printf("dim: %d, blocks: %d\n", (uint32_t) dim, blocks);
kernel<<<(uint32_t)dim, blocks, 0>>>();
cudaError_t rc = cudaDeviceSynchronize();
rc = cudaGetLastError();
printf("Last error : %s\n", cudaGetErrorString(rc));
}
int main() {
// main iteration
checkrange(1024);
checkrange(1048576); // max range regarding the device capabilities (1024 threads/blocks)
return 0;
} |
4b630517059d6f02c02c1f2198a7fae9b5877090.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::conv2d(const Tensor& input,
int outChannels,
int kernelH, int kernelW,
int strideH, int strideW,
int paddingH, int paddingW,
ActiMode activation,
bool use_bias,
Initializer* kernel_initializer,
Initializer* bias_initializer)
{
if (kernel_initializer == NULL) {
int seed = std::rand();
kernel_initializer = new GlorotUniform(seed);
}
if (bias_initializer == NULL) {
bias_initializer = new ZeroInitializer();
}
assert(input.numDim == 4); /*NCHW*/
Conv2D *conv = new Conv2D(*this, input, outChannels, kernelH, kernelW,
strideH, strideW, paddingH, paddingW, activation,
use_bias, kernel_initializer, bias_initializer);
layers.push_back(conv);
return conv->outputs[0];
}
Conv2D* FFModel::conv2d(int inChannels,
int outChannels,
int kernelH, int kernelW,
int strideH, int strideW,
int paddingH, int paddingW,
ActiMode activation,
bool use_bias,
Initializer* kernel_initializer,
Initializer* bias_initializer)
{
if (kernel_initializer == NULL) {
int seed = std::rand();
kernel_initializer = new GlorotUniform(seed);
}
if (bias_initializer == NULL) {
bias_initializer = new ZeroInitializer();
}
Conv2D *conv = new Conv2D(*this, inChannels, outChannels, kernelH, kernelW,
strideH, strideW, paddingH, paddingW, activation,
use_bias, kernel_initializer, bias_initializer);
layers.push_back(conv);
return conv;
}
/*
locals[0] = kernel
locals[1] = bias
*/
Conv2D::Conv2D(FFModel& model,
const Tensor& _input,
int out_dim,
int _kernel_h, int _kernel_w,
int _stride_h, int _stride_w,
int _padding_h, int _padding_w,
ActiMode _activation,
bool _use_bias,
Initializer* _kernel_initializer,
Initializer* _bias_initializer)
: Op(model, "Conv2D_"+std::to_string(_kernel_h)+std::to_string(_kernel_w), _input),
in_channels(_input.adim[2]), out_channels(out_dim),
kernel_h(_kernel_h), kernel_w(_kernel_w),
stride_h(_stride_h), stride_w(_stride_w),
padding_h(_padding_h), padding_w(_padding_w),
activation(_activation), use_bias(_use_bias),
kernel_initializer(_kernel_initializer),
bias_initializer(_bias_initializer),
profiling(model.config.profiling)
{
assert(_input.numDim == 4);
// Set output shape
int input_w = inputs[0].adim[0];
int input_h = inputs[0].adim[1];
int output_w = 1 + (input_w + 2 * padding_w - kernel_w) / stride_w;
int output_h = 1 + (input_h + 2 * padding_h - kernel_h) / stride_h;
int output_c = out_channels;
int output_n = inputs[0].adim[3];
outputs[0].numDim = 4;
outputs[0].adim[0] = output_w;
outputs[0].adim[1] = output_h;
outputs[0].adim[2] = output_c;
outputs[0].adim[3] = output_n;
}
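// Example: a 14x14 input with kernel 3, stride 1, padding 1 gives output_w = 1 + (14 + 2*1 - 3) / 1 = 14 (a "same" convolution).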
Conv2D::Conv2D(FFModel& model,
int in_dim, int out_dim,
int _kernel_h, int _kernel_w,
int _stride_h, int _stride_w,
int _padding_h, int _padding_w,
ActiMode _activation,
bool _use_bias,
Initializer* _kernel_initializer,
Initializer* _bias_initializer)
: Op(model, "Conv2D_"+std::to_string(_kernel_h)+std::to_string(_kernel_w), 1),
in_channels(in_dim), out_channels(out_dim),
kernel_h(_kernel_h), kernel_w(_kernel_w),
stride_h(_stride_h), stride_w(_stride_w),
padding_h(_padding_h), padding_w(_padding_w),
activation(_activation), use_bias(_use_bias),
kernel_initializer(_kernel_initializer),
bias_initializer(_bias_initializer),
profiling(model.config.profiling)
{
}
Tensor Conv2D::init_inout(FFModel& model, const Tensor& _input)
{
assert(_input.numDim == 4);
assert(_input.adim[2] == in_channels);
inputs[0] = _input;
create_output_and_partition(model);
return outputs[0];
}
void Conv2D::create_weights(FFModel& model)
{
// Retrive the task indexspace for the op
std::string pcname = name;
task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname));
// Create kernel
{
const int dims[4] = {out_channels, in_channels, kernel_h, kernel_w};
weights[0] = model.create_conv_weight<4>(this, dims, (IndexSpaceT<4>)task_is, DT_FLOAT, kernel_initializer);
numWeights = 1;
}
// Create bias tensor
if (use_bias) {
const int dims[1] = {out_channels};
weights[1] = model.create_conv_weight<1>(this, dims, (IndexSpaceT<4>)task_is, DT_FLOAT, bias_initializer);
numWeights = 2;
}
}
void Conv2D::create_output_and_partition(FFModel& model)
{
// Retrive the task indexspace for the op
std::string pcname = name;
task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is);
// Create output tensor
int input_w = inputs[0].adim[0];
int input_h = inputs[0].adim[1];
int output_w = 1 + (input_w + 2 * padding_w - kernel_w) / stride_w;
int output_h = 1 + (input_h + 2 * padding_h - kernel_h) / stride_h;
int output_c = out_channels;
int output_n = inputs[0].adim[3];
int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1;
int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1;
int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1;
int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1;
{
const int dims[4] = {output_n, output_c, output_h, output_w};
outputs[0] = model.create_tensor<4>(dims, (IndexSpaceT<4>)task_is, DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
}
// Compute partition bound for input
Rect<4> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
// Currently assume we didn't split across the channel dimension
assert(num_par_c == 1);
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
model.create_disjoint_partition(
inputs[0], (IndexSpaceT<4>)task_is, input_lps[0], input_grad_lps[0]);
}
}
cudnnConvolutionFwdAlgo_t
selectConvolutionForwardAlgorithm(cudnnHandle_t handle,
const cudnnTensorDescriptor_t xDesc, const void* x,
const cudnnFilterDescriptor_t wDesc, const void* w,
const cudnnConvolutionDescriptor_t convDesc,
void* workSpace, size_t workSpaceSize,
const cudnnTensorDescriptor_t yDesc, void* y);
cudnnConvolutionBwdFilterAlgo_t
selectConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle,
const cudnnTensorDescriptor_t xDesc, const void* x,
const cudnnTensorDescriptor_t dyDesc, const void* dy,
const cudnnConvolutionDescriptor_t convDesc,
void* workSpace, size_t workSpaceSize,
const cudnnFilterDescriptor_t dwDesc, void* dw);
cudnnConvolutionBwdDataAlgo_t
selectConvolutionBackwardDataAlgorithm(cudnnHandle_t handle,
const cudnnFilterDescriptor_t wDesc, const void* w,
const cudnnTensorDescriptor_t dyDesc, const void* dy,
const cudnnConvolutionDescriptor_t convDesc,
void* workSpace, size_t workSpaceSize,
const cudnnTensorDescriptor_t dxDesc, void* dx);
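// Defined below: each helper benchmarks cuDNN's candidate algorithms via cudnnFind*AlgorithmEx and returns the fastest one measured.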
/*
regions[0]: input
regions[1]: output
regions[2](I): filter
regions[3](I): bias
regions[4](O): filter_grad
regions[5](O): input_grad
*/
__host__
OpMeta* Conv2D::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 6);
assert(task->regions.size() == 6);
const Conv2D* conv = (Conv2D*) task->args;
FFHandler handle = *((const FFHandler*) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
false/*readOutput*/);
TensorAccessorR<float, 4> acc_kernel(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_kernel_grad(
regions[4], task->regions[4], FID_DATA, ctx, runtime,
false/*readOutput*/);
TensorAccessorW<float, 4> acc_input_grad(
regions[5], task->regions[5], FID_DATA, ctx, runtime,
false/*readOutput*/);
Conv2DMeta* m = new Conv2DMeta(handle);
m->relu = conv->activation == AC_MODE_RELU;
int input_w = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int input_h = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1;
int input_c = acc_input.rect.hi[2] - acc_input.rect.lo[2] + 1;
int input_n = acc_input.rect.hi[3] - acc_input.rect.lo[3] + 1;
int output_w = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1;
int output_h = acc_output.rect.hi[1] - acc_output.rect.lo[1] + 1;
int output_c = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1;
int output_n = acc_output.rect.hi[3] - acc_output.rect.lo[3] + 1;
printf("init conv (input): n(%d) c(%d) h(%d) w(%d)\n",
input_n, input_c, input_h, input_w);
printf("init conv (output): n(%d) c(%d) h(%d) w(%d)\n",
output_n, output_c, output_h, output_w);
checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
input_n,
input_c,
input_h,
input_w));
checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1,
output_c,
1,
1));
printf("filterDim: kernel(%d %d) c_in(%d), c_out(%d)\n", conv->kernel_h, conv->kernel_w, input_c, output_c);
checkCUDNN(cudnnSetFilter4dDescriptor(m->filterDesc,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
output_c,
input_c,
conv->kernel_h,
conv->kernel_w));
//printf("convDim: padding(%d %d) stride(%d %d)\n", conv->padding_h, conv->padding_w, conv->stride_h, conv->stride_w);
int pad_h = ((output_h - 1) * conv->stride_h + conv->kernel_h - input_h + 1) / 2;
int pad_w = ((output_w - 1) * conv->stride_w + conv->kernel_w - input_w + 1) / 2;
if (pad_h != conv->padding_h)
printf("Warning: changing conv_padding_h to satisfy output_h size\n");
if (pad_w != conv->padding_w)
printf("Warning: changing conv_padding_w to satisfy output_w size\n");
checkCUDNN(cudnnSetConvolution2dDescriptor(m->convDesc,
pad_h,//conv->padding_h,
pad_w,//conv->padding_w,
conv->stride_h,
conv->stride_w,
1/*upscale_x*/,
1/*upscale_y*/,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
// enable tensor core when possible
checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH));
int n, c, h, w;
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(m->convDesc,
m->inputTensor,
m->filterDesc,
&n, &c, &h, &w));
assert(n == output_n);
assert(c == output_c);
assert(h == output_h);
assert(w == output_w);
checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
n, c, h, w));
// select forward algorithm
m->fwdAlgo = selectConvolutionForwardAlgorithm(m->handle.dnn, m->inputTensor, acc_input.ptr,
m->filterDesc, acc_kernel.ptr, m->convDesc,
m->handle.workSpace, m->handle.workSpaceSize,
m->outputTensor, acc_output.ptr);
// select backward filter algorithm
m->bwdFilterAlgo = selectConvolutionBackwardFilterAlgorithm(
m->handle.dnn, m->inputTensor, acc_input.ptr,
m->outputTensor, acc_output.ptr,
m->convDesc, m->handle.workSpace, m->handle.workSpaceSize,
m->filterDesc, acc_kernel_grad.ptr);
// select backward data algorithm
m->bwdDataAlgo = selectConvolutionBackwardDataAlgorithm(
m->handle.dnn, m->filterDesc, acc_kernel.ptr,
m->outputTensor, acc_output.ptr,
m->convDesc, m->handle.workSpace, m->handle.workSpaceSize,
m->inputTensor, acc_input_grad.ptr);
if (m->relu) {
checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc));
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU,
CUDNN_PROPAGATE_NAN, 0.0));
}
return m;
}
void Conv2D::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
IndexLauncher launcher(CONV2D_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Conv2D)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(3, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].part_grad, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, weights[0].region_grad));
launcher.add_field(4, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(5, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
}
void Conv2D::forward_kernel(const Conv2DMeta* m,
const float* input_ptr,
float* output_ptr,
const float* filter_ptr,
const float* bias_ptr)
{
float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionForward(m->handle.dnn, &alpha,
m->inputTensor, input_ptr,
m->filterDesc, filter_ptr,
m->convDesc, m->fwdAlgo,
m->handle.workSpace, m->handle.workSpaceSize,
&beta, m->outputTensor, output_ptr));
checkCUDNN(cudnnAddTensor(m->handle.dnn, &alpha, m->biasTensor,
bias_ptr, &alpha, m->outputTensor, output_ptr));
if (m->relu) {
checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc,
&alpha, m->outputTensor, output_ptr,
&beta, m->outputTensor, output_ptr));
}
}
/*
regions[0](I): input
regions[1](O): output
regions[2](I): filter
regions[3](I): bias
*/
__host__
void Conv2D::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
Conv2D* conv = (Conv2D*) task->args;
const Conv2DMeta* m = *((Conv2DMeta**) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
false/*readOutput*/);
TensorAccessorR<float, 4> acc_kernel(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
//printf("fwdAlgo(%d), bwdFilterALgo(%d), bwdDataAlgo(%d)\n", (int)m->fwdAlgo,(int) m->bwdFilterAlgo,(int) m->bwdDataAlgo);
hipEvent_t t_start, t_end;
if (conv->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
conv->forward_kernel(m, acc_input.ptr, acc_output.ptr, acc_kernel.ptr, acc_bias.ptr);
if (conv->profiling) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
//print_tensor<4, float>(acc_input.ptr, acc_input.rect, "[Conv2D:forward:input]");
//print_tensor<4, float>(acc_kernel.ptr, acc_kernel.rect, "[Conv2D:forward:kernel]");
//print_tensor<1, float>(acc_bias.ptr, acc_bias.rect, "[Conv2D:forward:bias]");
//print_tensor<4, float>(acc_output.ptr, acc_output.rect, "[Conv2D:forward:output]");
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("Conv2D forward time (CF) = %.2fms\n", elapsed);
}
}
__host__
void Conv2D::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(CONV2D_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Conv2D)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(3, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
void Conv2D::backward_kernel(const Conv2DMeta* m,
const float* input_ptr,
float* input_grad_ptr,
const float* output_ptr,
float* output_grad_ptr,
const float* kernel_ptr,
float* kernel_grad_ptr,
float* bias_grad_ptr)
{
float alpha = 1.0f;
//float beta = 0.0f;
if (m->relu) {
cudnnDataType_t dataType;
int n, c, h, w, nStride, cStride, hStride, wStride;
checkCUDNN(cudnnGetTensor4dDescriptor(m->outputTensor, &dataType,
&n, &c, &h, &w, &nStride, &cStride, &hStride, &wStride));
hipLaunchKernelGGL(( reluBackward), dim3(GET_BLOCKS(n*c*h*w)), dim3(CUDA_NUM_THREADS), 0, 0, output_grad_ptr, output_ptr, n*c*h*w);
}
// Compute filter gradient
// NOTE: we use alpha for kernel_grad to accumulate gradients
checkCUDNN(cudnnConvolutionBackwardFilter(m->handle.dnn, &alpha,
m->inputTensor, input_ptr,
m->outputTensor, output_grad_ptr,
m->convDesc, m->bwdFilterAlgo,
m->handle.workSpace, m->handle.workSpaceSize,
&alpha, m->filterDesc, kernel_grad_ptr));
// Compute bias gradient
// NOTE: we use alpha for bias_grad to accumulate gradients
checkCUDNN(cudnnConvolutionBackwardBias(m->handle.dnn, &alpha,
m->outputTensor, output_grad_ptr,
&alpha, m->biasTensor, bias_grad_ptr));
// Compute data gradient
// NOTE: we use alpha for input_grad to accumulate gradients
checkCUDNN(cudnnConvolutionBackwardData(m->handle.dnn, &alpha,
m->filterDesc, kernel_ptr,
m->outputTensor, output_grad_ptr,
m->convDesc, m->bwdDataAlgo,
m->handle.workSpace, m->handle.workSpaceSize,
&alpha, m->inputTensor, input_grad_ptr));
}
/*
regions[0](I): input
regions[1](I/O): input_grad
regions[2](I): output
regions[3](I/O): output_grad
regions[4](I): filter
regions[5](I/O): filter_grad
regions[6](I/O): bias_grad
*/
__host__
void Conv2D::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 7);
assert(task->regions.size() == 7);
Conv2D* conv = (Conv2D*) task->args;
const Conv2DMeta* m = *((Conv2DMeta**) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_input_grad(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 4> acc_output(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output_grad(
regions[3], task->regions[3], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 4> acc_kernel(
regions[4], task->regions[4], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_kernel_grad(
regions[5], task->regions[5], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorW<float, 1> acc_bias_grad(
regions[6], task->regions[6], FID_DATA, ctx, runtime,
true/*readOutput*/);
hipEvent_t t_start, t_end;
if (conv->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
conv->backward_kernel(m, acc_input.ptr, acc_input_grad.ptr,
acc_output.ptr, acc_output_grad.ptr,
acc_kernel.ptr, acc_kernel_grad.ptr,
acc_bias_grad.ptr);
if (conv->profiling) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("Conv2D backward time = %.2fms\n", elapsed);
//print_tensor<4, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Conv2D:backward:output_grad]");
//print_tensor<4, float>(acc_kernel_grad.ptr, acc_kernel_grad.rect, "[Conv2D:backward:kernel_grad]");
//print_tensor<1, float>(acc_bias_grad.ptr, acc_bias_grad.rect, "[Conv2D:backward:bias_grad]");
//print_tensor<4, float>(acc_input_grad.ptr, acc_input_grad.rect, "[Conv2D:backward:input_grad]");
}
}
__host__
void Conv2D::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(CONV2D_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Conv2D)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0](I): input
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1](I/O): input_grad
launcher.add_region_requirement(
RegionRequirement(inputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(1, FID_DATA);
// regions[2](I): output
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
// regions[3](I/O): output_grad
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(3, FID_DATA);
// regions[4](I): filter
launcher.add_region_requirement(
RegionRequirement(weights[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(4, FID_DATA);
// regions[5](I/O): filter_grad
launcher.add_region_requirement(
RegionRequirement(weights[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[0].region_grad));
launcher.add_field(5, FID_DATA);
// regions[6](I/O): bias_grad
launcher.add_region_requirement(
RegionRequirement(weights[1].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[1].region_grad));
launcher.add_field(6, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
// TODO: remove this line
//if (first_layer)
//fm.wait_all_results();
}
#ifdef DEADCODE
/*
regions[0](I/O): filter
regions[1](I): filter_grad
regions[2](I/O): bias
regions[3](I): bias_grad
*/
__host__
void Conv2D::update_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
const Conv2D* conv = (Conv2D*) task->args;
const AccessorRW<float, 1> acc_filter(regions[0], FID_DATA);
const AccessorRO<float, 1> acc_filter_grad(regions[1], FID_DATA);
const AccessorRW<float, 1> acc_bias(regions[2], FID_DATA);
const AccessorRO<float, 1> acc_bias_grad(regions[3], FID_DATA);
Rect<1> rect_filter, rect_filter_grad, rect_bias, rect_bias_grad;
rect_filter =
runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
rect_filter_grad =
runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
rect_bias =
runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
rect_bias_grad =
runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
size_t filter_size = rect_filter.volume();
size_t bias_size = rect_bias.volume();
assert(filter_size == conv->in_channels * conv->out_channels
* conv->kernel_w * conv->kernel_h);
assert(bias_size == conv->out_channels);
assert(filter_size * conv->num_replica == rect_filter_grad.volume());
assert(bias_size * conv->num_replica == rect_bias_grad.volume());
assert(acc_filter.accessor.is_dense_arbitrary(rect_filter));
assert(acc_filter_grad.accessor.is_dense_arbitrary(rect_filter_grad));
assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad));
float *filter_ptr = acc_filter.ptr(rect_filter.lo);
const float *filter_grad_ptr = acc_filter_grad.ptr(rect_filter_grad.lo);
float *bias_ptr = acc_bias.ptr(rect_bias.lo);
const float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo);
updateGAS(filter_ptr, filter_grad_ptr, filter_size,
conv->num_replica, conv->learning_rate);
updateGAS(bias_ptr, bias_grad_ptr, bias_size,
conv->num_replica, conv->learning_rate);
}
__host__
void Conv2D::update(const FFModel& ff)
{
// Synchronize the learning rate
learning_rate = ff.config.learningRate;
assert(num_replica > 0);
// Only aggregate parameters if more than one replica
if (num_replica > 1) {
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
TaskLauncher launcher(CONV2D_UPD_TASK_ID, TaskArgument(this, sizeof(Conv2D)));
launcher.add_region_requirement(
RegionRequirement(locals[0].region, READ_WRITE, EXCLUSIVE, locals[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(locals[0].region_grad, READ_ONLY, EXCLUSIVE, locals[0].region_grad));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(locals[1].region, READ_WRITE, EXCLUSIVE, locals[1].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(locals[1].region_grad, READ_ONLY, EXCLUSIVE, locals[1].region_grad));
launcher.add_field(3, FID_DATA);
runtime->execute_task(ctx, launcher);
}
}
#endif
/*
__host__
Parameter* Conv2D::get_parameter(int index)
{
if (index == 0) {
return &weights[0];
} else if (index == 1) {
return &weights[1];
} else {
assert(0);
return NULL;
}
}*/
__host__
void Conv2D::print_layer(const FFModel& ff)
{
printf("conv2d layer\n");
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
#if 0
TaskLauncher launcher(CONV2D_PRINT_TASK_ID, TaskArgument(NULL, 0));
launcher.add_region_requirement(
RegionRequirement(kernel.region, READ_ONLY, EXCLUSIVE, kernel.region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(bias.region, READ_ONLY, EXCLUSIVE, bias.region));
launcher.add_field(1, FID_DATA);
Future fu = runtime->execute_task(ctx, launcher);
fu.wait();
#else
RegionRequirement kernel_req(weights[0].region, READ_WRITE, EXCLUSIVE, weights[0].region);
kernel_req.add_field(FID_DATA);
InlineLauncher kernel_launcher(kernel_req);
PhysicalRegion kernel_region = runtime->map_region(ctx, kernel_launcher);
kernel_region.wait_until_valid();
/*
RegionRequirement kernel_grad_req(kernel.region_grad, READ_WRITE, EXCLUSIVE, kernel.region_grad);
kernel_grad_req.add_field(FID_DATA);
InlineLauncher kernel_grad_launcher(kernel_grad_req);
PhysicalRegion kernel_grad_region = runtime->map_region(ctx, kernel_grad_launcher);
kernel_grad_region.wait_until_valid();
*/
RegionRequirement bias_req(weights[1].region, READ_WRITE, EXCLUSIVE, weights[1].region);
bias_req.add_field(FID_DATA);
InlineLauncher bias_launcher(bias_req);
PhysicalRegion bias_region = runtime->map_region(ctx, bias_launcher);
bias_region.wait_until_valid();
/*
RegionRequirement bias_grad_req(bias.region_grad, READ_WRITE, EXCLUSIVE, bias.region_grad);
bias_grad_req.add_field(FID_DATA);
InlineLauncher bias_grad_launcher(bias_grad_req);
PhysicalRegion bias_grad_region = runtime->map_region(ctx, bias_grad_launcher);
bias_grad_region.wait_until_valid();
*/
TensorAccessorW<float, 4> acc_kernel(kernel_region, kernel_req, FID_DATA, ctx, runtime, true);
// const AccessorRW<float, 1> acc_kernel_grad(kernel_grad_region, FID_DATA);
TensorAccessorW<float, 1> acc_bias(bias_region, bias_req, FID_DATA, ctx, runtime, true);
//const AccessorRW<float, 1> acc_bias_grad(bias_grad_region, FID_DATA);
const float *kernel_ptr = acc_kernel.ptr;
//float *kernel_grad_ptr = acc_kernel_grad.ptr;
const float *bias_ptr = acc_bias.ptr;
//float *bias_grad_ptr = acc_bias_grad.ptr;
size_t kernel_size = acc_kernel.rect.volume();
int kernel_dim1 = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1;
int kernel_dim2 = acc_kernel.rect.hi[1] - acc_kernel.rect.lo[1] + 1;
int kernel_dim3 = acc_kernel.rect.hi[2] - acc_kernel.rect.lo[2] + 1;
int kernel_dim4 = acc_kernel.rect.hi[3] - acc_kernel.rect.lo[3] + 1;
//size_t kernel_grad_size = rect_kernel_grad.volume();
size_t bias_size = acc_bias.rect.volume();
//size_t bias_grad_size = rect_bias_grad.volume();
printf("kernel, %p, %d, [%d, %d, %d, %d]\n", kernel_ptr, kernel_size, kernel_dim1, kernel_dim2, kernel_dim3, kernel_dim4);
//printf("kernel_grad, %d\n", kernel_grad_size);
printf("bias, %p, %d\n", bias_ptr, bias_size);
//printf("bias_grad, %d\n", bias_grad_size);
for (int i = 0; i < bias_size; i++) {
printf("%f ", bias_ptr[i]);
}
printf("\n");
/*
for (int i = 0; i < bias_grad_size; i++) {
printf("%f ", bias_grad_ptr);
bias_grad_ptr ++;
}
printf("\n");*/
for (int i = 0; i < kernel_size; i++) {
printf("%f ", kernel_ptr[i]);
}
printf("\n");
/*
for (int i = 0; i < kernel_grad_size; i++) {
printf("%f ", kernel_grad_ptr);
kernel_grad_ptr ++;
}
printf("\n");
*/
runtime->unmap_region(ctx, kernel_region);
// runtime->unmap_region(ctx, kernel_grad_region);
runtime->unmap_region(ctx, bias_region);
// runtime->unmap_region(ctx, bias_grad_region);
#endif
}
cudnnConvolutionFwdAlgo_t
selectConvolutionForwardAlgorithm(cudnnHandle_t handle,
const cudnnTensorDescriptor_t xDesc, const void* x,
const cudnnFilterDescriptor_t wDesc, const void* w,
const cudnnConvolutionDescriptor_t convDesc,
void* workSpace, size_t workSpaceSize,
const cudnnTensorDescriptor_t yDesc, void* y)
{
const int reqAlgCnt = 8;
int cnt = 0;
cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt];
checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx(
handle, xDesc, x, wDesc, w, convDesc, yDesc, y,
reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize));
assert(cnt > 0);
checkCUDNN(perfResults[0].status);
printf("forwardAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time);
return perfResults[0].algo;
}
cudnnConvolutionBwdFilterAlgo_t
selectConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle,
const cudnnTensorDescriptor_t xDesc, const void* x,
const cudnnTensorDescriptor_t dyDesc, const void* dy,
const cudnnConvolutionDescriptor_t convDesc,
void* workSpace, size_t workSpaceSize,
const cudnnFilterDescriptor_t dwDesc, void* dw)
{
const int reqAlgCnt = 8;
int cnt = 0;
cudnnConvolutionBwdFilterAlgoPerf_t perfResults[reqAlgCnt];
checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithmEx(
handle, xDesc, x, dyDesc, dy, convDesc, dwDesc, dw,
reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize));
assert(cnt > 0);
checkCUDNN(perfResults[0].status);
printf("bwdFilterAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time);
return perfResults[0].algo;
}
cudnnConvolutionBwdDataAlgo_t
selectConvolutionBackwardDataAlgorithm(cudnnHandle_t handle,
const cudnnFilterDescriptor_t wDesc, const void* w,
const cudnnTensorDescriptor_t dyDesc, const void* dy,
const cudnnConvolutionDescriptor_t convDesc,
void* workSpace, size_t workSpaceSize,
const cudnnTensorDescriptor_t dxDesc, void* dx)
{
const int reqAlgCnt = 8;
int cnt = 0;
cudnnConvolutionBwdDataAlgoPerf_t perfResults[reqAlgCnt];
checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithmEx(
handle, wDesc, w, dyDesc, dy, convDesc, dxDesc, dx,
reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize));
assert(cnt > 0);
checkCUDNN(perfResults[0].status);
printf("bwdDataAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time);
return perfResults[0].algo;
}
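// Editor's note (not in the original source): cudnnFind*AlgorithmEx returns perfResults
// sorted by measured runtime, so the three helpers above simply take perfResults[0]. If the
// chosen algorithm also had to fit a fixed workspace budget, a sketch of the selection loop
// would be:
//   for (int i = 0; i < cnt; i++)
//     if (perfResults[i].status == CUDNN_STATUS_SUCCESS &&
//         perfResults[i].memory <= workSpaceSize)
//       return perfResults[i].algo;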
Conv2DMeta::Conv2DMeta(FFHandler handler)
: OpMeta(handler)
{
checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor));
checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc));
checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc));
}
bool Conv2D::measure_compute_time(Simulator* sim,
const ParallelConfig& pc,
float& forward_time,
float& backward_time)
{
Tensor sub_output, sub_input;
if(!outputs[0].get_output_sub_tensor(pc, sub_output, OP_CONV2D))
return false;
if(!inputs[0].get_input_sub_tensor(pc, sub_input, OP_CONV2D))
return false;
int input_w = sub_input.adim[0];
int input_h = sub_input.adim[1];
int input_c = sub_input.adim[2];
int input_n = sub_input.adim[3];
int output_w = sub_output.adim[0];
int output_h = sub_output.adim[1];
int output_c = sub_output.adim[2];
int output_n = sub_output.adim[3];
int pad_h = ((output_h - 1) * stride_h + kernel_h - input_h + 1) / 2;
int pad_w = ((output_w - 1) * stride_w + kernel_w - input_w + 1) / 2;
Conv2DMeta* m = sim->conv2d_meta;
m->relu = activation == AC_MODE_RELU;
checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w));
checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, 1, output_c, 1, 1));
checkCUDNN(cudnnSetFilter4dDescriptor(m->filterDesc, CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW, output_c, input_c, kernel_h, kernel_w));
checkCUDNN(cudnnSetConvolution2dDescriptor(m->convDesc, pad_h, pad_w,
stride_h, stride_w, 1/*dilationH*/, 1/*dilationW*/,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH));
int n, c, h, w;
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(m->convDesc,
m->inputTensor, m->filterDesc, &n, &c, &h, &w));
assert(n == output_n);
assert(c == output_c);
assert(h == output_h);
assert(w == output_w);
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU,
CUDNN_NOT_PROPAGATE_NAN, 0.0));
checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, n, c, h, w));
// allocate tensors in simulator
sim->free_all();
float* input_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT);
assert(input_ptr != NULL);
float *output_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT);
assert(output_ptr != NULL);
float* weight_ptr = (float*)sim->allocate((size_t)output_c * input_c * kernel_h * kernel_w, DT_FLOAT);
assert(weight_ptr != NULL);
float* bias_ptr = (float*)sim->allocate(output_c, DT_FLOAT);
assert(bias_ptr != NULL);
// select forward algorithm
{
const int reqAlgCnt = 8;
int cnt = 0;
cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt];
checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx(
m->handle.dnn, m->inputTensor, input_ptr,
m->filterDesc, weight_ptr, m->convDesc, m->outputTensor, output_ptr,
reqAlgCnt, &cnt, perfResults,
m->handle.workSpace, m->handle.workSpaceSize));
assert(cnt > 0);
checkCUDNN(perfResults[0].status);
forward_time = perfResults[0].time;
}
// select backward filter algorithm
{
const int reqAlgCnt = 8;
int cnt = 0;
cudnnConvolutionBwdFilterAlgoPerf_t perfResults[reqAlgCnt];
checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithmEx(
m->handle.dnn, m->inputTensor, input_ptr,
m->outputTensor, output_ptr, m->convDesc, m->filterDesc, weight_ptr,
reqAlgCnt, &cnt, perfResults,
m->handle.workSpace, m->handle.workSpaceSize));
assert(cnt > 0);
checkCUDNN(perfResults[0].status);
backward_time = perfResults[0].time;
}
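// select backward data algorithm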
{
const int reqAlgCnt = 8;
int cnt = 0;
cudnnConvolutionBwdDataAlgoPerf_t perfResults[reqAlgCnt];
checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithmEx(
m->handle.dnn, m->filterDesc, weight_ptr,
m->outputTensor, output_ptr, m->convDesc, m->inputTensor, input_ptr,
reqAlgCnt, &cnt, perfResults,
m->handle.workSpace, m->handle.workSpaceSize));
assert(cnt > 0);
checkCUDNN(perfResults[0].status);
backward_time += perfResults[0].time;
}
return true;
}
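// Editor's note (not in the original source): a rough sketch of how a caller in the
// simulator might consume these measurements; the Simulator* sim, ParallelConfig pc and
// Conv2D* conv names are assumed to exist in the caller's scope:
//   float fwd_ms = 0.0f, bwd_ms = 0.0f;
//   if (conv->measure_compute_time(sim, pc, fwd_ms, bwd_ms))
//     printf("conv2d fwd %.2fms bwd %.2fms\n", fwd_ms, bwd_ms); // bwd already sums filter + data passes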
| 4b630517059d6f02c02c1f2198a7fae9b5877090.cu | /* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::conv2d(const Tensor& input,
int outChannels,
int kernelH, int kernelW,
int strideH, int strideW,
int paddingH, int paddingW,
ActiMode activation,
bool use_bias,
Initializer* kernel_initializer,
Initializer* bias_initializer)
{
if (kernel_initializer == NULL) {
int seed = std::rand();
kernel_initializer = new GlorotUniform(seed);
}
if (bias_initializer == NULL) {
bias_initializer = new ZeroInitializer();
}
assert(input.numDim == 4); /*NCHW*/
Conv2D *conv = new Conv2D(*this, input, outChannels, kernelH, kernelW,
strideH, strideW, paddingH, paddingW, activation,
use_bias, kernel_initializer, bias_initializer);
layers.push_back(conv);
return conv->outputs[0];
}
Conv2D* FFModel::conv2d(int inChannels,
int outChannels,
int kernelH, int kernelW,
int strideH, int strideW,
int paddingH, int paddingW,
ActiMode activation,
bool use_bias,
Initializer* kernel_initializer,
Initializer* bias_initializer)
{
if (kernel_initializer == NULL) {
int seed = std::rand();
kernel_initializer = new GlorotUniform(seed);
}
if (bias_initializer == NULL) {
bias_initializer = new ZeroInitializer();
}
Conv2D *conv = new Conv2D(*this, inChannels, outChannels, kernelH, kernelW,
strideH, strideW, paddingH, paddingW, activation,
use_bias, kernel_initializer, bias_initializer);
layers.push_back(conv);
return conv;
}
/*
locals[0] = kernel
locals[1] = bias
*/
Conv2D::Conv2D(FFModel& model,
const Tensor& _input,
int out_dim,
int _kernel_h, int _kernel_w,
int _stride_h, int _stride_w,
int _padding_h, int _padding_w,
ActiMode _activation,
bool _use_bias,
Initializer* _kernel_initializer,
Initializer* _bias_initializer)
: Op(model, "Conv2D_"+std::to_string(_kernel_h)+std::to_string(_kernel_w), _input),
in_channels(_input.adim[2]), out_channels(out_dim),
kernel_h(_kernel_h), kernel_w(_kernel_w),
stride_h(_stride_h), stride_w(_stride_w),
padding_h(_padding_h), padding_w(_padding_w),
activation(_activation), use_bias(_use_bias),
kernel_initializer(_kernel_initializer),
bias_initializer(_bias_initializer),
profiling(model.config.profiling)
{
assert(_input.numDim == 4);
// Set output shape
int input_w = inputs[0].adim[0];
int input_h = inputs[0].adim[1];
int output_w = 1 + (input_w + 2 * padding_w - kernel_w) / stride_w;
int output_h = 1 + (input_h + 2 * padding_h - kernel_h) / stride_h;
int output_c = out_channels;
int output_n = inputs[0].adim[3];
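// Editor's note (not in the original source): worked example of the output-size formula
// above, with assumed values: a 224x224 input, a 7x7 kernel, stride 2 and padding 3 give
// output_w = 1 + (224 + 2*3 - 7) / 2 = 1 + 111 = 112, i.e. the usual half-resolution map.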
outputs[0].numDim = 4;
outputs[0].adim[0] = output_w;
outputs[0].adim[1] = output_h;
outputs[0].adim[2] = output_c;
outputs[0].adim[3] = output_n;
}
Conv2D::Conv2D(FFModel& model,
int in_dim, int out_dim,
int _kernel_h, int _kernel_w,
int _stride_h, int _stride_w,
int _padding_h, int _padding_w,
ActiMode _activation,
bool _use_bias,
Initializer* _kernel_initializer,
Initializer* _bias_initializer)
: Op(model, "Conv2D_"+std::to_string(_kernel_h)+std::to_string(_kernel_w), 1),
in_channels(in_dim), out_channels(out_dim),
kernel_h(_kernel_h), kernel_w(_kernel_w),
stride_h(_stride_h), stride_w(_stride_w),
padding_h(_padding_h), padding_w(_padding_w),
activation(_activation), use_bias(_use_bias),
kernel_initializer(_kernel_initializer),
bias_initializer(_bias_initializer),
profiling(model.config.profiling)
{
}
Tensor Conv2D::init_inout(FFModel& model, const Tensor& _input)
{
assert(_input.numDim == 4);
assert(_input.adim[2] == in_channels);
inputs[0] = _input;
create_output_and_partition(model);
return outputs[0];
}
void Conv2D::create_weights(FFModel& model)
{
// Retrieve the task index space for the op
std::string pcname = name;
task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname));
// Create kernel
{
const int dims[4] = {out_channels, in_channels, kernel_h, kernel_w};
weights[0] = model.create_conv_weight<4>(this, dims, (IndexSpaceT<4>)task_is, DT_FLOAT, kernel_initializer);
numWeights = 1;
}
// Create bias tensor
if (use_bias) {
const int dims[1] = {out_channels};
weights[1] = model.create_conv_weight<1>(this, dims, (IndexSpaceT<4>)task_is, DT_FLOAT, bias_initializer);
numWeights = 2;
}
}
void Conv2D::create_output_and_partition(FFModel& model)
{
// Retrieve the task index space for the op
std::string pcname = name;
task_is = IndexSpaceT<4>(model.get_or_create_task_is(4, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is);
// Create output tensor
int input_w = inputs[0].adim[0];
int input_h = inputs[0].adim[1];
int output_w = 1 + (input_w + 2 * padding_w - kernel_w) / stride_w;
int output_h = 1 + (input_h + 2 * padding_h - kernel_h) / stride_h;
int output_c = out_channels;
int output_n = inputs[0].adim[3];
int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1;
int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1;
int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1;
int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1;
{
const int dims[4] = {output_n, output_c, output_h, output_w};
outputs[0] = model.create_tensor<4>(dims, (IndexSpaceT<4>)task_is, DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
}
// Compute partition bound for input
Rect<4> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
// Currently assume we didn't split across the channel dimension
assert(num_par_c == 1);
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
model.create_disjoint_partition(
inputs[0], (IndexSpaceT<4>)task_is, input_lps[0], input_grad_lps[0]);
}
}
cudnnConvolutionFwdAlgo_t
selectConvolutionForwardAlgorithm(cudnnHandle_t handle,
const cudnnTensorDescriptor_t xDesc, const void* x,
const cudnnFilterDescriptor_t wDesc, const void* w,
const cudnnConvolutionDescriptor_t convDesc,
void* workSpace, size_t workSpaceSize,
const cudnnTensorDescriptor_t yDesc, void* y);
cudnnConvolutionBwdFilterAlgo_t
selectConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle,
const cudnnTensorDescriptor_t xDesc, const void* x,
const cudnnTensorDescriptor_t dyDesc, const void* dy,
const cudnnConvolutionDescriptor_t convDesc,
void* workSpace, size_t workSpaceSize,
const cudnnFilterDescriptor_t dwDesc, void* dw);
cudnnConvolutionBwdDataAlgo_t
selectConvolutionBackwardDataAlgorithm(cudnnHandle_t handle,
const cudnnFilterDescriptor_t wDesc, const void* w,
const cudnnTensorDescriptor_t dyDesc, const void* dy,
const cudnnConvolutionDescriptor_t convDesc,
void* workSpace, size_t workSpaceSize,
const cudnnTensorDescriptor_t dxDesc, void* dx);
/*
regions[0]: input
regions[1]: output
regions[2](I): filter
regions[3](I): bias
regions[4](O): filter_grad
regions[5](O): input_grad
*/
__host__
OpMeta* Conv2D::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 6);
assert(task->regions.size() == 6);
const Conv2D* conv = (Conv2D*) task->args;
FFHandler handle = *((const FFHandler*) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
false/*readOutput*/);
TensorAccessorR<float, 4> acc_kernel(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_kernel_grad(
regions[4], task->regions[4], FID_DATA, ctx, runtime,
false/*readOutput*/);
TensorAccessorW<float, 4> acc_input_grad(
regions[5], task->regions[5], FID_DATA, ctx, runtime,
false/*readOutput*/);
Conv2DMeta* m = new Conv2DMeta(handle);
m->relu = conv->activation == AC_MODE_RELU;
int input_w = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int input_h = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1;
int input_c = acc_input.rect.hi[2] - acc_input.rect.lo[2] + 1;
int input_n = acc_input.rect.hi[3] - acc_input.rect.lo[3] + 1;
int output_w = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1;
int output_h = acc_output.rect.hi[1] - acc_output.rect.lo[1] + 1;
int output_c = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1;
int output_n = acc_output.rect.hi[3] - acc_output.rect.lo[3] + 1;
printf("init conv (input): n(%d) c(%d) h(%d) w(%d)\n",
input_n, input_c, input_h, input_w);
printf("init conv (output): n(%d) c(%d) h(%d) w(%d)\n",
output_n, output_c, output_h, output_w);
checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
input_n,
input_c,
input_h,
input_w));
checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1,
output_c,
1,
1));
printf("filterDim: kernel(%d %d) c_in(%d), c_out(%d)\n", conv->kernel_h, conv->kernel_w, input_c, output_c);
checkCUDNN(cudnnSetFilter4dDescriptor(m->filterDesc,
CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW,
output_c,
input_c,
conv->kernel_h,
conv->kernel_w));
//printf("convDim: padding(%d %d) stride(%d %d)\n", conv->padding_h, conv->padding_w, conv->stride_h, conv->stride_w);
int pad_h = ((output_h - 1) * conv->stride_h + conv->kernel_h - input_h + 1) / 2;
int pad_w = ((output_w - 1) * conv->stride_w + conv->kernel_w - input_w + 1) / 2;
if (pad_h != conv->padding_h)
printf("Warning: changing conv_padding_h to satisfy output_h size\n");
if (pad_w != conv->padding_w)
printf("Warning: changing conv_padding_w to satisfy output_w size\n");
checkCUDNN(cudnnSetConvolution2dDescriptor(m->convDesc,
pad_h,//conv->padding_h,
pad_w,//conv->padding_w,
conv->stride_h,
conv->stride_w,
1/*upscale_x*/,
1/*upscale_y*/,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT));
// enable tensor core when possible
checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH));
int n, c, h, w;
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(m->convDesc,
m->inputTensor,
m->filterDesc,
&n, &c, &h, &w));
assert(n == output_n);
assert(c == output_c);
assert(h == output_h);
assert(w == output_w);
checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
n, c, h, w));
// select forward algorithm
m->fwdAlgo = selectConvolutionForwardAlgorithm(m->handle.dnn, m->inputTensor, acc_input.ptr,
m->filterDesc, acc_kernel.ptr, m->convDesc,
m->handle.workSpace, m->handle.workSpaceSize,
m->outputTensor, acc_output.ptr);
// select backward filter algorithm
m->bwdFilterAlgo = selectConvolutionBackwardFilterAlgorithm(
m->handle.dnn, m->inputTensor, acc_input.ptr,
m->outputTensor, acc_output.ptr,
m->convDesc, m->handle.workSpace, m->handle.workSpaceSize,
m->filterDesc, acc_kernel_grad.ptr);
// select backward data algorithm
m->bwdDataAlgo = selectConvolutionBackwardDataAlgorithm(
m->handle.dnn, m->filterDesc, acc_kernel.ptr,
m->outputTensor, acc_output.ptr,
m->convDesc, m->handle.workSpace, m->handle.workSpaceSize,
m->inputTensor, acc_input_grad.ptr);
if (m->relu) {
checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc));
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU,
CUDNN_PROPAGATE_NAN, 0.0));
}
return m;
}
void Conv2D::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
IndexLauncher launcher(CONV2D_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Conv2D)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(3, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].part_grad, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, weights[0].region_grad));
launcher.add_field(4, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(5, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
}
void Conv2D::forward_kernel(const Conv2DMeta* m,
const float* input_ptr,
float* output_ptr,
const float* filter_ptr,
const float* bias_ptr)
{
float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionForward(m->handle.dnn, &alpha,
m->inputTensor, input_ptr,
m->filterDesc, filter_ptr,
m->convDesc, m->fwdAlgo,
m->handle.workSpace, m->handle.workSpaceSize,
&beta, m->outputTensor, output_ptr));
checkCUDNN(cudnnAddTensor(m->handle.dnn, &alpha, m->biasTensor,
bias_ptr, &alpha, m->outputTensor, output_ptr));
if (m->relu) {
checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc,
&alpha, m->outputTensor, output_ptr,
&beta, m->outputTensor, output_ptr));
}
}
/*
regions[0](I): input
regions[1](O): output
regions[2](I): filter
regions[3](I): bias
*/
__host__
void Conv2D::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
Conv2D* conv = (Conv2D*) task->args;
const Conv2DMeta* m = *((Conv2DMeta**) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
false/*readOutput*/);
TensorAccessorR<float, 4> acc_kernel(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
//printf("fwdAlgo(%d), bwdFilterALgo(%d), bwdDataAlgo(%d)\n", (int)m->fwdAlgo,(int) m->bwdFilterAlgo,(int) m->bwdDataAlgo);
cudaEvent_t t_start, t_end;
if (conv->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
conv->forward_kernel(m, acc_input.ptr, acc_output.ptr, acc_kernel.ptr, acc_bias.ptr);
if (conv->profiling) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
//print_tensor<4, float>(acc_input.ptr, acc_input.rect, "[Conv2D:forward:input]");
//print_tensor<4, float>(acc_kernel.ptr, acc_kernel.rect, "[Conv2D:forward:kernel]");
//print_tensor<1, float>(acc_bias.ptr, acc_bias.rect, "[Conv2D:forward:bias]");
//print_tensor<4, float>(acc_output.ptr, acc_output.rect, "[Conv2D:forward:output]");
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("Conv2D forward time (CF) = %.2fms\n", elapsed);
}
}
__host__
void Conv2D::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(CONV2D_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Conv2D)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(3, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
void Conv2D::backward_kernel(const Conv2DMeta* m,
const float* input_ptr,
float* input_grad_ptr,
const float* output_ptr,
float* output_grad_ptr,
const float* kernel_ptr,
float* kernel_grad_ptr,
float* bias_grad_ptr)
{
float alpha = 1.0f;
//float beta = 0.0f;
if (m->relu) {
cudnnDataType_t dataType;
int n, c, h, w, nStride, cStride, hStride, wStride;
checkCUDNN(cudnnGetTensor4dDescriptor(m->outputTensor, &dataType,
&n, &c, &h, &w, &nStride, &cStride, &hStride, &wStride));
reluBackward<<<GET_BLOCKS(n*c*h*w), CUDA_NUM_THREADS>>>(output_grad_ptr, output_ptr, n*c*h*w);
}
// Compute filter gradient
// NOTE: we use alpha for kernel_grad to accumulate gradients
checkCUDNN(cudnnConvolutionBackwardFilter(m->handle.dnn, &alpha,
m->inputTensor, input_ptr,
m->outputTensor, output_grad_ptr,
m->convDesc, m->bwdFilterAlgo,
m->handle.workSpace, m->handle.workSpaceSize,
&alpha, m->filterDesc, kernel_grad_ptr));
// Compute bias gradient
// NOTE: we use alpha for bias_grad to accumulate gradients
checkCUDNN(cudnnConvolutionBackwardBias(m->handle.dnn, &alpha,
m->outputTensor, output_grad_ptr,
&alpha, m->biasTensor, bias_grad_ptr));
// Compute data gradient
// NOTE: we use alpha for input_grad to accumulate gradients
checkCUDNN(cudnnConvolutionBackwardData(m->handle.dnn, &alpha,
m->filterDesc, kernel_ptr,
m->outputTensor, output_grad_ptr,
m->convDesc, m->bwdDataAlgo,
m->handle.workSpace, m->handle.workSpaceSize,
&alpha, m->inputTensor, input_grad_ptr));
}
/*
regions[0](I): input
regions[1](I/O): input_grad
regions[2](I): output
regions[3](I/O): output_grad
regions[4](I): filter
regions[5](I/O): filter_grad
regions[6](I/O): bias_grad
*/
__host__
void Conv2D::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 7);
assert(task->regions.size() == 7);
Conv2D* conv = (Conv2D*) task->args;
const Conv2DMeta* m = *((Conv2DMeta**) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_input_grad(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 4> acc_output(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output_grad(
regions[3], task->regions[3], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, 4> acc_kernel(
regions[4], task->regions[4], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_kernel_grad(
regions[5], task->regions[5], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorW<float, 1> acc_bias_grad(
regions[6], task->regions[6], FID_DATA, ctx, runtime,
true/*readOutput*/);
cudaEvent_t t_start, t_end;
if (conv->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
conv->backward_kernel(m, acc_input.ptr, acc_input_grad.ptr,
acc_output.ptr, acc_output_grad.ptr,
acc_kernel.ptr, acc_kernel_grad.ptr,
acc_bias_grad.ptr);
if (conv->profiling) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("Conv2D backward time = %.2fms\n", elapsed);
//print_tensor<4, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Conv2D:backward:output_grad]");
//print_tensor<4, float>(acc_kernel_grad.ptr, acc_kernel_grad.rect, "[Conv2D:backward:kernel_grad]");
//print_tensor<1, float>(acc_bias_grad.ptr, acc_bias_grad.rect, "[Conv2D:backward:bias_grad]");
//print_tensor<4, float>(acc_input_grad.ptr, acc_input_grad.rect, "[Conv2D:backward:input_grad]");
}
}
__host__
void Conv2D::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(CONV2D_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Conv2D)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0](I): input
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1](I/O): input_grad
launcher.add_region_requirement(
RegionRequirement(inputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(1, FID_DATA);
// regions[2](I): output
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
// regions[3](I/O): output_grad
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(3, FID_DATA);
// regions[4](I): filter
launcher.add_region_requirement(
RegionRequirement(weights[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(4, FID_DATA);
// regions[5](I/O): filter_grad
launcher.add_region_requirement(
RegionRequirement(weights[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[0].region_grad));
launcher.add_field(5, FID_DATA);
// regions[6](I/O): bias_grad
launcher.add_region_requirement(
RegionRequirement(weights[1].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[1].region_grad));
launcher.add_field(6, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
// TODO: remove this line
//if (first_layer)
//fm.wait_all_results();
}
#ifdef DEADCODE
/*
regions[0](I/O): filter
regions[1](I): filter_grad
regions[2](I/O): bias
regions[3](I): bias_grad
*/
__host__
void Conv2D::update_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
const Conv2D* conv = (Conv2D*) task->args;
const AccessorRW<float, 1> acc_filter(regions[0], FID_DATA);
const AccessorRO<float, 1> acc_filter_grad(regions[1], FID_DATA);
const AccessorRW<float, 1> acc_bias(regions[2], FID_DATA);
const AccessorRO<float, 1> acc_bias_grad(regions[3], FID_DATA);
Rect<1> rect_filter, rect_filter_grad, rect_bias, rect_bias_grad;
rect_filter =
runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
rect_filter_grad =
runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
rect_bias =
runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
rect_bias_grad =
runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
size_t filter_size = rect_filter.volume();
size_t bias_size = rect_bias.volume();
assert(filter_size == conv->in_channels * conv->out_channels
* conv->kernel_w * conv->kernel_h);
assert(bias_size == conv->out_channels);
assert(filter_size * conv->num_replica == rect_filter_grad.volume());
assert(bias_size * conv->num_replica == rect_bias_grad.volume());
assert(acc_filter.accessor.is_dense_arbitrary(rect_filter));
assert(acc_filter_grad.accessor.is_dense_arbitrary(rect_filter_grad));
assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad));
float *filter_ptr = acc_filter.ptr(rect_filter.lo);
const float *filter_grad_ptr = acc_filter_grad.ptr(rect_filter_grad.lo);
float *bias_ptr = acc_bias.ptr(rect_bias.lo);
const float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo);
updateGAS(filter_ptr, filter_grad_ptr, filter_size,
conv->num_replica, conv->learning_rate);
updateGAS(bias_ptr, bias_grad_ptr, bias_size,
conv->num_replica, conv->learning_rate);
}
__host__
void Conv2D::update(const FFModel& ff)
{
// Synchronize the learning rate
learning_rate = ff.config.learningRate;
assert(num_replica > 0);
// Only aggregate parameters if more than one replica
if (num_replica > 1) {
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
TaskLauncher launcher(CONV2D_UPD_TASK_ID, TaskArgument(this, sizeof(Conv2D)));
launcher.add_region_requirement(
RegionRequirement(locals[0].region, READ_WRITE, EXCLUSIVE, locals[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(locals[0].region_grad, READ_ONLY, EXCLUSIVE, locals[0].region_grad));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(locals[1].region, READ_WRITE, EXCLUSIVE, locals[1].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(locals[1].region_grad, READ_ONLY, EXCLUSIVE, locals[1].region_grad));
launcher.add_field(3, FID_DATA);
runtime->execute_task(ctx, launcher);
}
}
#endif
/*
__host__
Parameter* Conv2D::get_parameter(int index)
{
if (index == 0) {
return &weights[0];
} else if (index == 1) {
return &weights[1];
} else {
assert(0);
return NULL;
}
}*/
__host__
void Conv2D::print_layer(const FFModel& ff)
{
printf("conv2d layer\n");
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
#if 0
TaskLauncher launcher(CONV2D_PRINT_TASK_ID, TaskArgument(NULL, 0));
launcher.add_region_requirement(
RegionRequirement(kernel.region, READ_ONLY, EXCLUSIVE, kernel.region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(bias.region, READ_ONLY, EXCLUSIVE, bias.region));
launcher.add_field(1, FID_DATA);
Future fu = runtime->execute_task(ctx, launcher);
fu.wait();
#else
RegionRequirement kernel_req(weights[0].region, READ_WRITE, EXCLUSIVE, weights[0].region);
kernel_req.add_field(FID_DATA);
InlineLauncher kernel_launcher(kernel_req);
PhysicalRegion kernel_region = runtime->map_region(ctx, kernel_launcher);
kernel_region.wait_until_valid();
/*
RegionRequirement kernel_grad_req(kernel.region_grad, READ_WRITE, EXCLUSIVE, kernel.region_grad);
kernel_grad_req.add_field(FID_DATA);
InlineLauncher kernel_grad_launcher(kernel_grad_req);
PhysicalRegion kernel_grad_region = runtime->map_region(ctx, kernel_grad_launcher);
kernel_grad_region.wait_until_valid();
*/
RegionRequirement bias_req(weights[1].region, READ_WRITE, EXCLUSIVE, weights[1].region);
bias_req.add_field(FID_DATA);
InlineLauncher bias_launcher(bias_req);
PhysicalRegion bias_region = runtime->map_region(ctx, bias_launcher);
bias_region.wait_until_valid();
/*
RegionRequirement bias_grad_req(bias.region_grad, READ_WRITE, EXCLUSIVE, bias.region_grad);
bias_grad_req.add_field(FID_DATA);
InlineLauncher bias_grad_launcher(bias_grad_req);
PhysicalRegion bias_grad_region = runtime->map_region(ctx, bias_grad_launcher);
bias_grad_region.wait_until_valid();
*/
TensorAccessorW<float, 4> acc_kernel(kernel_region, kernel_req, FID_DATA, ctx, runtime, true);
// const AccessorRW<float, 1> acc_kernel_grad(kernel_grad_region, FID_DATA);
TensorAccessorW<float, 1> acc_bias(bias_region, bias_req, FID_DATA, ctx, runtime, true);
//const AccessorRW<float, 1> acc_bias_grad(bias_grad_region, FID_DATA);
const float *kernel_ptr = acc_kernel.ptr;
//float *kernel_grad_ptr = acc_kernel_grad.ptr;
const float *bias_ptr = acc_bias.ptr;
//float *bias_grad_ptr = acc_bias_grad.ptr;
size_t kernel_size = acc_kernel.rect.volume();
int kernel_dim1 = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1;
int kernel_dim2 = acc_kernel.rect.hi[1] - acc_kernel.rect.lo[1] + 1;
int kernel_dim3 = acc_kernel.rect.hi[2] - acc_kernel.rect.lo[2] + 1;
int kernel_dim4 = acc_kernel.rect.hi[3] - acc_kernel.rect.lo[3] + 1;
//size_t kernel_grad_size = rect_kernel_grad.volume();
size_t bias_size = acc_bias.rect.volume();
//size_t bias_grad_size = rect_bias_grad.volume();
printf("kernel, %p, %d, [%d, %d, %d, %d]\n", kernel_ptr, kernel_size, kernel_dim1, kernel_dim2, kernel_dim3, kernel_dim4);
//printf("kernel_grad, %d\n", kernel_grad_size);
printf("bias, %p, %d\n", bias_ptr, bias_size);
//printf("bias_grad, %d\n", bias_grad_size);
for (int i = 0; i < bias_size; i++) {
printf("%f ", bias_ptr[i]);
}
printf("\n");
/*
for (int i = 0; i < bias_grad_size; i++) {
printf("%f ", bias_grad_ptr);
bias_grad_ptr ++;
}
printf("\n");*/
for (int i = 0; i < kernel_size; i++) {
printf("%f ", kernel_ptr[i]);
}
printf("\n");
/*
for (int i = 0; i < kernel_grad_size; i++) {
printf("%f ", kernel_grad_ptr);
kernel_grad_ptr ++;
}
printf("\n");
*/
runtime->unmap_region(ctx, kernel_region);
// runtime->unmap_region(ctx, kernel_grad_region);
runtime->unmap_region(ctx, bias_region);
// runtime->unmap_region(ctx, bias_grad_region);
#endif
}
cudnnConvolutionFwdAlgo_t
selectConvolutionForwardAlgorithm(cudnnHandle_t handle,
const cudnnTensorDescriptor_t xDesc, const void* x,
const cudnnFilterDescriptor_t wDesc, const void* w,
const cudnnConvolutionDescriptor_t convDesc,
void* workSpace, size_t workSpaceSize,
const cudnnTensorDescriptor_t yDesc, void* y)
{
const int reqAlgCnt = 8;
int cnt = 0;
cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt];
checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx(
handle, xDesc, x, wDesc, w, convDesc, yDesc, y,
reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize));
assert(cnt > 0);
checkCUDNN(perfResults[0].status);
printf("forwardAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time);
return perfResults[0].algo;
}
cudnnConvolutionBwdFilterAlgo_t
selectConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle,
const cudnnTensorDescriptor_t xDesc, const void* x,
const cudnnTensorDescriptor_t dyDesc, const void* dy,
const cudnnConvolutionDescriptor_t convDesc,
void* workSpace, size_t workSpaceSize,
const cudnnFilterDescriptor_t dwDesc, void* dw)
{
const int reqAlgCnt = 8;
int cnt = 0;
cudnnConvolutionBwdFilterAlgoPerf_t perfResults[reqAlgCnt];
checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithmEx(
handle, xDesc, x, dyDesc, dy, convDesc, dwDesc, dw,
reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize));
assert(cnt > 0);
checkCUDNN(perfResults[0].status);
printf("bwdFilterAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time);
return perfResults[0].algo;
}
cudnnConvolutionBwdDataAlgo_t
selectConvolutionBackwardDataAlgorithm(cudnnHandle_t handle,
const cudnnFilterDescriptor_t wDesc, const void* w,
const cudnnTensorDescriptor_t dyDesc, const void* dy,
const cudnnConvolutionDescriptor_t convDesc,
void* workSpace, size_t workSpaceSize,
const cudnnTensorDescriptor_t dxDesc, void* dx)
{
const int reqAlgCnt = 8;
int cnt = 0;
cudnnConvolutionBwdDataAlgoPerf_t perfResults[reqAlgCnt];
checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithmEx(
handle, wDesc, w, dyDesc, dy, convDesc, dxDesc, dx,
reqAlgCnt, &cnt, perfResults, workSpace, workSpaceSize));
assert(cnt > 0);
checkCUDNN(perfResults[0].status);
printf("bwdDataAlgo(%d) time(%.2lf)\n", perfResults[0].algo, perfResults[0].time);
return perfResults[0].algo;
}
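// Editor's sketch (not part of the original file): the helpers above assume the
// pre-allocated handle.workSpace is large enough for whichever algorithm cuDNN reports
// as fastest. A more defensive variant queries the exact requirement for the chosen
// forward algorithm before trusting it; checkCUDNN is the same macro used throughout
// this file.
static size_t queryConvolutionForwardWorkspaceSize(cudnnHandle_t handle,
const cudnnTensorDescriptor_t xDesc,
const cudnnFilterDescriptor_t wDesc,
const cudnnConvolutionDescriptor_t convDesc,
const cudnnTensorDescriptor_t yDesc,
cudnnConvolutionFwdAlgo_t algo)
{
size_t workspaceBytes = 0;
// Ask cuDNN how many scratch bytes this algorithm needs for these tensor shapes.
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(
handle, xDesc, wDesc, convDesc, yDesc, algo, &workspaceBytes));
return workspaceBytes;
}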
Conv2DMeta::Conv2DMeta(FFHandler handler)
: OpMeta(handler)
{
checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor));
checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc));
checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc));
}
bool Conv2D::measure_compute_time(Simulator* sim,
const ParallelConfig& pc,
float& forward_time,
float& backward_time)
{
Tensor sub_output, sub_input;
if(!outputs[0].get_output_sub_tensor(pc, sub_output, OP_CONV2D))
return false;
if(!inputs[0].get_input_sub_tensor(pc, sub_input, OP_CONV2D))
return false;
int input_w = sub_input.adim[0];
int input_h = sub_input.adim[1];
int input_c = sub_input.adim[2];
int input_n = sub_input.adim[3];
int output_w = sub_output.adim[0];
int output_h = sub_output.adim[1];
int output_c = sub_output.adim[2];
int output_n = sub_output.adim[3];
int pad_h = ((output_h - 1) * stride_h + kernel_h - input_h + 1) / 2;
int pad_w = ((output_w - 1) * stride_w + kernel_w - input_w + 1) / 2;
Conv2DMeta* m = sim->conv2d_meta;
m->relu = activation == AC_MODE_RELU;
checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, input_n, input_c, input_h, input_w));
checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, 1, output_c, 1, 1));
checkCUDNN(cudnnSetFilter4dDescriptor(m->filterDesc, CUDNN_DATA_FLOAT,
CUDNN_TENSOR_NCHW, output_c, input_c, kernel_h, kernel_w));
checkCUDNN(cudnnSetConvolution2dDescriptor(m->convDesc, pad_h, pad_w,
stride_h, stride_w, 1/*dilationH*/, 1/*dilationW*/,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
checkCUDNN(cudnnSetConvolutionMathType(m->convDesc, CUDNN_TENSOR_OP_MATH));
int n, c, h, w;
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(m->convDesc,
m->inputTensor, m->filterDesc, &n, &c, &h, &w));
assert(n == output_n);
assert(c == output_c);
assert(h == output_h);
assert(w == output_w);
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU,
CUDNN_NOT_PROPAGATE_NAN, 0.0));
checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT, n, c, h, w));
// allocate tensors in simulator
sim->free_all();
float* input_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT);
assert(input_ptr != NULL);
float *output_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT);
assert(output_ptr != NULL);
float* weight_ptr = (float*)sim->allocate((size_t)output_c * input_c * kernel_h * kernel_w, DT_FLOAT);
assert(weight_ptr != NULL);
float* bias_ptr = (float*)sim->allocate(output_c, DT_FLOAT);
assert(bias_ptr != NULL);
// select forward algorithm
{
const int reqAlgCnt = 8;
int cnt = 0;
cudnnConvolutionFwdAlgoPerf_t perfResults[reqAlgCnt];
checkCUDNN(cudnnFindConvolutionForwardAlgorithmEx(
m->handle.dnn, m->inputTensor, input_ptr,
m->filterDesc, weight_ptr, m->convDesc, m->outputTensor, output_ptr,
reqAlgCnt, &cnt, perfResults,
m->handle.workSpace, m->handle.workSpaceSize));
assert(cnt > 0);
checkCUDNN(perfResults[0].status);
forward_time = perfResults[0].time;
}
// select backward filter algorithm
{
const int reqAlgCnt = 8;
int cnt = 0;
cudnnConvolutionBwdFilterAlgoPerf_t perfResults[reqAlgCnt];
checkCUDNN(cudnnFindConvolutionBackwardFilterAlgorithmEx(
m->handle.dnn, m->inputTensor, input_ptr,
m->outputTensor, output_ptr, m->convDesc, m->filterDesc, weight_ptr,
reqAlgCnt, &cnt, perfResults,
m->handle.workSpace, m->handle.workSpaceSize));
assert(cnt > 0);
checkCUDNN(perfResults[0].status);
backward_time = perfResults[0].time;
}
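// select backward data algorithm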
{
const int reqAlgCnt = 8;
int cnt = 0;
cudnnConvolutionBwdDataAlgoPerf_t perfResults[reqAlgCnt];
checkCUDNN(cudnnFindConvolutionBackwardDataAlgorithmEx(
m->handle.dnn, m->filterDesc, weight_ptr,
m->outputTensor, output_ptr, m->convDesc, m->inputTensor, input_ptr,
reqAlgCnt, &cnt, perfResults,
m->handle.workSpace, m->handle.workSpaceSize));
assert(cnt > 0);
checkCUDNN(perfResults[0].status);
backward_time += perfResults[0].time;
}
return true;
}
|
5f3d434c3983a7e88e511bc41b403f9a8cdda65a.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<stdlib.h>
#include<iostream>
// some CUDA headers
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include<hip/hip_runtime_api.h>
#include "device_launch_parameters.h"
__global__ void kernal(float* a) // kernel function
{
a[threadIdx.x] = 1;
}
void cjh()// driver function
{
// find out how many GPUs the machine has
int gpuCount = -1; // initialize the GPU count
hipGetDeviceCount(&gpuCount); // get the GPU count; note the use of &
printf("gpuCount: %d\n", gpuCount);
hipSetDevice(gpuCount - 1); // select the last device (device 0 on this machine)
//hipSetDevice(0); // select a device
float* aGpu; // device pointer
hipMalloc((void**)&aGpu, 16 * sizeof(float));// allocate device memory
float a[16] = { 0 }; // allocate host memory
hipMemcpy(aGpu, a, 16 * sizeof(float), hipMemcpyHostToDevice);// host to device
kernal << <1, 16 >> > (aGpu); // launch the kernel
hipMemcpy(a, aGpu, 16 * sizeof(float), hipMemcpyDeviceToHost);// device to host
for (int i = 0; i < 16; ++i) // print the results
{
printf("%f\n", a[i]);
}
hipFree(aGpu); // free device memory
hipDeviceReset(); // reset the device state so a different device can be selected later
// query some device information
hipDeviceProp_t prop; // properties struct made up of a number of fields
hipGetDeviceProperties(&prop, 0);
printf("maxThreadsPerBlock: %d\n", prop.maxThreadsPerBlock);
printf("maxThreadsDim: %d\n", prop.maxThreadsDim);
printf("maxGridSize: %d\n", prop.maxGridSize);
printf("totalConstMem: %d\n", prop.totalConstMem);
printf("clockRate: %d\n", prop.clockRate);
printf("integrated: %d\n", prop.integrated);
// automatically pick the best device
int deviceId; // declare the device id first
hipChooseDevice(&deviceId, &prop); // pass the id variable and the queried properties struct; the chosen device id is written back into the variable
printf("deviceId: %d\n", deviceId);
// this machine has only one device, so requesting two would fail; the commented lines below show how two devices would be assigned
//int deviceList[2] = { 0,1 };
//hipSetValidDevices(deviceList, 1);
}
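// Editor's note (not in the original source): the file defines cjh() but no entry point;
// a minimal driver, if one were wanted, would just be
//   int main() { cjh(); return 0; }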
| 5f3d434c3983a7e88e511bc41b403f9a8cdda65a.cu | #include<stdio.h>
#include<stdlib.h>
#include<iostream>
// some CUDA headers
#include <cuda.h>
#include <curand_kernel.h>
#include<cuda_runtime_api.h>
#include "device_launch_parameters.h"
__global__ void kernal(float* a) // kernel function
{
a[threadIdx.x] = 1;
}
void cjh()// driver function
{
// find out how many GPUs the machine has
int gpuCount = -1; // initialize the GPU count
cudaGetDeviceCount(&gpuCount); // get the GPU count; note the use of &
printf("gpuCount: %d\n", gpuCount);
cudaSetDevice(gpuCount - 1); // select the last device (device 0 on this machine)
//cudaSetDevice(0); // select a device
float* aGpu; // device pointer
cudaMalloc((void**)&aGpu, 16 * sizeof(float));// allocate device memory
float a[16] = { 0 }; // allocate host memory
cudaMemcpy(aGpu, a, 16 * sizeof(float), cudaMemcpyHostToDevice);// host to device
kernal << <1, 16 >> > (aGpu); // launch the kernel
cudaMemcpy(a, aGpu, 16 * sizeof(float), cudaMemcpyDeviceToHost);// device to host
for (int i = 0; i < 16; ++i) // print the results
{
printf("%f\n", a[i]);
}
cudaFree(aGpu); // free device memory
cudaDeviceReset(); // reset the device state so a different device can be selected later
// query some device information
cudaDeviceProp prop; // properties struct made up of a number of fields
cudaGetDeviceProperties(&prop, 0);
printf("maxThreadsPerBlock: %d\n", prop.maxThreadsPerBlock);
printf("maxThreadsDim: %d\n", prop.maxThreadsDim);
printf("maxGridSize: %d\n", prop.maxGridSize);
printf("totalConstMem: %d\n", prop.totalConstMem);
printf("clockRate: %d\n", prop.clockRate);
printf("integrated: %d\n", prop.integrated);
// automatically pick the best device
int deviceId; // declare the device id first
cudaChooseDevice(&deviceId, &prop); // pass the id variable and the queried properties struct; the chosen device id is written back into the variable
printf("deviceId: %d\n", deviceId);
// this machine has only one device, so requesting two would fail; the commented lines below show how two devices would be assigned
//int deviceList[2] = { 0,1 };
//cudaSetValidDevices(deviceList, 1);
}
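// Editor's sketch (not part of the original file): every runtime call above ignores its
// return status. A minimal checking wrapper, using only the standard CUDA runtime API,
// could look like this; calls such as cudaMalloc or cudaMemcpy would be wrapped with it.
#define CHECK_CUDA(call) \
do { \
cudaError_t err_ = (call); \
if (err_ != cudaSuccess) { \
printf("CUDA error %s at %s:%d\n", cudaGetErrorString(err_), __FILE__, __LINE__); \
exit(1); \
} \
} while (0)
// Example: CHECK_CUDA(cudaMalloc((void**)&aGpu, 16 * sizeof(float)));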
|